path: root/include
author     David S. Miller <davem@davemloft.net>  2009-06-15 03:02:23 -0700
committer  David S. Miller <davem@davemloft.net>  2009-06-15 03:02:23 -0700
commit     9cbc1cb8cd46ce1f7645b9de249b2ce8460129bb (patch)
tree       8d104ec2a459346b99413b0b77421ca7b9936c1a /include
parent     ca44d6e60f9de26281fda203f58b570e1748c015 (diff)
parent     45e3e1935e2857c54783291107d33323b3ef33c8 (diff)
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts:
	Documentation/feature-removal-schedule.txt
	drivers/scsi/fcoe/fcoe.c
	net/core/drop_monitor.c
	net/core/net-traces.c
Diffstat (limited to 'include')
-rw-r--r--  include/Kbuild | 1
-rw-r--r--  include/asm-generic/Kbuild | 22
-rw-r--r--  include/asm-generic/Kbuild.asm | 1
-rw-r--r--  include/asm-generic/atomic-long.h | 258
-rw-r--r--  include/asm-generic/atomic.h | 321
-rw-r--r--  include/asm-generic/auxvec.h | 8
-rw-r--r--  include/asm-generic/bitops.h | 24
-rw-r--r--  include/asm-generic/bitops/atomic.h | 1
-rw-r--r--  include/asm-generic/bitsperlong.h | 32
-rw-r--r--  include/asm-generic/bugs.h | 10
-rw-r--r--  include/asm-generic/cache.h | 12
-rw-r--r--  include/asm-generic/cacheflush.h | 30
-rw-r--r--  include/asm-generic/checksum.h | 79
-rw-r--r--  include/asm-generic/current.h | 9
-rw-r--r--  include/asm-generic/delay.h | 9
-rw-r--r--  include/asm-generic/dma.h | 15
-rw-r--r--  include/asm-generic/fb.h | 12
-rw-r--r--  include/asm-generic/getorder.h | 24
-rw-r--r--  include/asm-generic/hardirq.h | 34
-rw-r--r--  include/asm-generic/hw_irq.h | 9
-rw-r--r--  include/asm-generic/int-l64.h | 2
-rw-r--r--  include/asm-generic/int-ll64.h | 2
-rw-r--r--  include/asm-generic/io.h | 300
-rw-r--r--  include/asm-generic/ioctls.h | 110
-rw-r--r--  include/asm-generic/ipcbuf.h | 34
-rw-r--r--  include/asm-generic/irq.h | 18
-rw-r--r--  include/asm-generic/irqflags.h | 72
-rw-r--r--  include/asm-generic/kmap_types.h | 32
-rw-r--r--  include/asm-generic/linkage.h | 8
-rw-r--r--  include/asm-generic/local.h | 2
-rw-r--r--  include/asm-generic/mman-common.h | 41
-rw-r--r--  include/asm-generic/mman.h | 51
-rw-r--r--  include/asm-generic/mmu.h | 15
-rw-r--r--  include/asm-generic/mmu_context.h | 45
-rw-r--r--  include/asm-generic/module.h | 22
-rw-r--r--  include/asm-generic/msgbuf.h | 47
-rw-r--r--  include/asm-generic/mutex.h | 9
-rw-r--r--  include/asm-generic/page.h | 109
-rw-r--r--  include/asm-generic/param.h | 24
-rw-r--r--  include/asm-generic/parport.h | 23
-rw-r--r--  include/asm-generic/pci.h | 8
-rw-r--r--  include/asm-generic/pgalloc.h | 12
-rw-r--r--  include/asm-generic/pgtable.h | 21
-rw-r--r--  include/asm-generic/posix_types.h | 165
-rw-r--r--  include/asm-generic/rtc.h | 2
-rw-r--r--  include/asm-generic/scatterlist.h | 43
-rw-r--r--  include/asm-generic/segment.h | 9
-rw-r--r--  include/asm-generic/sembuf.h | 38
-rw-r--r--  include/asm-generic/serial.h | 13
-rw-r--r--  include/asm-generic/setup.h | 6
-rw-r--r--  include/asm-generic/shmbuf.h | 59
-rw-r--r--  include/asm-generic/shmparam.h | 6
-rw-r--r--  include/asm-generic/signal-defs.h | 28
-rw-r--r--  include/asm-generic/signal.h | 137
-rw-r--r--  include/asm-generic/socket.h | 63
-rw-r--r--  include/asm-generic/sockios.h | 13
-rw-r--r--  include/asm-generic/spinlock.h | 11
-rw-r--r--  include/asm-generic/stat.h | 72
-rw-r--r--  include/asm-generic/string.h | 10
-rw-r--r--  include/asm-generic/swab.h | 18
-rw-r--r--  include/asm-generic/syscalls.h | 60
-rw-r--r--  include/asm-generic/system.h | 161
-rw-r--r--  include/asm-generic/termbits.h | 198
-rw-r--r--  include/asm-generic/termios-base.h | 77
-rw-r--r--  include/asm-generic/termios.h | 105
-rw-r--r--  include/asm-generic/timex.h | 22
-rw-r--r--  include/asm-generic/tlbflush.h | 18
-rw-r--r--  include/asm-generic/types.h | 42
-rw-r--r--  include/asm-generic/uaccess-unaligned.h | 26
-rw-r--r--  include/asm-generic/uaccess.h | 333
-rw-r--r--  include/asm-generic/ucontext.h | 12
-rw-r--r--  include/asm-generic/unaligned.h | 30
-rw-r--r--  include/asm-generic/unistd.h | 854
-rw-r--r--  include/asm-generic/user.h | 8
-rw-r--r--  include/asm-generic/vga.h | 24
-rw-r--r--  include/asm-generic/vmlinux.lds.h | 243
-rw-r--r--  include/drm/drmP.h | 134
-rw-r--r--  include/drm/drm_crtc.h | 3
-rw-r--r--  include/drm/drm_crtc_helper.h | 2
-rw-r--r--  include/drm/drm_hashtab.h | 2
-rw-r--r--  include/drm/drm_mm.h | 90
-rw-r--r--  include/drm/drm_pciids.h | 9
-rw-r--r--  include/linux/Kbuild | 2
-rw-r--r--  include/linux/acpi.h | 2
-rw-r--r--  include/linux/amba/bus.h | 2
-rw-r--r--  include/linux/amba/pl022.h | 264
-rw-r--r--  include/linux/amba/serial.h | 4
-rw-r--r--  include/linux/atmel-mci.h | 2
-rw-r--r--  include/linux/auto_fs.h | 3
-rw-r--r--  include/linux/bio.h | 10
-rw-r--r--  include/linux/blkdev.h | 247
-rw-r--r--  include/linux/blktrace_api.h | 45
-rw-r--r--  include/linux/cb710.h | 231
-rw-r--r--  include/linux/cdev.h | 2
-rw-r--r--  include/linux/clk.h | 13
-rw-r--r--  include/linux/clocksource.h | 10
-rw-r--r--  include/linux/compat.h | 2
-rw-r--r--  include/linux/compiler.h | 5
-rw-r--r--  include/linux/cpumask.h | 15
-rw-r--r--  include/linux/cramfs_fs.h | 3
-rw-r--r--  include/linux/cred.h | 1
-rw-r--r--  include/linux/cyclades.h | 37
-rw-r--r--  include/linux/dcache.h | 11
-rw-r--r--  include/linux/device-mapper.h | 2
-rw-r--r--  include/linux/device.h | 5
-rw-r--r--  include/linux/dlm.h | 4
-rw-r--r--  include/linux/dma-debug.h | 7
-rw-r--r--  include/linux/dmar.h | 3
-rw-r--r--  include/linux/dnotify.h | 29
-rw-r--r--  include/linux/elevator.h | 4
-rw-r--r--  include/linux/fs.h | 29
-rw-r--r--  include/linux/fsnotify.h | 199
-rw-r--r--  include/linux/fsnotify_backend.h | 387
-rw-r--r--  include/linux/ftrace.h | 17
-rw-r--r--  include/linux/ftrace_event.h | 172
-rw-r--r--  include/linux/fuse.h | 31
-rw-r--r--  include/linux/futex.h | 6
-rw-r--r--  include/linux/genhd.h | 2
-rw-r--r--  include/linux/gfp.h | 3
-rw-r--r--  include/linux/hid.h | 36
-rw-r--r--  include/linux/i2c-ocores.h | 2
-rw-r--r--  include/linux/i7300_idle.h | 20
-rw-r--r--  include/linux/ide.h | 75
-rw-r--r--  include/linux/if_ether.h | 1
-rw-r--r--  include/linux/ima.h | 11
-rw-r--r--  include/linux/init.h | 7
-rw-r--r--  include/linux/init_task.h | 15
-rw-r--r--  include/linux/input.h | 1
-rw-r--r--  include/linux/interrupt.h | 8
-rw-r--r--  include/linux/iocontext.h | 6
-rw-r--r--  include/linux/irq.h | 74
-rw-r--r--  include/linux/kernel_stat.h | 5
-rw-r--r--  include/linux/keyboard.h | 1
-rw-r--r--  include/linux/kmemleak.h | 96
-rw-r--r--  include/linux/kmemtrace.h | 25
-rw-r--r--  include/linux/kvm.h | 46
-rw-r--r--  include/linux/kvm_host.h | 21
-rw-r--r--  include/linux/kvm_types.h | 27
-rw-r--r--  include/linux/lguest.h | 4
-rw-r--r--  include/linux/lguest_launcher.h | 3
-rw-r--r--  include/linux/loop.h | 3
-rw-r--r--  include/linux/lsm_audit.h | 111
-rw-r--r--  include/linux/magic.h | 3
-rw-r--r--  include/linux/mfd/tmio.h | 7
-rw-r--r--  include/linux/mg_disk.h | 206
-rw-r--r--  include/linux/mlx4/device.h | 1
-rw-r--r--  include/linux/mlx4/qp.h | 1
-rw-r--r--  include/linux/mm.h | 11
-rw-r--r--  include/linux/mmiotrace.h | 2
-rw-r--r--  include/linux/mmzone.h | 26
-rw-r--r--  include/linux/module.h | 9
-rw-r--r--  include/linux/moduleparam.h | 40
-rw-r--r--  include/linux/mount.h | 25
-rw-r--r--  include/linux/mutex.h | 1
-rw-r--r--  include/linux/namei.h | 5
-rw-r--r--  include/linux/net_dropmon.h | 1
-rw-r--r--  include/linux/nfsd/export.h | 6
-rw-r--r--  include/linux/page_cgroup.h | 18
-rw-r--r--  include/linux/parport.h | 4
-rw-r--r--  include/linux/pci_ids.h | 17
-rw-r--r--  include/linux/percpu.h | 5
-rw-r--r--  include/linux/perf_counter.h | 709
-rw-r--r--  include/linux/pipe_fs_i.h | 1
-rw-r--r--  include/linux/pm.h | 11
-rw-r--r--  include/linux/pnp.h | 2
-rw-r--r--  include/linux/prctl.h | 3
-rw-r--r--  include/linux/proc_fs.h | 24
-rw-r--r--  include/linux/ptrace.h | 10
-rw-r--r--  include/linux/qnx4_fs.h | 61
-rw-r--r--  include/linux/quotaops.h | 20
-rw-r--r--  include/linux/rational.h | 19
-rw-r--r--  include/linux/rculist.h | 30
-rw-r--r--  include/linux/rcutree.h | 9
-rw-r--r--  include/linux/reiserfs_fs_sb.h | 2
-rw-r--r--  include/linux/reiserfs_xattr.h | 4
-rw-r--r--  include/linux/ring_buffer.h | 66
-rw-r--r--  include/linux/sched.h | 79
-rw-r--r--  include/linux/section-names.h | 6
-rw-r--r--  include/linux/security.h | 2
-rw-r--r--  include/linux/serial.h | 116
-rw-r--r--  include/linux/serial_core.h | 6
-rw-r--r--  include/linux/serial_sci.h | 3
-rw-r--r--  include/linux/sh_cmt.h | 13
-rw-r--r--  include/linux/sh_timer.h | 13
-rw-r--r--  include/linux/signal.h | 2
-rw-r--r--  include/linux/slab.h | 4
-rw-r--r--  include/linux/slab_def.h | 2
-rw-r--r--  include/linux/slob_def.h | 5
-rw-r--r--  include/linux/slub_def.h | 4
-rw-r--r--  include/linux/spinlock_up.h | 1
-rw-r--r--  include/linux/splice.h | 3
-rw-r--r--  include/linux/suspend.h | 18
-rw-r--r--  include/linux/swap.h | 5
-rw-r--r--  include/linux/swiotlb.h | 3
-rw-r--r--  include/linux/syscalls.h | 5
-rw-r--r--  include/linux/thread_info.h | 3
-rw-r--r--  include/linux/time.h | 15
-rw-r--r--  include/linux/trace_seq.h | 92
-rw-r--r--  include/linux/tracehook.h | 11
-rw-r--r--  include/linux/tracepoint.h | 8
-rw-r--r--  include/linux/tty.h | 18
-rw-r--r--  include/linux/tty_driver.h | 6
-rw-r--r--  include/linux/ultrasound.h | 2
-rw-r--r--  include/linux/usb/serial.h | 10
-rw-r--r--  include/linux/virtio.h | 15
-rw-r--r--  include/linux/virtio_blk.h | 12
-rw-r--r--  include/linux/virtio_config.h | 49
-rw-r--r--  include/linux/virtio_pci.h | 10
-rw-r--r--  include/linux/virtio_ring.h | 8
-rw-r--r--  include/linux/wait.h | 2
-rw-r--r--  include/linux/writeback.h | 1
-rw-r--r--  include/scsi/fc/fc_fip.h | 7
-rw-r--r--  include/scsi/iscsi_if.h | 49
-rw-r--r--  include/scsi/libfc.h | 1
-rw-r--r--  include/scsi/libiscsi.h | 8
-rw-r--r--  include/scsi/osd_attributes.h | 74
-rw-r--r--  include/scsi/osd_initiator.h | 14
-rw-r--r--  include/scsi/osd_protocol.h | 8
-rw-r--r--  include/scsi/scsi_cmnd.h | 2
-rw-r--r--  include/scsi/scsi_transport_fc.h | 4
-rw-r--r--  include/scsi/scsi_transport_iscsi.h | 8
-rw-r--r--  include/sound/asound.h | 1
-rw-r--r--  include/sound/core.h | 11
-rw-r--r--  include/sound/driver.h | 1
-rw-r--r--  include/sound/pcm.h | 76
-rw-r--r--  include/sound/soc-dai.h | 30
-rw-r--r--  include/sound/soc-dapm.h | 24
-rw-r--r--  include/sound/soc.h | 34
-rw-r--r--  include/sound/wm9081.h | 25
-rw-r--r--  include/trace/block.h | 76
-rw-r--r--  include/trace/define_trace.h | 75
-rw-r--r--  include/trace/events/block.h | 493
-rw-r--r--  include/trace/events/irq.h | 145
-rw-r--r--  include/trace/events/kmem.h | 231
-rw-r--r--  include/trace/events/lockdep.h | 96
-rw-r--r--  include/trace/events/napi.h (renamed from include/trace/napi.h) | 0
-rw-r--r--  include/trace/events/sched.h (renamed from include/trace/sched_event_types.h) | 29
-rw-r--r--  include/trace/events/skb.h | 40
-rw-r--r--  include/trace/events/workqueue.h | 100
-rw-r--r--  include/trace/ftrace.h | 591
-rw-r--r--  include/trace/irq.h | 9
-rw-r--r--  include/trace/irq_event_types.h | 55
-rw-r--r--  include/trace/kmemtrace.h | 63
-rw-r--r--  include/trace/lockdep.h | 9
-rw-r--r--  include/trace/lockdep_event_types.h | 44
-rw-r--r--  include/trace/sched.h | 9
-rw-r--r--  include/trace/skb.h | 11
-rw-r--r--  include/trace/trace_event_types.h | 5
-rw-r--r--  include/trace/trace_events.h | 5
-rw-r--r--  include/trace/workqueue.h | 25
-rw-r--r--  include/video/pxa168fb.h | 127
-rw-r--r--  include/xen/Kbuild | 1
-rw-r--r--  include/xen/events.h | 3
-rw-r--r--  include/xen/evtchn.h | 88
-rw-r--r--  include/xen/interface/version.h | 3
-rw-r--r--  include/xen/xenbus.h | 3
256 files changed, 10381 insertions, 1716 deletions
diff --git a/include/Kbuild b/include/Kbuild
index d8c3e3c..fe36acc 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -8,3 +8,4 @@ header-y += mtd/
header-y += rdma/
header-y += video/
header-y += drm/
+header-y += xen/
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 4c9932a..eb62334 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -1,11 +1,33 @@
+header-y += auxvec.h
+header-y += bitsperlong.h
header-y += errno-base.h
header-y += errno.h
header-y += fcntl.h
header-y += ioctl.h
+header-y += ioctls.h
+header-y += ipcbuf.h
+header-y += mman-common.h
header-y += mman.h
+header-y += msgbuf.h
+header-y += param.h
header-y += poll.h
+header-y += posix_types.h
+header-y += sembuf.h
+header-y += setup.h
+header-y += shmbuf.h
+header-y += shmparam.h
+header-y += signal-defs.h
header-y += signal.h
+header-y += socket.h
+header-y += sockios.h
+header-y += stat.h
header-y += statfs.h
+header-y += swab.h
+header-y += termbits.h
+header-y += termios.h
+header-y += types.h
+header-y += ucontext.h
+header-y += unistd.h
unifdef-y += int-l64.h
unifdef-y += int-ll64.h
diff --git a/include/asm-generic/Kbuild.asm b/include/asm-generic/Kbuild.asm
index 70d1855..290910e 100644
--- a/include/asm-generic/Kbuild.asm
+++ b/include/asm-generic/Kbuild.asm
@@ -9,6 +9,7 @@ unifdef-y += a.out.h
endif
unifdef-y += auxvec.h
unifdef-y += byteorder.h
+unifdef-y += bitsperlong.h
unifdef-y += errno.h
unifdef-y += fcntl.h
unifdef-y += ioctl.h
diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
new file mode 100644
index 0000000..b7babf0
--- /dev/null
+++ b/include/asm-generic/atomic-long.h
@@ -0,0 +1,258 @@
+#ifndef _ASM_GENERIC_ATOMIC_LONG_H
+#define _ASM_GENERIC_ATOMIC_LONG_H
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ * Christoph Lameter
+ *
+ * Allows to provide arch independent atomic definitions without the need to
+ * edit all arch specific atomic.h files.
+ */
+
+#include <asm/types.h>
+
+/*
+ * Suppport for atomic_long_t
+ *
+ * Casts for parameters are avoided for existing atomic functions in order to
+ * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
+ * macros of a platform may have.
+ */
+
+#if BITS_PER_LONG == 64
+
+typedef atomic64_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+
+static inline long atomic_long_read(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ atomic64_sub(i, v);
+}
+
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return atomic64_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic64_t *v = (atomic64_t *)l;
+
+ return (long)atomic64_add_unless(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
+
+#define atomic_long_cmpxchg(l, old, new) \
+ (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
+#define atomic_long_xchg(v, new) \
+ (atomic64_xchg((atomic64_t *)(v), (new)))
+
+#else /* BITS_PER_LONG == 64 */
+
+typedef atomic_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
+static inline long atomic_long_read(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ atomic_sub(i, v);
+}
+
+static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_sub_and_test(i, v);
+}
+
+static inline int atomic_long_dec_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_dec_and_test(v);
+}
+
+static inline int atomic_long_inc_and_test(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_inc_and_test(v);
+}
+
+static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return atomic_add_negative(i, v);
+}
+
+static inline long atomic_long_add_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_add_return(i, v);
+}
+
+static inline long atomic_long_sub_return(long i, atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_sub_return(i, v);
+}
+
+static inline long atomic_long_inc_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_inc_return(v);
+}
+
+static inline long atomic_long_dec_return(atomic_long_t *l)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_dec_return(v);
+}
+
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+ atomic_t *v = (atomic_t *)l;
+
+ return (long)atomic_add_unless(v, a, u);
+}
+
+#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
+
+#define atomic_long_cmpxchg(l, old, new) \
+ (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
+#define atomic_long_xchg(v, new) \
+ (atomic_xchg((atomic_t *)(v), (new)))
+
+#endif /* BITS_PER_LONG == 64 */
+
+#endif /* _ASM_GENERIC_ATOMIC_LONG_H */
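Note: the header above implements atomic_long_t purely by casting to the native atomic type for the word size (atomic64_t on 64-bit, atomic_t on 32-bit). A minimal usage sketch; the counter name is illustrative and not part of the patch:

/* Works unchanged on 32-bit and 64-bit kernels: atomic_long_t resolves
 * to whichever atomic type matches sizeof(long). */
#include <asm-generic/atomic-long.h>

static atomic_long_t nr_tracked = ATOMIC_LONG_INIT(0);

static void track_one(void)
{
	atomic_long_inc(&nr_tracked);
}

static long tracked_count(void)
{
	return atomic_long_read(&nr_tracked);
}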
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 3673a13..c99c64d 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -1,258 +1,165 @@
-#ifndef _ASM_GENERIC_ATOMIC_H
-#define _ASM_GENERIC_ATOMIC_H
/*
- * Copyright (C) 2005 Silicon Graphics, Inc.
- * Christoph Lameter
+ * Generic C implementation of atomic counter operations
+ * Originally implemented for MN10300.
*
- * Allows to provide arch independent atomic definitions without the need to
- * edit all arch specific atomic.h files.
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
*/
+#ifndef __ASM_GENERIC_ATOMIC_H
+#define __ASM_GENERIC_ATOMIC_H
-#include <asm/types.h>
+#ifdef CONFIG_SMP
+#error not SMP safe
+#endif
/*
- * Suppport for atomic_long_t
- *
- * Casts for parameters are avoided for existing atomic functions in order to
- * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
- * macros of a platform may have.
+ * Atomic operations that C can't guarantee us. Useful for
+ * resource counting etc..
*/
-#if BITS_PER_LONG == 64
-
-typedef atomic64_t atomic_long_t;
-
-#define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
+#define ATOMIC_INIT(i) { (i) }
-static inline long atomic_long_read(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_read(v);
-}
-
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_set(v, i);
-}
-
-static inline void atomic_long_inc(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_inc(v);
-}
-
-static inline void atomic_long_dec(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_dec(v);
-}
-
-static inline void atomic_long_add(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_add(i, v);
-}
-
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- atomic64_sub(i, v);
-}
-
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_sub_and_test(i, v);
-}
-
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_dec_and_test(v);
-}
-
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_inc_and_test(v);
-}
-
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return atomic64_add_negative(i, v);
-}
-
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_return(i, v);
-}
-
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
+#ifdef __KERNEL__
- return (long)atomic64_sub_return(i, v);
-}
-
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_inc_return(v);
-}
-
-static inline long atomic_long_dec_return(atomic_long_t *l)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_dec_return(v);
-}
-
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
- atomic64_t *v = (atomic64_t *)l;
-
- return (long)atomic64_add_unless(v, a, u);
-}
-
-#define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
-
-#define atomic_long_cmpxchg(l, old, new) \
- (atomic64_cmpxchg((atomic64_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
- (atomic64_xchg((atomic64_t *)(l), (new)))
-
-#else /* BITS_PER_LONG == 64 */
-
-typedef atomic_t atomic_long_t;
-
-#define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
-static inline long atomic_long_read(atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_read(v);
-}
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_read(v) ((v)->counter)
-static inline void atomic_long_set(atomic_long_t *l, long i)
-{
- atomic_t *v = (atomic_t *)l;
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i. Note that the guaranteed
+ * useful range of an atomic_t is only 24 bits.
+ */
+#define atomic_set(v, i) (((v)->counter) = (i))
- atomic_set(v, i);
-}
+#include <asm/system.h>
-static inline void atomic_long_inc(atomic_long_t *l)
+/**
+ * atomic_add_return - add integer to atomic variable
+ * @i: integer value to add
+ * @v: pointer of type atomic_t
+ *
+ * Atomically adds @i to @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_add_return(int i, atomic_t *v)
{
- atomic_t *v = (atomic_t *)l;
-
- atomic_inc(v);
-}
+ unsigned long flags;
+ int temp;
-static inline void atomic_long_dec(atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
+ local_irq_save(flags);
+ temp = v->counter;
+ temp += i;
+ v->counter = temp;
+ local_irq_restore(flags);
- atomic_dec(v);
+ return temp;
}
-static inline void atomic_long_add(long i, atomic_long_t *l)
+/**
+ * atomic_sub_return - subtract integer from atomic variable
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns the result
+ * Note that the guaranteed useful range of an atomic_t is only 24 bits.
+ */
+static inline int atomic_sub_return(int i, atomic_t *v)
{
- atomic_t *v = (atomic_t *)l;
-
- atomic_add(i, v);
-}
+ unsigned long flags;
+ int temp;
-static inline void atomic_long_sub(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
+ local_irq_save(flags);
+ temp = v->counter;
+ temp -= i;
+ v->counter = temp;
+ local_irq_restore(flags);
- atomic_sub(i, v);
+ return temp;
}
-static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
+static inline int atomic_add_negative(int i, atomic_t *v)
{
- atomic_t *v = (atomic_t *)l;
-
- return atomic_sub_and_test(i, v);
+ return atomic_add_return(i, v) < 0;
}
-static inline int atomic_long_dec_and_test(atomic_long_t *l)
+static inline void atomic_add(int i, atomic_t *v)
{
- atomic_t *v = (atomic_t *)l;
-
- return atomic_dec_and_test(v);
+ atomic_add_return(i, v);
}
-static inline int atomic_long_inc_and_test(atomic_long_t *l)
+static inline void atomic_sub(int i, atomic_t *v)
{
- atomic_t *v = (atomic_t *)l;
-
- return atomic_inc_and_test(v);
+ atomic_sub_return(i, v);
}
-static inline int atomic_long_add_negative(long i, atomic_long_t *l)
+static inline void atomic_inc(atomic_t *v)
{
- atomic_t *v = (atomic_t *)l;
-
- return atomic_add_negative(i, v);
+ atomic_add_return(1, v);
}
-static inline long atomic_long_add_return(long i, atomic_long_t *l)
+static inline void atomic_dec(atomic_t *v)
{
- atomic_t *v = (atomic_t *)l;
-
- return (long)atomic_add_return(i, v);
+ atomic_sub_return(1, v);
}
-static inline long atomic_long_sub_return(long i, atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
- return (long)atomic_sub_return(i, v);
-}
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_dec_and_test(v) (atomic_sub_return(1, (v)) == 0)
+#define atomic_inc_and_test(v) (atomic_add_return(1, (v)) == 0)
-static inline long atomic_long_inc_return(atomic_long_t *l)
-{
- atomic_t *v = (atomic_t *)l;
+#define atomic_add_unless(v, a, u) \
+({ \
+ int c, old; \
+ c = atomic_read(v); \
+ while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
+ c = old; \
+ c != (u); \
+})
- return (long)atomic_inc_return(v);
-}
+#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
-static inline long atomic_long_dec_return(atomic_long_t *l)
+static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
{
- atomic_t *v = (atomic_t *)l;
+ unsigned long flags;
- return (long)atomic_dec_return(v);
+ mask = ~mask;
+ local_irq_save(flags);
+ *addr &= mask;
+ local_irq_restore(flags);
}
-static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
-{
- atomic_t *v = (atomic_t *)l;
+#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
- return (long)atomic_add_unless(v, a, u);
-}
+#define cmpxchg_local(ptr, o, n) \
+ ((__typeof__(*(ptr)))__cmpxchg_local_generic((ptr), (unsigned long)(o),\
+ (unsigned long)(n), sizeof(*(ptr))))
-#define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
-#define atomic_long_cmpxchg(l, old, new) \
- (atomic_cmpxchg((atomic_t *)(l), (old), (new)))
-#define atomic_long_xchg(v, new) \
- (atomic_xchg((atomic_t *)(v), (new)))
+/* Assume that atomic operations are already serializing */
+#define smp_mb__before_atomic_dec() barrier()
+#define smp_mb__after_atomic_dec() barrier()
+#define smp_mb__before_atomic_inc() barrier()
+#define smp_mb__after_atomic_inc() barrier()
-#endif /* BITS_PER_LONG == 64 */
+#include <asm-generic/atomic-long.h>
-#endif /* _ASM_GENERIC_ATOMIC_H */
+#endif /* __KERNEL__ */
+#endif /* __ASM_GENERIC_ATOMIC_H */
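Note: the rewritten asm-generic/atomic.h above targets uniprocessor-only builds (hence the #error under CONFIG_SMP): every operation is derived from atomic_add_return()/atomic_sub_return() inside a local_irq_save() section, and atomic_add_unless() is an open-coded cmpxchg() loop. A brief illustrative caller, not part of the patch, showing the refcount idiom these primitives enable:

/* "Take a reference unless the object already hit zero" -- the usual
 * consumer of atomic_inc_not_zero(), sketched for illustration only. */
static int try_get_ref(atomic_t *refcount)
{
	return atomic_inc_not_zero(refcount);	/* 0 means the count was already 0 */
}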
diff --git a/include/asm-generic/auxvec.h b/include/asm-generic/auxvec.h
new file mode 100644
index 0000000..b99573b
--- /dev/null
+++ b/include/asm-generic/auxvec.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_AUXVEC_H
+#define __ASM_GENERIC_AUXVEC_H
+/*
+ * Not all architectures need their own auxvec.h, the most
+ * common definitions are already in linux/auxvec.h.
+ */
+
+#endif /* __ASM_GENERIC_AUXVEC_H */
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index c9f369c..a54f442 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -1,19 +1,29 @@
-#ifndef _ASM_GENERIC_BITOPS_H_
-#define _ASM_GENERIC_BITOPS_H_
+#ifndef __ASM_GENERIC_BITOPS_H
+#define __ASM_GENERIC_BITOPS_H
/*
* For the benefit of those who are trying to port Linux to another
* architecture, here are some C-language equivalents. You should
* recode these in the native assembly language, if at all possible.
- *
+ *
* C language equivalents written by Theodore Ts'o, 9/26/92
*/
-#include <asm-generic/bitops/atomic.h>
-#include <asm-generic/bitops/non-atomic.h>
+#include <linux/irqflags.h>
+#include <linux/compiler.h>
+
+/*
+ * clear_bit may not imply a memory barrier
+ */
+#ifndef smp_mb__before_clear_bit
+#define smp_mb__before_clear_bit() smp_mb()
+#define smp_mb__after_clear_bit() smp_mb()
+#endif
+
#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
#include <asm-generic/bitops/fls64.h>
#include <asm-generic/bitops/find.h>
@@ -26,8 +36,10 @@
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/lock.h>
+#include <asm-generic/bitops/atomic.h>
+#include <asm-generic/bitops/non-atomic.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
-#endif /* _ASM_GENERIC_BITOPS_H */
+#endif /* __ASM_GENERIC_BITOPS_H */
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 4657f3e..c894646 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,6 +2,7 @@
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
#include <asm/types.h>
+#include <asm/system.h>
#ifdef CONFIG_SMP
#include <asm/spinlock.h>
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
new file mode 100644
index 0000000..4ae54e0
--- /dev/null
+++ b/include/asm-generic/bitsperlong.h
@@ -0,0 +1,32 @@
+#ifndef __ASM_GENERIC_BITS_PER_LONG
+#define __ASM_GENERIC_BITS_PER_LONG
+
+/*
+ * There seems to be no way of detecting this automatically from user
+ * space, so 64 bit architectures should override this in their
+ * bitsperlong.h. In particular, an architecture that supports
+ * both 32 and 64 bit user space must not rely on CONFIG_64BIT
+ * to decide it, but rather check a compiler provided macro.
+ */
+#ifndef __BITS_PER_LONG
+#define __BITS_PER_LONG 32
+#endif
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_64BIT
+#define BITS_PER_LONG 64
+#else
+#define BITS_PER_LONG 32
+#endif /* CONFIG_64BIT */
+
+/*
+ * FIXME: The check currently breaks x86-64 build, so it's
+ * temporarily disabled. Please fix x86-64 and reenable
+ */
+#if 0 && BITS_PER_LONG != __BITS_PER_LONG
+#error Inconsistent word size. Check asm/bitsperlong.h
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_GENERIC_BITS_PER_LONG */
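Note: the split above keeps __BITS_PER_LONG usable from user space (where CONFIG_64BIT is not visible) while the kernel-only BITS_PER_LONG comes from Kconfig. A sketch of what a 64-bit architecture's asm/bitsperlong.h might do, keying off a compiler-provided macro; the macro choice is an illustrative assumption, not taken from this patch:

/* Hypothetical arch override (illustrative only). */
#ifdef __x86_64__
#define __BITS_PER_LONG 64
#else
#define __BITS_PER_LONG 32
#endif

#include <asm-generic/bitsperlong.h>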
diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h
new file mode 100644
index 0000000..6c4f62e
--- /dev/null
+++ b/include/asm-generic/bugs.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_GENERIC_BUGS_H
+#define __ASM_GENERIC_BUGS_H
+/*
+ * This file is included by 'init/main.c' to check for
+ * architecture-dependent bugs.
+ */
+
+static inline void check_bugs(void) { }
+
+#endif /* __ASM_GENERIC_BUGS_H */
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
new file mode 100644
index 0000000..1bfcfe5
--- /dev/null
+++ b/include/asm-generic/cache.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_GENERIC_CACHE_H
+#define __ASM_GENERIC_CACHE_H
+/*
+ * 32 bytes appears to be the most common cache line size,
+ * so make that the default here. Architectures with larger
+ * cache lines need to provide their own cache.h.
+ */
+
+#define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#endif /* __ASM_GENERIC_CACHE_H */
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
new file mode 100644
index 0000000..ba4ec39
--- /dev/null
+++ b/include/asm-generic/cacheflush.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_CACHEFLUSH_H
+#define __ASM_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change when
+ * the cache is mapped to physical memory, not virtual memory
+ */
+#define flush_cache_all() do { } while (0)
+#define flush_cache_mm(mm) do { } while (0)
+#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
+#define flush_dcache_page(page) do { } while (0)
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_range(start, end) do { } while (0)
+#define flush_icache_page(vma,pg) do { } while (0)
+#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
+#define flush_cache_vmap(start, end) do { } while (0)
+#define flush_cache_vunmap(start, end) do { } while (0)
+
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
+ memcpy(dst, src, len)
+
+#endif /* __ASM_CACHEFLUSH_H */
diff --git a/include/asm-generic/checksum.h b/include/asm-generic/checksum.h
new file mode 100644
index 0000000..4647c76
--- /dev/null
+++ b/include/asm-generic/checksum.h
@@ -0,0 +1,79 @@
+#ifndef __ASM_GENERIC_CHECKSUM_H
+#define __ASM_GENERIC_CHECKSUM_H
+
+/*
+ * computes the checksum of a memory block at buff, length len,
+ * and adds in "sum" (32-bit)
+ *
+ * returns a 32-bit number suitable for feeding into itself
+ * or csum_tcpudp_magic
+ *
+ * this function must be called with even lengths, except
+ * for the last fragment, which may be odd
+ *
+ * it's best to have buff aligned on a 32-bit boundary
+ */
+extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+
+/*
+ * the same as csum_partial, but copies from src while it
+ * checksums
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy(const void *src, void *dst, int len, __wsum sum);
+
+/*
+ * the same as csum_partial_copy, but copies from user space.
+ *
+ * here even more important to align src and dst on a 32-bit (or even
+ * better 64-bit) boundary
+ */
+extern __wsum csum_partial_copy_from_user(const void __user *src, void *dst,
+ int len, __wsum sum, int *csum_err);
+
+#define csum_partial_copy_nocheck(src, dst, len, sum) \
+ csum_partial_copy((src), (dst), (len), (sum))
+
+/*
+ * This is a version of ip_compute_csum() optimized for IP headers,
+ * which always checksum on 4 octet boundaries.
+ */
+extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+
+/*
+ * Fold a partial checksum
+ */
+static inline __sum16 csum_fold(__wsum csum)
+{
+ u32 sum = (__force u32)csum;
+ sum = (sum & 0xffff) + (sum >> 16);
+ sum = (sum & 0xffff) + (sum >> 16);
+ return (__force __sum16)~sum;
+}
+
+#ifndef csum_tcpudp_nofold
+/*
+ * computes the checksum of the TCP/UDP pseudo-header
+ * returns a 16-bit checksum, already complemented
+ */
+extern __wsum
+csum_tcpudp_nofold(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum);
+#endif
+
+static inline __sum16
+csum_tcpudp_magic(__be32 saddr, __be32 daddr, unsigned short len,
+ unsigned short proto, __wsum sum)
+{
+ return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
+}
+
+/*
+ * this routine is used for miscellaneous IP-like checksums, mainly
+ * in icmp.c
+ */
+extern __sum16 ip_compute_csum(const void *buff, int len);
+
+#endif /* __ASM_GENERIC_CHECKSUM_H */
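Note: csum_fold() above repeatedly adds the high and low 16-bit halves of the running 32-bit sum and complements the result, the standard ones-complement fold. An illustrative caller, not part of the patch, combining it with csum_partial():

/* Fold a 32-bit partial checksum over a buffer into the final 16-bit,
 * ones-complement result (essentially what ip_compute_csum() does). */
static __sum16 checksum_buffer(const void *buf, int len)
{
	return csum_fold(csum_partial(buf, len, 0));
}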
diff --git a/include/asm-generic/current.h b/include/asm-generic/current.h
new file mode 100644
index 0000000..5e86f6a
--- /dev/null
+++ b/include/asm-generic/current.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_CURRENT_H
+#define __ASM_GENERIC_CURRENT_H
+
+#include <linux/thread_info.h>
+
+#define get_current() (current_thread_info()->task)
+#define current get_current()
+
+#endif /* __ASM_GENERIC_CURRENT_H */
diff --git a/include/asm-generic/delay.h b/include/asm-generic/delay.h
new file mode 100644
index 0000000..4586fec
--- /dev/null
+++ b/include/asm-generic/delay.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_DELAY_H
+#define __ASM_GENERIC_DELAY_H
+
+extern void __udelay(unsigned long usecs);
+extern void __delay(unsigned long loops);
+
+#define udelay(n) __udelay(n)
+
+#endif /* __ASM_GENERIC_DELAY_H */
diff --git a/include/asm-generic/dma.h b/include/asm-generic/dma.h
new file mode 100644
index 0000000..9dfc3a7
--- /dev/null
+++ b/include/asm-generic/dma.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_GENERIC_DMA_H
+#define __ASM_GENERIC_DMA_H
+/*
+ * This file traditionally describes the i8237 PC style DMA controller.
+ * Most architectures don't have these any more and can get the minimal
+ * implementation from kernel/dma.c by not defining MAX_DMA_CHANNELS.
+ *
+ * Some code relies on seeing MAX_DMA_ADDRESS though.
+ */
+#define MAX_DMA_ADDRESS PAGE_OFFSET
+
+extern int request_dma(unsigned int dmanr, const char *device_id);
+extern void free_dma(unsigned int dmanr);
+
+#endif /* __ASM_GENERIC_DMA_H */
diff --git a/include/asm-generic/fb.h b/include/asm-generic/fb.h
new file mode 100644
index 0000000..fe8ca7f
--- /dev/null
+++ b/include/asm-generic/fb.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_GENERIC_FB_H_
+#define __ASM_GENERIC_FB_H_
+#include <linux/fb.h>
+
+#define fb_pgprotect(...) do {} while (0)
+
+static inline int fb_is_primary_device(struct fb_info *info)
+{
+ return 0;
+}
+
+#endif /* __ASM_GENERIC_FB_H_ */
diff --git a/include/asm-generic/getorder.h b/include/asm-generic/getorder.h
new file mode 100644
index 0000000..67e7245
--- /dev/null
+++ b/include/asm-generic/getorder.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_GENERIC_GETORDER_H
+#define __ASM_GENERIC_GETORDER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+/* Pure 2^n version of get_order */
+static inline __attribute_const__ int get_order(unsigned long size)
+{
+ int order;
+
+ size = (size - 1) >> (PAGE_SHIFT - 1);
+ order = -1;
+ do {
+ size >>= 1;
+ order++;
+ } while (size);
+ return order;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GENERIC_GETORDER_H */
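Note: get_order() above returns the smallest order such that 2^order pages cover the requested size. Worked values assuming PAGE_SHIFT == 12 (4 KiB pages); the arithmetic is illustrative, not part of the patch:

/*
 *   get_order(1)     == 0    (one page)
 *   get_order(4096)  == 0
 *   get_order(8192)  == 1    (two pages)
 *   get_order(12288) == 2    (three pages rounded up to four)
 *
 * so __get_free_pages(GFP_KERNEL, get_order(size)) allocates enough
 * contiguous pages to hold size bytes.
 */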
diff --git a/include/asm-generic/hardirq.h b/include/asm-generic/hardirq.h
new file mode 100644
index 0000000..3d5d2c9
--- /dev/null
+++ b/include/asm-generic/hardirq.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_GENERIC_HARDIRQ_H
+#define __ASM_GENERIC_HARDIRQ_H
+
+#include <linux/cache.h>
+#include <linux/threads.h>
+#include <linux/irq.h>
+
+typedef struct {
+ unsigned long __softirq_pending;
+} ____cacheline_aligned irq_cpustat_t;
+
+#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
+
+#ifndef HARDIRQ_BITS
+#define HARDIRQ_BITS 8
+#endif
+
+/*
+ * The hardirq mask has to be large enough to have
+ * space for potentially all IRQ sources in the system
+ * nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+#ifndef ack_bad_irq
+static inline void ack_bad_irq(unsigned int irq)
+{
+ printk(KERN_CRIT "unexpected IRQ trap at vector %02x\n", irq);
+}
+#endif
+
+#endif /* __ASM_GENERIC_HARDIRQ_H */
diff --git a/include/asm-generic/hw_irq.h b/include/asm-generic/hw_irq.h
new file mode 100644
index 0000000..89036d7
--- /dev/null
+++ b/include/asm-generic/hw_irq.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_HW_IRQ_H
+#define __ASM_GENERIC_HW_IRQ_H
+/*
+ * hw_irq.h has internal declarations for the low-level interrupt
+ * controller, like the original i8259A.
+ * In general, this is not needed for new architectures.
+ */
+
+#endif /* __ASM_GENERIC_HW_IRQ_H */
diff --git a/include/asm-generic/int-l64.h b/include/asm-generic/int-l64.h
index 2af9b75..1ca3efc 100644
--- a/include/asm-generic/int-l64.h
+++ b/include/asm-generic/int-l64.h
@@ -8,6 +8,8 @@
#ifndef _ASM_GENERIC_INT_L64_H
#define _ASM_GENERIC_INT_L64_H
+#include <asm/bitsperlong.h>
+
#ifndef __ASSEMBLY__
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
diff --git a/include/asm-generic/int-ll64.h b/include/asm-generic/int-ll64.h
index f9bc9ac..f394147 100644
--- a/include/asm-generic/int-ll64.h
+++ b/include/asm-generic/int-ll64.h
@@ -8,6 +8,8 @@
#ifndef _ASM_GENERIC_INT_LL64_H
#define _ASM_GENERIC_INT_LL64_H
+#include <asm/bitsperlong.h>
+
#ifndef __ASSEMBLY__
/*
* __xx is ok: it doesn't pollute the POSIX namespace. Use these in the
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
new file mode 100644
index 0000000..bcee636
--- /dev/null
+++ b/include/asm-generic/io.h
@@ -0,0 +1,300 @@
+/* Generic I/O port emulation, based on MN10300 code
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_IO_H
+#define __ASM_GENERIC_IO_H
+
+#include <asm/page.h> /* I/O is all done through memory accesses */
+#include <asm/cacheflush.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_GENERIC_IOMAP
+#include <asm-generic/iomap.h>
+#endif
+
+#define mmiowb() do {} while (0)
+
+/*****************************************************************************/
+/*
+ * readX/writeX() are used to access memory mapped devices. On some
+ * architectures the memory mapped IO stuff needs to be accessed
+ * differently. On the simple architectures, we just read/write the
+ * memory location directly.
+ */
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return *(const volatile u8 __force *) addr;
+}
+
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return *(const volatile u16 __force *) addr;
+}
+
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return *(const volatile u32 __force *) addr;
+}
+
+#define readb __raw_readb
+#define readw(addr) __le16_to_cpu(__raw_readw(addr))
+#define readl(addr) __le32_to_cpu(__raw_readl(addr))
+
+static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ *(volatile u8 __force *) addr = b;
+}
+
+static inline void __raw_writew(u16 b, volatile void __iomem *addr)
+{
+ *(volatile u16 __force *) addr = b;
+}
+
+static inline void __raw_writel(u32 b, volatile void __iomem *addr)
+{
+ *(volatile u32 __force *) addr = b;
+}
+
+#define writeb __raw_writeb
+#define writew(b,addr) __raw_writew(__cpu_to_le16(b),addr)
+#define writel(b,addr) __raw_writel(__cpu_to_le32(b),addr)
+
+#ifdef CONFIG_64BIT
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ return *(const volatile u64 __force *) addr;
+}
+#define readq(addr) __le64_to_cpu(__raw_readq(addr))
+
+static inline void __raw_writeq(u64 b, volatile void __iomem *addr)
+{
+ *(volatile u64 __force *) addr = b;
+}
+#define writeq(b,addr) __raw_writeq(__cpu_to_le64(b),addr)
+#endif
+
+/*****************************************************************************/
+/*
+ * traditional input/output functions
+ */
+
+static inline u8 inb(unsigned long addr)
+{
+ return readb((volatile void __iomem *) addr);
+}
+
+static inline u16 inw(unsigned long addr)
+{
+ return readw((volatile void __iomem *) addr);
+}
+
+static inline u32 inl(unsigned long addr)
+{
+ return readl((volatile void __iomem *) addr);
+}
+
+static inline void outb(u8 b, unsigned long addr)
+{
+ writeb(b, (volatile void __iomem *) addr);
+}
+
+static inline void outw(u16 b, unsigned long addr)
+{
+ writew(b, (volatile void __iomem *) addr);
+}
+
+static inline void outl(u32 b, unsigned long addr)
+{
+ writel(b, (volatile void __iomem *) addr);
+}
+
+#define inb_p(addr) inb(addr)
+#define inw_p(addr) inw(addr)
+#define inl_p(addr) inl(addr)
+#define outb_p(x, addr) outb((x), (addr))
+#define outw_p(x, addr) outw((x), (addr))
+#define outl_p(x, addr) outl((x), (addr))
+
+static inline void insb(unsigned long addr, void *buffer, int count)
+{
+ if (count) {
+ u8 *buf = buffer;
+ do {
+ u8 x = inb(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+
+static inline void insw(unsigned long addr, void *buffer, int count)
+{
+ if (count) {
+ u16 *buf = buffer;
+ do {
+ u16 x = inw(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+
+static inline void insl(unsigned long addr, void *buffer, int count)
+{
+ if (count) {
+ u32 *buf = buffer;
+ do {
+ u32 x = inl(addr);
+ *buf++ = x;
+ } while (--count);
+ }
+}
+
+static inline void outsb(unsigned long addr, const void *buffer, int count)
+{
+ if (count) {
+ const u8 *buf = buffer;
+ do {
+ outb(*buf++, addr);
+ } while (--count);
+ }
+}
+
+static inline void outsw(unsigned long addr, const void *buffer, int count)
+{
+ if (count) {
+ const u16 *buf = buffer;
+ do {
+ outw(*buf++, addr);
+ } while (--count);
+ }
+}
+
+static inline void outsl(unsigned long addr, const void *buffer, int count)
+{
+ if (count) {
+ const u32 *buf = buffer;
+ do {
+ outl(*buf++, addr);
+ } while (--count);
+ }
+}
+
+#ifndef CONFIG_GENERIC_IOMAP
+#define ioread8(addr) readb(addr)
+#define ioread16(addr) readw(addr)
+#define ioread32(addr) readl(addr)
+
+#define iowrite8(v, addr) writeb((v), (addr))
+#define iowrite16(v, addr) writew((v), (addr))
+#define iowrite32(v, addr) writel((v), (addr))
+
+#define ioread8_rep(p, dst, count) \
+ insb((unsigned long) (p), (dst), (count))
+#define ioread16_rep(p, dst, count) \
+ insw((unsigned long) (p), (dst), (count))
+#define ioread32_rep(p, dst, count) \
+ insl((unsigned long) (p), (dst), (count))
+
+#define iowrite8_rep(p, src, count) \
+ outsb((unsigned long) (p), (src), (count))
+#define iowrite16_rep(p, src, count) \
+ outsw((unsigned long) (p), (src), (count))
+#define iowrite32_rep(p, src, count) \
+ outsl((unsigned long) (p), (src), (count))
+#endif /* CONFIG_GENERIC_IOMAP */
+
+
+#define IO_SPACE_LIMIT 0xffffffff
+
+#ifdef __KERNEL__
+
+#include <linux/vmalloc.h>
+#define __io_virt(x) ((void __force *) (x))
+
+#ifndef CONFIG_GENERIC_IOMAP
+/* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
+struct pci_dev;
+extern void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long max);
+static inline void pci_iounmap(struct pci_dev *dev, void __iomem *p)
+{
+}
+#endif /* CONFIG_GENERIC_IOMAP */
+
+/*
+ * Change virtual addresses to physical addresses and vv.
+ * These are pretty trivial
+ */
+static inline unsigned long virt_to_phys(volatile void *address)
+{
+ return __pa((unsigned long)address);
+}
+
+static inline void *phys_to_virt(unsigned long address)
+{
+ return __va(address);
+}
+
+/*
+ * Change "struct page" to physical address.
+ */
+static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
+{
+ return (void __iomem*) (unsigned long)offset;
+}
+
+#define __ioremap(offset, size, flags) ioremap(offset, size)
+
+#ifndef ioremap_nocache
+#define ioremap_nocache ioremap
+#endif
+
+#ifndef ioremap_wc
+#define ioremap_wc ioremap_nocache
+#endif
+
+static inline void iounmap(void *addr)
+{
+}
+
+#ifndef CONFIG_GENERIC_IOMAP
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+ return (void __iomem *) port;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+#else /* CONFIG_GENERIC_IOMAP */
+extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
+extern void ioport_unmap(void __iomem *p);
+#endif /* CONFIG_GENERIC_IOMAP */
+
+#define xlate_dev_kmem_ptr(p) p
+#define xlate_dev_mem_ptr(p) ((void *) (p))
+
+#ifndef virt_to_bus
+static inline unsigned long virt_to_bus(volatile void *address)
+{
+ return ((unsigned long) address);
+}
+
+static inline void *bus_to_virt(unsigned long address)
+{
+ return (void *) address;
+}
+#endif
+
+#define memset_io(a, b, c) memset(__io_virt(a), (b), (c))
+#define memcpy_fromio(a, b, c) memcpy((a), __io_virt(b), (c))
+#define memcpy_toio(a, b, c) memcpy(__io_virt(a), (b), (c))
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_GENERIC_IO_H */
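Note: on architectures that adopt this generic io.h, ioremap() is effectively a cast, readl()/writel() are little-endian MMIO accesses, and the inb()/outb() family is routed through the same memory accessors. A hedged driver-style sketch; the base address and register offset are hypothetical, not from the patch:

/* Illustrative only: poke a made-up device register through the
 * generic MMIO helpers defined above. */
static void poke_device(void)
{
	void __iomem *regs = ioremap(0xfff00000, 0x1000);	/* assumed base/size */
	u32 status = readl(regs + 0x04);			/* hypothetical STATUS register */

	writel(status | 0x1, regs + 0x04);	/* set a hypothetical enable bit */
	iounmap(regs);
}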
diff --git a/include/asm-generic/ioctls.h b/include/asm-generic/ioctls.h
new file mode 100644
index 0000000..a799e20
--- /dev/null
+++ b/include/asm-generic/ioctls.h
@@ -0,0 +1,110 @@
+#ifndef __ASM_GENERIC_IOCTLS_H
+#define __ASM_GENERIC_IOCTLS_H
+
+#include <linux/ioctl.h>
+
+/*
+ * These are the most common definitions for tty ioctl numbers.
+ * Most of them do not use the recommended _IOC(), but there is
+ * probably some source code out there hardcoding the number,
+ * so we might as well use them for all new platforms.
+ *
+ * The architectures that use different values here typically
+ * try to be compatible with some Unix variants for the same
+ * architecture.
+ */
+
+/* 0x54 is just a magic number to make these relatively unique ('T') */
+
+#define TCGETS 0x5401
+#define TCSETS 0x5402
+#define TCSETSW 0x5403
+#define TCSETSF 0x5404
+#define TCGETA 0x5405
+#define TCSETA 0x5406
+#define TCSETAW 0x5407
+#define TCSETAF 0x5408
+#define TCSBRK 0x5409
+#define TCXONC 0x540A
+#define TCFLSH 0x540B
+#define TIOCEXCL 0x540C
+#define TIOCNXCL 0x540D
+#define TIOCSCTTY 0x540E
+#define TIOCGPGRP 0x540F
+#define TIOCSPGRP 0x5410
+#define TIOCOUTQ 0x5411
+#define TIOCSTI 0x5412
+#define TIOCGWINSZ 0x5413
+#define TIOCSWINSZ 0x5414
+#define TIOCMGET 0x5415
+#define TIOCMBIS 0x5416
+#define TIOCMBIC 0x5417
+#define TIOCMSET 0x5418
+#define TIOCGSOFTCAR 0x5419
+#define TIOCSSOFTCAR 0x541A
+#define FIONREAD 0x541B
+#define TIOCINQ FIONREAD
+#define TIOCLINUX 0x541C
+#define TIOCCONS 0x541D
+#define TIOCGSERIAL 0x541E
+#define TIOCSSERIAL 0x541F
+#define TIOCPKT 0x5420
+#define FIONBIO 0x5421
+#define TIOCNOTTY 0x5422
+#define TIOCSETD 0x5423
+#define TIOCGETD 0x5424
+#define TCSBRKP 0x5425 /* Needed for POSIX tcsendbreak() */
+#define TIOCSBRK 0x5427 /* BSD compatibility */
+#define TIOCCBRK 0x5428 /* BSD compatibility */
+#define TIOCGSID 0x5429 /* Return the session ID of FD */
+#define TCGETS2 _IOR('T', 0x2A, struct termios2)
+#define TCSETS2 _IOW('T', 0x2B, struct termios2)
+#define TCSETSW2 _IOW('T', 0x2C, struct termios2)
+#define TCSETSF2 _IOW('T', 0x2D, struct termios2)
+#define TIOCGRS485 0x542E
+#define TIOCSRS485 0x542F
+#define TIOCGPTN _IOR('T', 0x30, unsigned int) /* Get Pty Number (of pty-mux device) */
+#define TIOCSPTLCK _IOW('T', 0x31, int) /* Lock/unlock Pty */
+#define TCGETX 0x5432 /* SYS5 TCGETX compatibility */
+#define TCSETX 0x5433
+#define TCSETXF 0x5434
+#define TCSETXW 0x5435
+
+#define FIONCLEX 0x5450
+#define FIOCLEX 0x5451
+#define FIOASYNC 0x5452
+#define TIOCSERCONFIG 0x5453
+#define TIOCSERGWILD 0x5454
+#define TIOCSERSWILD 0x5455
+#define TIOCGLCKTRMIOS 0x5456
+#define TIOCSLCKTRMIOS 0x5457
+#define TIOCSERGSTRUCT 0x5458 /* For debugging only */
+#define TIOCSERGETLSR 0x5459 /* Get line status register */
+#define TIOCSERGETMULTI 0x545A /* Get multiport config */
+#define TIOCSERSETMULTI 0x545B /* Set multiport config */
+
+#define TIOCMIWAIT 0x545C /* wait for a change on serial input line(s) */
+#define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */
+
+/*
+ * some architectures define FIOQSIZE as 0x545E, which is used for
+ * TIOCGHAYESESP on others
+ */
+#ifndef FIOQSIZE
+# define TIOCGHAYESESP 0x545E /* Get Hayes ESP configuration */
+# define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
+# define FIOQSIZE 0x5460
+#endif
+
+/* Used for packet mode */
+#define TIOCPKT_DATA 0
+#define TIOCPKT_FLUSHREAD 1
+#define TIOCPKT_FLUSHWRITE 2
+#define TIOCPKT_STOP 4
+#define TIOCPKT_START 8
+#define TIOCPKT_NOSTOP 16
+#define TIOCPKT_DOSTOP 32
+
+#define TIOCSER_TEMT 0x01 /* Transmitter physically empty */
+
+#endif /* __ASM_GENERIC_IOCTLS_H */
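Note: these are the classic termios/tty ioctl numbers, consumed from user space through ioctl(2). A user-space illustration, not part of the patch, using TIOCGWINSZ to read the terminal size:

/* Query the controlling terminal's width via TIOCGWINSZ. */
#include <sys/ioctl.h>
#include <unistd.h>

static int terminal_columns(void)
{
	struct winsize ws;

	if (ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws) < 0)
		return -1;
	return ws.ws_col;
}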
diff --git a/include/asm-generic/ipcbuf.h b/include/asm-generic/ipcbuf.h
new file mode 100644
index 0000000..76982b2
--- /dev/null
+++ b/include/asm-generic/ipcbuf.h
@@ -0,0 +1,34 @@
+#ifndef __ASM_GENERIC_IPCBUF_H
+#define __ASM_GENERIC_IPCBUF_H
+
+/*
+ * The generic ipc64_perm structure:
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * ipc64_perm was originally meant to be architecture specific, but
+ * everyone just ended up making identical copies without specific
+ * optimizations, so we may just as well all use the same one.
+ *
+ * Pad space is left for:
+ * - 32-bit mode_t on architectures that only had 16 bit
+ * - 32-bit seq
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct ipc64_perm {
+ __kernel_key_t key;
+ __kernel_uid32_t uid;
+ __kernel_gid32_t gid;
+ __kernel_uid32_t cuid;
+ __kernel_gid32_t cgid;
+ __kernel_mode_t mode;
+ /* pad if mode_t is u16: */
+ unsigned char __pad1[4 - sizeof(__kernel_mode_t)];
+ unsigned short seq;
+ unsigned short __pad2;
+ unsigned long __unused1;
+ unsigned long __unused2;
+};
+
+#endif /* __ASM_GENERIC_IPCBUF_H */
diff --git a/include/asm-generic/irq.h b/include/asm-generic/irq.h
new file mode 100644
index 0000000..b90ec0b
--- /dev/null
+++ b/include/asm-generic/irq.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_GENERIC_IRQ_H
+#define __ASM_GENERIC_IRQ_H
+
+/*
+ * NR_IRQS is the upper bound of how many interrupts can be handled
+ * in the platform. It is used to size the static irq_map array,
+ * so don't make it too big.
+ */
+#ifndef NR_IRQS
+#define NR_IRQS 64
+#endif
+
+static inline int irq_canonicalize(int irq)
+{
+ return irq;
+}
+
+#endif /* __ASM_GENERIC_IRQ_H */
diff --git a/include/asm-generic/irqflags.h b/include/asm-generic/irqflags.h
new file mode 100644
index 0000000..9aebf61
--- /dev/null
+++ b/include/asm-generic/irqflags.h
@@ -0,0 +1,72 @@
+#ifndef __ASM_GENERIC_IRQFLAGS_H
+#define __ASM_GENERIC_IRQFLAGS_H
+
+/*
+ * All architectures should implement at least the first two functions,
+ * usually inline assembly will be the best way.
+ */
+#ifndef RAW_IRQ_DISABLED
+#define RAW_IRQ_DISABLED 0
+#define RAW_IRQ_ENABLED 1
+#endif
+
+/* read interrupt enabled status */
+#ifndef __raw_local_save_flags
+unsigned long __raw_local_save_flags(void);
+#endif
+
+/* set interrupt enabled status */
+#ifndef raw_local_irq_restore
+void raw_local_irq_restore(unsigned long flags);
+#endif
+
+/* get status and disable interrupts */
+#ifndef __raw_local_irq_save
+static inline unsigned long __raw_local_irq_save(void)
+{
+ unsigned long flags;
+ flags = __raw_local_save_flags();
+ raw_local_irq_restore(RAW_IRQ_DISABLED);
+ return flags;
+}
+#endif
+
+/* test flags */
+#ifndef raw_irqs_disabled_flags
+static inline int raw_irqs_disabled_flags(unsigned long flags)
+{
+ return flags == RAW_IRQ_DISABLED;
+}
+#endif
+
+/* unconditionally enable interrupts */
+#ifndef raw_local_irq_enable
+static inline void raw_local_irq_enable(void)
+{
+ raw_local_irq_restore(RAW_IRQ_ENABLED);
+}
+#endif
+
+/* unconditionally disable interrupts */
+#ifndef raw_local_irq_disable
+static inline void raw_local_irq_disable(void)
+{
+ raw_local_irq_restore(RAW_IRQ_DISABLED);
+}
+#endif
+
+/* test hardware interrupt enable bit */
+#ifndef raw_irqs_disabled
+static inline int raw_irqs_disabled(void)
+{
+ return raw_irqs_disabled_flags(__raw_local_save_flags());
+}
+#endif
+
+#define raw_local_save_flags(flags) \
+ do { (flags) = __raw_local_save_flags(); } while (0)
+
+#define raw_local_irq_save(flags) \
+ do { (flags) = __raw_local_irq_save(); } while (0)
+
+#endif /* __ASM_GENERIC_IRQFLAGS_H */
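Note: everything in this irqflags.h is derived from two primitives the architecture must supply, __raw_local_save_flags() and raw_local_irq_restore(). A sketch of those two, where irqs_are_enabled(), arch_enable_irqs() and arch_disable_irqs() are hypothetical arch helpers used purely for illustration:

/* Minimal arch-side sketch (illustrative assumptions throughout). */
unsigned long __raw_local_save_flags(void)
{
	/* hypothetical: read the CPU's interrupt-enable state */
	return irqs_are_enabled() ? RAW_IRQ_ENABLED : RAW_IRQ_DISABLED;
}

void raw_local_irq_restore(unsigned long flags)
{
	if (flags == RAW_IRQ_ENABLED)
		arch_enable_irqs();	/* hypothetical helper */
	else
		arch_disable_irqs();	/* hypothetical helper */
}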
diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
new file mode 100644
index 0000000..58c3305
--- /dev/null
+++ b/include/asm-generic/kmap_types.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_GENERIC_KMAP_TYPES_H
+#define _ASM_GENERIC_KMAP_TYPES_H
+
+#ifdef CONFIG_DEBUG_HIGHMEM
+# define D(n) __KM_FENCE_##n ,
+#else
+# define D(n)
+#endif
+
+enum km_type {
+D(0) KM_BOUNCE_READ,
+D(1) KM_SKB_SUNRPC_DATA,
+D(2) KM_SKB_DATA_SOFTIRQ,
+D(3) KM_USER0,
+D(4) KM_USER1,
+D(5) KM_BIO_SRC_IRQ,
+D(6) KM_BIO_DST_IRQ,
+D(7) KM_PTE0,
+D(8) KM_PTE1,
+D(9) KM_IRQ0,
+D(10) KM_IRQ1,
+D(11) KM_SOFTIRQ0,
+D(12) KM_SOFTIRQ1,
+D(13) KM_SYNC_ICACHE,
+D(14) KM_SYNC_DCACHE,
+D(15) KM_UML_USERCOPY, /* UML specific, for copy_*_user - used in do_op_one_page */
+D(16) KM_TYPE_NR
+};
+
+#undef D
+
+#endif
diff --git a/include/asm-generic/linkage.h b/include/asm-generic/linkage.h
new file mode 100644
index 0000000..fef7a01
--- /dev/null
+++ b/include/asm-generic/linkage.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_LINKAGE_H
+#define __ASM_GENERIC_LINKAGE_H
+/*
+ * linux/linkage.h provides reasonable defaults.
+ * an architecture can override them by providing its own version.
+ */
+
+#endif /* __ASM_GENERIC_LINKAGE_H */
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index dbd6150..fc21844 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -42,7 +42,7 @@ typedef struct
#define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
#define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
-#define local_add_unless(l, a, u) atomic_long_add_unless((&(l)->a), (a), (u))
+#define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
#define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
/* Non-atomic variants, ie. preemption disabled and won't be touched
diff --git a/include/asm-generic/mman-common.h b/include/asm-generic/mman-common.h
new file mode 100644
index 0000000..3b69ad3
--- /dev/null
+++ b/include/asm-generic/mman-common.h
@@ -0,0 +1,41 @@
+#ifndef __ASM_GENERIC_MMAN_COMMON_H
+#define __ASM_GENERIC_MMAN_COMMON_H
+
+/*
+ Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
+ Based on: asm-xxx/mman.h
+*/
+
+#define PROT_READ 0x1 /* page can be read */
+#define PROT_WRITE 0x2 /* page can be written */
+#define PROT_EXEC 0x4 /* page can be executed */
+#define PROT_SEM 0x8 /* page may be used for atomic ops */
+#define PROT_NONE 0x0 /* page can not be accessed */
+#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
+#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+
+#define MAP_SHARED 0x01 /* Share changes */
+#define MAP_PRIVATE 0x02 /* Changes are private */
+#define MAP_TYPE 0x0f /* Mask for type of mapping */
+#define MAP_FIXED 0x10 /* Interpret addr exactly */
+#define MAP_ANONYMOUS 0x20 /* don't use a file */
+
+#define MS_ASYNC 1 /* sync memory asynchronously */
+#define MS_INVALIDATE 2 /* invalidate the caches */
+#define MS_SYNC 4 /* synchronous memory sync */
+
+#define MADV_NORMAL 0 /* no further special treatment */
+#define MADV_RANDOM 1 /* expect random page references */
+#define MADV_SEQUENTIAL 2 /* expect sequential page references */
+#define MADV_WILLNEED 3 /* will need these pages */
+#define MADV_DONTNEED 4 /* don't need these pages */
+
+/* common parameters: try to keep these consistent across architectures */
+#define MADV_REMOVE 9 /* remove these pages & resources */
+#define MADV_DONTFORK 10 /* don't inherit across fork */
+#define MADV_DOFORK 11 /* do inherit across fork */
+
+/* compatibility flags */
+#define MAP_FILE 0
+
+#endif /* __ASM_GENERIC_MMAN_COMMON_H */
diff --git a/include/asm-generic/mman.h b/include/asm-generic/mman.h
index 5e3dde2..7cab4de 100644
--- a/include/asm-generic/mman.h
+++ b/include/asm-generic/mman.h
@@ -1,41 +1,18 @@
-#ifndef _ASM_GENERIC_MMAN_H
-#define _ASM_GENERIC_MMAN_H
+#ifndef __ASM_GENERIC_MMAN_H
+#define __ASM_GENERIC_MMAN_H
-/*
- Author: Michael S. Tsirkin <mst@mellanox.co.il>, Mellanox Technologies Ltd.
- Based on: asm-xxx/mman.h
-*/
+#include <asm-generic/mman-common.h>
-#define PROT_READ 0x1 /* page can be read */
-#define PROT_WRITE 0x2 /* page can be written */
-#define PROT_EXEC 0x4 /* page can be executed */
-#define PROT_SEM 0x8 /* page may be used for atomic ops */
-#define PROT_NONE 0x0 /* page can not be accessed */
-#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
-#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
+#define MAP_GROWSDOWN 0x0100 /* stack-like segment */
+#define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+#define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+#define MAP_LOCKED 0x2000 /* pages are locked */
+#define MAP_NORESERVE 0x4000 /* don't check for reservations */
+#define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+#define MAP_NONBLOCK 0x10000 /* do not block on IO */
+#define MAP_STACK 0x20000 /* give out an address that is best suited for process/thread stacks */
-#define MAP_SHARED 0x01 /* Share changes */
-#define MAP_PRIVATE 0x02 /* Changes are private */
-#define MAP_TYPE 0x0f /* Mask for type of mapping */
-#define MAP_FIXED 0x10 /* Interpret addr exactly */
-#define MAP_ANONYMOUS 0x20 /* don't use a file */
+#define MCL_CURRENT 1 /* lock all current mappings */
+#define MCL_FUTURE 2 /* lock all future mappings */
-#define MS_ASYNC 1 /* sync memory asynchronously */
-#define MS_INVALIDATE 2 /* invalidate the caches */
-#define MS_SYNC 4 /* synchronous memory sync */
-
-#define MADV_NORMAL 0 /* no further special treatment */
-#define MADV_RANDOM 1 /* expect random page references */
-#define MADV_SEQUENTIAL 2 /* expect sequential page references */
-#define MADV_WILLNEED 3 /* will need these pages */
-#define MADV_DONTNEED 4 /* don't need these pages */
-
-/* common parameters: try to keep these consistent across architectures */
-#define MADV_REMOVE 9 /* remove these pages & resources */
-#define MADV_DONTFORK 10 /* don't inherit across fork */
-#define MADV_DOFORK 11 /* do inherit across fork */
-
-/* compatibility flags */
-#define MAP_FILE 0
-
-#endif
+#endif /* __ASM_GENERIC_MMAN_H */
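These values define the user-visible mmap/mlock ABI, so a plain user-space program exercises them directly. A small illustration (not part of the patch) mapping one anonymous page and locking the current mappings:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	/* one private, anonymous, read-write page */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	if (mlockall(MCL_CURRENT) < 0)	/* uses the MCL_* values defined above */
		perror("mlockall");

	munmap(p, 4096);
	return 0;
}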
diff --git a/include/asm-generic/mmu.h b/include/asm-generic/mmu.h
new file mode 100644
index 0000000..4f4aa56
--- /dev/null
+++ b/include/asm-generic/mmu.h
@@ -0,0 +1,15 @@
+#ifndef __ASM_GENERIC_MMU_H
+#define __ASM_GENERIC_MMU_H
+
+/*
+ * This is the mmu.h header for nommu implementations.
+ * Architectures with an MMU need something more complex.
+ */
+#ifndef __ASSEMBLY__
+typedef struct {
+ struct vm_list_struct *vmlist;
+ unsigned long end_brk;
+} mm_context_t;
+#endif
+
+#endif /* __ASM_GENERIC_MMU_H */
diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
new file mode 100644
index 0000000..a7eec91
--- /dev/null
+++ b/include/asm-generic/mmu_context.h
@@ -0,0 +1,45 @@
+#ifndef __ASM_GENERIC_MMU_CONTEXT_H
+#define __ASM_GENERIC_MMU_CONTEXT_H
+
+/*
+ * Generic hooks for NOMMU architectures, which do not need to do
+ * anything special here.
+ */
+
+#include <asm-generic/mm_hooks.h>
+
+struct task_struct;
+struct mm_struct;
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *tsk)
+{
+}
+
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+static inline void deactivate_mm(struct task_struct *task,
+ struct mm_struct *mm)
+{
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+}
+
+static inline void activate_mm(struct mm_struct *prev_mm,
+ struct mm_struct *next_mm)
+{
+}
+
+#endif /* __ASM_GENERIC_MMU_CONTEXT_H */
diff --git a/include/asm-generic/module.h b/include/asm-generic/module.h
new file mode 100644
index 0000000..ed5b44d
--- /dev/null
+++ b/include/asm-generic/module.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_GENERIC_MODULE_H
+#define __ASM_GENERIC_MODULE_H
+
+/*
+ * Many architectures just need a simple module
+ * loader without arch specific data.
+ */
+struct mod_arch_specific
+{
+};
+
+#ifdef CONFIG_64BIT
+#define Elf_Shdr Elf64_Shdr
+#define Elf_Sym Elf64_Sym
+#define Elf_Ehdr Elf64_Ehdr
+#else
+#define Elf_Shdr Elf32_Shdr
+#define Elf_Sym Elf32_Sym
+#define Elf_Ehdr Elf32_Ehdr
+#endif
+
+#endif /* __ASM_GENERIC_MODULE_H */
diff --git a/include/asm-generic/msgbuf.h b/include/asm-generic/msgbuf.h
new file mode 100644
index 0000000..aec850d
--- /dev/null
+++ b/include/asm-generic/msgbuf.h
@@ -0,0 +1,47 @@
+#ifndef __ASM_GENERIC_MSGBUF_H
+#define __ASM_GENERIC_MSGBUF_H
+
+#include <asm/bitsperlong.h>
+/*
+ * generic msqid64_ds structure.
+ *
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * msqid64_ds was originally meant to be architecture specific, but
+ * everyone just ended up making identical copies without specific
+ * optimizations, so we may just as well all use the same one.
+ *
+ * 64 bit architectures typically define a 64 bit __kernel_time_t,
+ * so they do not need the first three padding words.
+ * On big-endian systems, the padding is in the wrong place.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct msqid64_ds {
+ struct ipc64_perm msg_perm;
+ __kernel_time_t msg_stime; /* last msgsnd time */
+#if __BITS_PER_LONG != 64
+ unsigned long __unused1;
+#endif
+ __kernel_time_t msg_rtime; /* last msgrcv time */
+#if __BITS_PER_LONG != 64
+ unsigned long __unused2;
+#endif
+ __kernel_time_t msg_ctime; /* last change time */
+#if __BITS_PER_LONG != 64
+ unsigned long __unused3;
+#endif
+ unsigned long msg_cbytes; /* current number of bytes on queue */
+ unsigned long msg_qnum; /* number of messages in queue */
+ unsigned long msg_qbytes; /* max number of bytes on queue */
+ __kernel_pid_t msg_lspid; /* pid of last msgsnd */
+ __kernel_pid_t msg_lrpid; /* last receive pid */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+#endif /* __ASM_GENERIC_MSGBUF_H */
diff --git a/include/asm-generic/mutex.h b/include/asm-generic/mutex.h
new file mode 100644
index 0000000..fe91ab5
--- /dev/null
+++ b/include/asm-generic/mutex.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_MUTEX_H
+#define __ASM_GENERIC_MUTEX_H
+/*
+ * Pull in the generic implementation for the mutex fastpath,
+ * which is a reasonable default on many architectures.
+ */
+
+#include <asm-generic/mutex-dec.h>
+#endif /* __ASM_GENERIC_MUTEX_H */
diff --git a/include/asm-generic/page.h b/include/asm-generic/page.h
index 14db733..75fec18 100644
--- a/include/asm-generic/page.h
+++ b/include/asm-generic/page.h
@@ -1,24 +1,99 @@
-#ifndef _ASM_GENERIC_PAGE_H
-#define _ASM_GENERIC_PAGE_H
+#ifndef __ASM_GENERIC_PAGE_H
+#define __ASM_GENERIC_PAGE_H
+/*
+ * Generic page.h implementation, for NOMMU architectures.
+ * This provides the dummy definitions for the memory management.
+ */
+
+#ifdef CONFIG_MMU
+#error need to provide a real asm/page.h
+#endif
+
+
+/* PAGE_SHIFT determines the page size */
+
+#define PAGE_SHIFT 12
+#ifdef __ASSEMBLY__
+#define PAGE_SIZE (1 << PAGE_SHIFT)
+#else
+#define PAGE_SIZE (1UL << PAGE_SHIFT)
+#endif
+#define PAGE_MASK (~(PAGE_SIZE-1))
+
+#include <asm/setup.h>
+
+#ifndef __ASSEMBLY__
+
+#define get_user_page(vaddr) __get_free_page(GFP_KERNEL)
+#define free_user_page(page, addr) free_page(addr)
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+#define copy_page(to,from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+typedef struct {
+ unsigned long pmd[16];
+} pmd_t;
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pmd_val(x) ((&x)->pmd[0])
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+extern unsigned long memory_start;
+extern unsigned long memory_end;
+
+#endif /* !__ASSEMBLY__ */
+
+#ifdef CONFIG_KERNEL_RAM_BASE_ADDRESS
+#define PAGE_OFFSET (CONFIG_KERNEL_RAM_BASE_ADDRESS)
+#else
+#define PAGE_OFFSET (0)
+#endif
#ifndef __ASSEMBLY__
-#include <linux/compiler.h>
+#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET))
+#define __pa(x) ((unsigned long) (x) - PAGE_OFFSET)
+
+#define virt_to_pfn(kaddr) (__pa(kaddr) >> PAGE_SHIFT)
+#define pfn_to_virt(pfn) __va((pfn) << PAGE_SHIFT)
+
+#define virt_to_page(addr) (mem_map + (((unsigned long)(addr)-PAGE_OFFSET) >> PAGE_SHIFT))
+#define page_to_virt(page) ((((page) - mem_map) << PAGE_SHIFT) + PAGE_OFFSET)
+
+#ifndef page_to_phys
+#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
+#endif
+
+#define pfn_valid(pfn) ((pfn) < max_mapnr)
-/* Pure 2^n version of get_order */
-static __inline__ __attribute_const__ int get_order(unsigned long size)
-{
- int order;
+#define virt_addr_valid(kaddr) (((void *)(kaddr) >= (void *)PAGE_OFFSET) && \
+ ((void *)(kaddr) < (void *)memory_end))
- size = (size - 1) >> (PAGE_SHIFT - 1);
- order = -1;
- do {
- size >>= 1;
- order++;
- } while (size);
- return order;
-}
+#endif /* __ASSEMBLY__ */
-#endif /* __ASSEMBLY__ */
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
-#endif /* _ASM_GENERIC_PAGE_H */
+#endif /* __ASM_GENERIC_PAGE_H */
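For reference, a sketch (not in the patch) of how the nommu address-conversion helpers above relate to each other; the physical address is an assumption:

static inline void __example_page_conversions(void)
{
	unsigned long phys = 0x10000000;		/* assumed page-aligned RAM address */
	void *virt         = __va(phys);		/* phys + PAGE_OFFSET */
	unsigned long pfn  = virt_to_pfn(virt);		/* __pa(virt) >> PAGE_SHIFT */
	struct page *page  = virt_to_page(virt);	/* the mem_map entry for it */

	/* the round trips hold: __pa(virt) == phys and pfn_to_virt(pfn) == virt */
	(void)page;
}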
diff --git a/include/asm-generic/param.h b/include/asm-generic/param.h
new file mode 100644
index 0000000..cdf8251
--- /dev/null
+++ b/include/asm-generic/param.h
@@ -0,0 +1,24 @@
+#ifndef __ASM_GENERIC_PARAM_H
+#define __ASM_GENERIC_PARAM_H
+
+#ifdef __KERNEL__
+# define HZ CONFIG_HZ /* Internal kernel timer frequency */
+# define USER_HZ 100 /* some user interfaces are */
+# define CLOCKS_PER_SEC (USER_HZ) /* in "ticks" like times() */
+#endif
+
+#ifndef HZ
+#define HZ 100
+#endif
+
+#ifndef EXEC_PAGESIZE
+#define EXEC_PAGESIZE 4096
+#endif
+
+#ifndef NOGROUP
+#define NOGROUP (-1)
+#endif
+
+#define MAXHOSTNAMELEN 64 /* max length of hostname */
+
+#endif /* __ASM_GENERIC_PARAM_H */
diff --git a/include/asm-generic/parport.h b/include/asm-generic/parport.h
new file mode 100644
index 0000000..40528cb
--- /dev/null
+++ b/include/asm-generic/parport.h
@@ -0,0 +1,23 @@
+#ifndef __ASM_GENERIC_PARPORT_H
+#define __ASM_GENERIC_PARPORT_H
+
+/*
+ * An ISA bus may have i8255 parallel ports at well-known
+ * locations in the I/O space, which are scanned by
+ * parport_pc_find_isa_ports.
+ *
+ * Without ISA support, the driver will only attach
+ * to devices on the PCI bus.
+ */
+
+static int __devinit parport_pc_find_isa_ports(int autoirq, int autodma);
+static int __devinit parport_pc_find_nonpci_ports(int autoirq, int autodma)
+{
+#ifdef CONFIG_ISA
+ return parport_pc_find_isa_ports(autoirq, autodma);
+#else
+ return 0;
+#endif
+}
+
+#endif /* __ASM_GENERIC_PARPORT_H */
diff --git a/include/asm-generic/pci.h b/include/asm-generic/pci.h
index c36a77d..515c6e2 100644
--- a/include/asm-generic/pci.h
+++ b/include/asm-generic/pci.h
@@ -52,4 +52,12 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
}
#endif /* HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ */
+/*
+ * By default, assume that no iommu is in use and that the PCI
+ * space is mapped to physical address 0.
+ */
+#ifndef PCI_DMA_BUS_IS_PHYS
+#define PCI_DMA_BUS_IS_PHYS (1)
#endif
+
+#endif /* _ASM_GENERIC_PCI_H */
diff --git a/include/asm-generic/pgalloc.h b/include/asm-generic/pgalloc.h
new file mode 100644
index 0000000..9e429d0
--- /dev/null
+++ b/include/asm-generic/pgalloc.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_GENERIC_PGALLOC_H
+#define __ASM_GENERIC_PGALLOC_H
+/*
+ * an empty file is enough for a nommu architecture
+ */
+#ifdef CONFIG_MMU
+#error need to implement an architecture specific asm/pgalloc.h
+#endif
+
+#define check_pgt_cache() do { } while (0)
+
+#endif /* __ASM_GENERIC_PGALLOC_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e6d0ca..e410f60 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -280,17 +280,18 @@ static inline void ptep_modify_prot_commit(struct mm_struct *mm,
#endif
/*
- * A facility to provide batching of the reload of page tables with the
- * actual context switch code for paravirtualized guests. By convention,
- * only one of the lazy modes (CPU, MMU) should be active at any given
- * time, entry should never be nested, and entry and exits should always
- * be paired. This is for sanity of maintaining and reasoning about the
- * kernel code.
+ * A facility to provide batching of the reload of page tables and
+ * other process state with the actual context switch code for
+ * paravirtualized guests. By convention, only one of the batched
+ * update (lazy) modes (CPU, MMU) should be active at any given time,
+ * entry should never be nested, and entry and exits should always be
+ * paired. This is for sanity of maintaining and reasoning about the
+ * kernel code. In this case, the exit (end of the context switch) is
+ * in architecture-specific code, and so doesn't need a generic
+ * definition.
*/
-#ifndef __HAVE_ARCH_ENTER_LAZY_CPU_MODE
-#define arch_enter_lazy_cpu_mode() do {} while (0)
-#define arch_leave_lazy_cpu_mode() do {} while (0)
-#define arch_flush_lazy_cpu_mode() do {} while (0)
+#ifndef __HAVE_ARCH_START_CONTEXT_SWITCH
+#define arch_start_context_switch(prev) do {} while (0)
#endif
#ifndef __HAVE_PFNMAP_TRACKING
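The fallback above is a no-op; a paravirtualized architecture opts in by defining __HAVE_ARCH_START_CONTEXT_SWITCH together with its own hook. A sketch of the pattern (the helper name below is illustrative, not from this patch):

/* in the architecture's own asm/pgtable.h or paravirt header */
#define __HAVE_ARCH_START_CONTEXT_SWITCH
static inline void arch_start_context_switch(struct task_struct *prev)
{
	/* begin batching lazy CPU/MMU updates for the upcoming switch */
	start_lazy_context_switch(prev);
}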
diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h
new file mode 100644
index 0000000..3dab008
--- /dev/null
+++ b/include/asm-generic/posix_types.h
@@ -0,0 +1,165 @@
+#ifndef __ASM_GENERIC_POSIX_TYPES_H
+#define __ASM_GENERIC_POSIX_TYPES_H
+
+#include <asm/bitsperlong.h>
+/*
+ * This file is generally used by user-level software, so you need to
+ * be a little careful about namespace pollution etc.
+ *
+ * First the types that are often defined in different ways across
+ * architectures, so that you can override them.
+ */
+
+#ifndef __kernel_ino_t
+typedef unsigned long __kernel_ino_t;
+#endif
+
+#ifndef __kernel_mode_t
+typedef unsigned int __kernel_mode_t;
+#endif
+
+#ifndef __kernel_nlink_t
+typedef unsigned long __kernel_nlink_t;
+#endif
+
+#ifndef __kernel_pid_t
+typedef int __kernel_pid_t;
+#endif
+
+#ifndef __kernel_ipc_pid_t
+typedef int __kernel_ipc_pid_t;
+#endif
+
+#ifndef __kernel_uid_t
+typedef unsigned int __kernel_uid_t;
+typedef unsigned int __kernel_gid_t;
+#endif
+
+#ifndef __kernel_suseconds_t
+typedef long __kernel_suseconds_t;
+#endif
+
+#ifndef __kernel_daddr_t
+typedef int __kernel_daddr_t;
+#endif
+
+#ifndef __kernel_uid32_t
+typedef __kernel_uid_t __kernel_uid32_t;
+typedef __kernel_gid_t __kernel_gid32_t;
+#endif
+
+#ifndef __kernel_old_uid_t
+typedef __kernel_uid_t __kernel_old_uid_t;
+typedef __kernel_gid_t __kernel_old_gid_t;
+#endif
+
+#ifndef __kernel_old_dev_t
+typedef unsigned int __kernel_old_dev_t;
+#endif
+
+/*
+ * Most 32 bit architectures use "unsigned int" size_t,
+ * and all 64 bit architectures use "unsigned long" size_t.
+ */
+#ifndef __kernel_size_t
+#if __BITS_PER_LONG != 64
+typedef unsigned int __kernel_size_t;
+typedef int __kernel_ssize_t;
+typedef int __kernel_ptrdiff_t;
+#else
+typedef unsigned long __kernel_size_t;
+typedef long __kernel_ssize_t;
+typedef long __kernel_ptrdiff_t;
+#endif
+#endif
+
+/*
+ * anything below here should be completely generic
+ */
+typedef long __kernel_off_t;
+typedef long long __kernel_loff_t;
+typedef long __kernel_time_t;
+typedef long __kernel_clock_t;
+typedef int __kernel_timer_t;
+typedef int __kernel_clockid_t;
+typedef char * __kernel_caddr_t;
+typedef unsigned short __kernel_uid16_t;
+typedef unsigned short __kernel_gid16_t;
+
+typedef struct {
+ int val[2];
+} __kernel_fsid_t;
+
+#ifdef __KERNEL__
+
+#undef __FD_SET
+static inline void __FD_SET(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ __fdsetp->fds_bits[__tmp] |= (1UL<<__rem);
+}
+
+#undef __FD_CLR
+static inline void __FD_CLR(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ __fdsetp->fds_bits[__tmp] &= ~(1UL<<__rem);
+}
+
+#undef __FD_ISSET
+static inline int __FD_ISSET(unsigned long __fd, const __kernel_fd_set *__p)
+{
+ unsigned long __tmp = __fd / __NFDBITS;
+ unsigned long __rem = __fd % __NFDBITS;
+ return (__p->fds_bits[__tmp] & (1UL<<__rem)) != 0;
+}
+
+/*
+ * This will unroll the loop for the normal constant case (8 ints,
+ * for a 256-bit fd_set)
+ */
+#undef __FD_ZERO
+static inline void __FD_ZERO(__kernel_fd_set *__p)
+{
+ unsigned long *__tmp = __p->fds_bits;
+ int __i;
+
+ if (__builtin_constant_p(__FDSET_LONGS)) {
+ switch (__FDSET_LONGS) {
+ case 16:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ __tmp[ 4] = 0; __tmp[ 5] = 0;
+ __tmp[ 6] = 0; __tmp[ 7] = 0;
+ __tmp[ 8] = 0; __tmp[ 9] = 0;
+ __tmp[10] = 0; __tmp[11] = 0;
+ __tmp[12] = 0; __tmp[13] = 0;
+ __tmp[14] = 0; __tmp[15] = 0;
+ return;
+
+ case 8:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ __tmp[ 4] = 0; __tmp[ 5] = 0;
+ __tmp[ 6] = 0; __tmp[ 7] = 0;
+ return;
+
+ case 4:
+ __tmp[ 0] = 0; __tmp[ 1] = 0;
+ __tmp[ 2] = 0; __tmp[ 3] = 0;
+ return;
+ }
+ }
+ __i = __FDSET_LONGS;
+ while (__i) {
+ __i--;
+ *__tmp = 0;
+ __tmp++;
+ }
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __ASM_GENERIC_POSIX_TYPES_H */
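The user-space FD_ZERO/FD_SET/FD_ISSET macros perform the same bit operations as the __FD_* helpers above. A short user-space illustration (not part of the patch) with select(2):

#include <stdio.h>
#include <unistd.h>
#include <sys/select.h>

int main(void)
{
	fd_set rfds;
	struct timeval tv = { .tv_sec = 1, .tv_usec = 0 };

	FD_ZERO(&rfds);			/* clear every bit, as __FD_ZERO does */
	FD_SET(STDIN_FILENO, &rfds);	/* set bit fd%__NFDBITS of word fd/__NFDBITS */

	if (select(STDIN_FILENO + 1, &rfds, NULL, NULL, &tv) > 0 &&
	    FD_ISSET(STDIN_FILENO, &rfds))	/* test the same bit */
		printf("stdin is readable\n");

	return 0;
}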
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index 763e3b0..fa86f24 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -202,7 +202,7 @@ static inline unsigned int get_rtc_ss(void)
{
struct rtc_time h;
- __get_rtc_time(&h);
+ get_rtc_time(&h);
return h.tm_sec;
}
diff --git a/include/asm-generic/scatterlist.h b/include/asm-generic/scatterlist.h
new file mode 100644
index 0000000..8b94544
--- /dev/null
+++ b/include/asm-generic/scatterlist.h
@@ -0,0 +1,43 @@
+#ifndef __ASM_GENERIC_SCATTERLIST_H
+#define __ASM_GENERIC_SCATTERLIST_H
+
+#include <linux/types.h>
+
+struct scatterlist {
+#ifdef CONFIG_DEBUG_SG
+ unsigned long sg_magic;
+#endif
+ unsigned long page_link;
+ unsigned int offset;
+ unsigned int length;
+ dma_addr_t dma_address;
+ unsigned int dma_length;
+};
+
+/*
+ * These macros should be used after a dma_map_sg call has been done
+ * to get bus addresses of each of the SG entries and their lengths.
+ * You should only work with the number of sg entries pci_map_sg
+ * returns, or alternatively stop on the first sg_dma_len(sg) which
+ * is 0.
+ */
+#define sg_dma_address(sg) ((sg)->dma_address)
+#ifndef sg_dma_len
+/*
+ * Normally, you have an iommu on 64 bit machines, but not on 32 bit
+ * machines. Architectures that are different should override this.
+ */
+#if __BITS_PER_LONG == 64
+#define sg_dma_len(sg) ((sg)->dma_length)
+#else
+#define sg_dma_len(sg) ((sg)->length)
+#endif /* 64 bit */
+#endif /* sg_dma_len */
+
+#ifndef ISA_DMA_THRESHOLD
+#define ISA_DMA_THRESHOLD (~0UL)
+#endif
+
+#define ARCH_HAS_SG_CHAIN
+
+#endif /* __ASM_GENERIC_SCATTERLIST_H */
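The comment above describes the intended calling pattern; a driver-side sketch (not part of the patch, with assumed names) of walking the entries that dma_map_sg() returns:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static void walk_mapped_sg(struct device *dev, struct scatterlist *sgl, int nents)
{
	struct scatterlist *sg;
	int mapped, i;

	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	for_each_sg(sgl, sg, mapped, i) {
		dma_addr_t bus   = sg_dma_address(sg);
		unsigned int len = sg_dma_len(sg);

		if (!len)	/* alternatively, stop at the first zero length */
			break;
		/* hand bus/len to the device's DMA engine here */
		(void)bus;
	}
	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
}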
diff --git a/include/asm-generic/segment.h b/include/asm-generic/segment.h
new file mode 100644
index 0000000..5580eac
--- /dev/null
+++ b/include/asm-generic/segment.h
@@ -0,0 +1,9 @@
+#ifndef __ASM_GENERIC_SEGMENT_H
+#define __ASM_GENERIC_SEGMENT_H
+/*
+ * Only here because we have some old header files that expect it...
+ *
+ * New architectures probably don't want to have their own version.
+ */
+
+#endif /* __ASM_GENERIC_SEGMENT_H */
diff --git a/include/asm-generic/sembuf.h b/include/asm-generic/sembuf.h
new file mode 100644
index 0000000..4cb2c13
--- /dev/null
+++ b/include/asm-generic/sembuf.h
@@ -0,0 +1,38 @@
+#ifndef __ASM_GENERIC_SEMBUF_H
+#define __ASM_GENERIC_SEMBUF_H
+
+#include <asm/bitsperlong.h>
+
+/*
+ * The generic semid64_ds structure.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * semid64_ds was originally meant to be architecture specific, but
+ * everyone just ended up making identical copies without specific
+ * optimizations, so we may just as well all use the same one.
+ *
+ * 64 bit architectures typically define a 64 bit __kernel_time_t,
+ * so they do not need the first two padding words.
+ * On big-endian systems, the padding is in the wrong place.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+struct semid64_ds {
+ struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
+ __kernel_time_t sem_otime; /* last semop time */
+#if __BITS_PER_LONG != 64
+ unsigned long __unused1;
+#endif
+ __kernel_time_t sem_ctime; /* last change time */
+#if __BITS_PER_LONG != 64
+ unsigned long __unused2;
+#endif
+ unsigned long sem_nsems; /* no. of semaphores in array */
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_GENERIC_SEMBUF_H */
diff --git a/include/asm-generic/serial.h b/include/asm-generic/serial.h
new file mode 100644
index 0000000..5e29109
--- /dev/null
+++ b/include/asm-generic/serial.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_GENERIC_SERIAL_H
+#define __ASM_GENERIC_SERIAL_H
+
+/*
+ * This should not be an architecture specific #define, oh well.
+ *
+ * Traditionally, it just describes i8250 and related serial ports
+ * that have this clock rate.
+ */
+
+#define BASE_BAUD (1843200 / 16)
+
+#endif /* __ASM_GENERIC_SERIAL_H */
diff --git a/include/asm-generic/setup.h b/include/asm-generic/setup.h
new file mode 100644
index 0000000..6fc26a5
--- /dev/null
+++ b/include/asm-generic/setup.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_SETUP_H
+#define __ASM_GENERIC_SETUP_H
+
+#define COMMAND_LINE_SIZE 512
+
+#endif /* __ASM_GENERIC_SETUP_H */
diff --git a/include/asm-generic/shmbuf.h b/include/asm-generic/shmbuf.h
new file mode 100644
index 0000000..5768fa6
--- /dev/null
+++ b/include/asm-generic/shmbuf.h
@@ -0,0 +1,59 @@
+#ifndef __ASM_GENERIC_SHMBUF_H
+#define __ASM_GENERIC_SHMBUF_H
+
+#include <asm/bitsperlong.h>
+
+/*
+ * The generic shmid64_ds structure.
+ * Note extra padding because this structure is passed back and forth
+ * between kernel and user space.
+ *
+ * shmid64_ds was originally meant to be architecture specific, but
+ * everyone just ended up making identical copies without specific
+ * optimizations, so we may just as well all use the same one.
+ *
+ * 64 bit architectures typically define a 64 bit __kernel_time_t,
+ * so they do not need the first two padding words.
+ * On big-endian systems, the padding is in the wrong place.
+ *
+ * Pad space is left for:
+ * - 64-bit time_t to solve y2038 problem
+ * - 2 miscellaneous 32-bit values
+ */
+
+struct shmid64_ds {
+ struct ipc64_perm shm_perm; /* operation perms */
+ size_t shm_segsz; /* size of segment (bytes) */
+ __kernel_time_t shm_atime; /* last attach time */
+#if __BITS_PER_LONG != 64
+ unsigned long __unused1;
+#endif
+ __kernel_time_t shm_dtime; /* last detach time */
+#if __BITS_PER_LONG != 64
+ unsigned long __unused2;
+#endif
+ __kernel_time_t shm_ctime; /* last change time */
+#if __BITS_PER_LONG != 64
+ unsigned long __unused3;
+#endif
+ __kernel_pid_t shm_cpid; /* pid of creator */
+ __kernel_pid_t shm_lpid; /* pid of last operator */
+ unsigned long shm_nattch; /* no. of current attaches */
+ unsigned long __unused4;
+ unsigned long __unused5;
+};
+
+struct shminfo64 {
+ unsigned long shmmax;
+ unsigned long shmmin;
+ unsigned long shmmni;
+ unsigned long shmseg;
+ unsigned long shmall;
+ unsigned long __unused1;
+ unsigned long __unused2;
+ unsigned long __unused3;
+ unsigned long __unused4;
+};
+
+#endif /* __ASM_GENERIC_SHMBUF_H */
diff --git a/include/asm-generic/shmparam.h b/include/asm-generic/shmparam.h
new file mode 100644
index 0000000..51a3852
--- /dev/null
+++ b/include/asm-generic/shmparam.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_SHMPARAM_H
+#define __ASM_GENERIC_SHMPARAM_H
+
+#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
+
+#endif /* __ASM_GENERIC_SHMPARAM_H */
diff --git a/include/asm-generic/signal-defs.h b/include/asm-generic/signal-defs.h
new file mode 100644
index 0000000..00f95df
--- /dev/null
+++ b/include/asm-generic/signal-defs.h
@@ -0,0 +1,28 @@
+#ifndef __ASM_GENERIC_SIGNAL_DEFS_H
+#define __ASM_GENERIC_SIGNAL_DEFS_H
+
+#include <linux/compiler.h>
+
+#ifndef SIG_BLOCK
+#define SIG_BLOCK 0 /* for blocking signals */
+#endif
+#ifndef SIG_UNBLOCK
+#define SIG_UNBLOCK 1 /* for unblocking signals */
+#endif
+#ifndef SIG_SETMASK
+#define SIG_SETMASK 2 /* for setting the signal mask */
+#endif
+
+#ifndef __ASSEMBLY__
+typedef void __signalfn_t(int);
+typedef __signalfn_t __user *__sighandler_t;
+
+typedef void __restorefn_t(void);
+typedef __restorefn_t __user *__sigrestore_t;
+
+#define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */
+#define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */
+#define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */
+#endif
+
+#endif /* __ASM_GENERIC_SIGNAL_DEFS_H */
diff --git a/include/asm-generic/signal.h b/include/asm-generic/signal.h
index dae1d87..555c0ae 100644
--- a/include/asm-generic/signal.h
+++ b/include/asm-generic/signal.h
@@ -1,28 +1,131 @@
#ifndef __ASM_GENERIC_SIGNAL_H
#define __ASM_GENERIC_SIGNAL_H
-#include <linux/compiler.h>
+#include <linux/types.h>
-#ifndef SIG_BLOCK
-#define SIG_BLOCK 0 /* for blocking signals */
-#endif
-#ifndef SIG_UNBLOCK
-#define SIG_UNBLOCK 1 /* for unblocking signals */
-#endif
-#ifndef SIG_SETMASK
-#define SIG_SETMASK 2 /* for setting the signal mask */
+#define _NSIG 64
+#define _NSIG_BPW __BITS_PER_LONG
+#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
+
+#define SIGHUP 1
+#define SIGINT 2
+#define SIGQUIT 3
+#define SIGILL 4
+#define SIGTRAP 5
+#define SIGABRT 6
+#define SIGIOT 6
+#define SIGBUS 7
+#define SIGFPE 8
+#define SIGKILL 9
+#define SIGUSR1 10
+#define SIGSEGV 11
+#define SIGUSR2 12
+#define SIGPIPE 13
+#define SIGALRM 14
+#define SIGTERM 15
+#define SIGSTKFLT 16
+#define SIGCHLD 17
+#define SIGCONT 18
+#define SIGSTOP 19
+#define SIGTSTP 20
+#define SIGTTIN 21
+#define SIGTTOU 22
+#define SIGURG 23
+#define SIGXCPU 24
+#define SIGXFSZ 25
+#define SIGVTALRM 26
+#define SIGPROF 27
+#define SIGWINCH 28
+#define SIGIO 29
+#define SIGPOLL SIGIO
+/*
+#define SIGLOST 29
+*/
+#define SIGPWR 30
+#define SIGSYS 31
+#define SIGUNUSED 31
+
+/* These should not be considered constants from userland. */
+#define SIGRTMIN 32
+#ifndef SIGRTMAX
+#define SIGRTMAX _NSIG
#endif
+/*
+ * SA_FLAGS values:
+ *
+ * SA_ONSTACK indicates that a registered stack_t will be used.
+ * SA_RESTART flag to get restarting signals (which were the default long ago)
+ * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
+ * SA_RESETHAND clears the handler when the signal is delivered.
+ * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
+ * SA_NODEFER prevents the current signal from being masked in the handler.
+ *
+ * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
+ * Unix names RESETHAND and NODEFER respectively.
+ */
+#define SA_NOCLDSTOP 0x00000001
+#define SA_NOCLDWAIT 0x00000002
+#define SA_SIGINFO 0x00000004
+#define SA_ONSTACK 0x08000000
+#define SA_RESTART 0x10000000
+#define SA_NODEFER 0x40000000
+#define SA_RESETHAND 0x80000000
+
+#define SA_NOMASK SA_NODEFER
+#define SA_ONESHOT SA_RESETHAND
+
+/*
+ * New architectures should not define the obsolete
+ * SA_RESTORER 0x04000000
+ */
+
+/*
+ * sigaltstack controls
+ */
+#define SS_ONSTACK 1
+#define SS_DISABLE 2
+
+#define MINSIGSTKSZ 2048
+#define SIGSTKSZ 8192
+
#ifndef __ASSEMBLY__
-typedef void __signalfn_t(int);
-typedef __signalfn_t __user *__sighandler_t;
+typedef struct {
+ unsigned long sig[_NSIG_WORDS];
+} sigset_t;
+
+/* not actually used, but required for linux/syscalls.h */
+typedef unsigned long old_sigset_t;
-typedef void __restorefn_t(void);
-typedef __restorefn_t __user *__sigrestore_t;
+#include <asm-generic/signal-defs.h>
-#define SIG_DFL ((__force __sighandler_t)0) /* default signal handling */
-#define SIG_IGN ((__force __sighandler_t)1) /* ignore signal */
-#define SIG_ERR ((__force __sighandler_t)-1) /* error return from signal */
+struct sigaction {
+ __sighandler_t sa_handler;
+ unsigned long sa_flags;
+#ifdef SA_RESTORER
+ __sigrestore_t sa_restorer;
#endif
+ sigset_t sa_mask; /* mask last for extensibility */
+};
+
+struct k_sigaction {
+ struct sigaction sa;
+};
+
+typedef struct sigaltstack {
+ void __user *ss_sp;
+ int ss_flags;
+ size_t ss_size;
+} stack_t;
+
+#ifdef __KERNEL__
+
+#include <asm/sigcontext.h>
+#undef __HAVE_ARCH_SIG_BITOPS
+
+#define ptrace_signal_deliver(regs, cookie) do { } while (0)
+
+#endif /* __KERNEL__ */
+#endif /* __ASSEMBLY__ */
-#endif /* __ASM_GENERIC_SIGNAL_H */
+#endif /* __ASM_GENERIC_SIGNAL_H */
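The signal numbers, SA_* flags and struct sigaction above form the user-visible ABI; a short user-space illustration (not part of the patch) of installing a SIGINT handler with SA_RESTART:

#include <stdio.h>
#include <signal.h>
#include <unistd.h>

static void on_sigint(int sig)
{
	write(STDOUT_FILENO, "caught SIGINT\n", 14);
}

int main(void)
{
	struct sigaction sa = { 0 };

	sa.sa_handler = on_sigint;
	sa.sa_flags   = SA_RESTART;	/* restart interrupted system calls */
	sigemptyset(&sa.sa_mask);

	if (sigaction(SIGINT, &sa, NULL) < 0) {
		perror("sigaction");
		return 1;
	}
	pause();
	return 0;
}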
diff --git a/include/asm-generic/socket.h b/include/asm-generic/socket.h
new file mode 100644
index 0000000..5d79e40
--- /dev/null
+++ b/include/asm-generic/socket.h
@@ -0,0 +1,63 @@
+#ifndef __ASM_GENERIC_SOCKET_H
+#define __ASM_GENERIC_SOCKET_H
+
+#include <asm/sockios.h>
+
+/* For setsockopt(2) */
+#define SOL_SOCKET 1
+
+#define SO_DEBUG 1
+#define SO_REUSEADDR 2
+#define SO_TYPE 3
+#define SO_ERROR 4
+#define SO_DONTROUTE 5
+#define SO_BROADCAST 6
+#define SO_SNDBUF 7
+#define SO_RCVBUF 8
+#define SO_SNDBUFFORCE 32
+#define SO_RCVBUFFORCE 33
+#define SO_KEEPALIVE 9
+#define SO_OOBINLINE 10
+#define SO_NO_CHECK 11
+#define SO_PRIORITY 12
+#define SO_LINGER 13
+#define SO_BSDCOMPAT 14
+/* To add: #define SO_REUSEPORT 15 */
+
+#ifndef SO_PASSCRED /* powerpc only differs in these */
+#define SO_PASSCRED 16
+#define SO_PEERCRED 17
+#define SO_RCVLOWAT 18
+#define SO_SNDLOWAT 19
+#define SO_RCVTIMEO 20
+#define SO_SNDTIMEO 21
+#endif
+
+/* Security levels - as per NRL IPv6 - don't actually do anything */
+#define SO_SECURITY_AUTHENTICATION 22
+#define SO_SECURITY_ENCRYPTION_TRANSPORT 23
+#define SO_SECURITY_ENCRYPTION_NETWORK 24
+
+#define SO_BINDTODEVICE 25
+
+/* Socket filtering */
+#define SO_ATTACH_FILTER 26
+#define SO_DETACH_FILTER 27
+
+#define SO_PEERNAME 28
+#define SO_TIMESTAMP 29
+#define SCM_TIMESTAMP SO_TIMESTAMP
+
+#define SO_ACCEPTCONN 30
+
+#define SO_PEERSEC 31
+#define SO_PASSSEC 34
+#define SO_TIMESTAMPNS 35
+#define SCM_TIMESTAMPNS SO_TIMESTAMPNS
+
+#define SO_MARK 36
+
+#define SO_TIMESTAMPING 37
+#define SCM_TIMESTAMPING SO_TIMESTAMPING
+
+#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/asm-generic/sockios.h b/include/asm-generic/sockios.h
new file mode 100644
index 0000000..9a61a36
--- /dev/null
+++ b/include/asm-generic/sockios.h
@@ -0,0 +1,13 @@
+#ifndef __ASM_GENERIC_SOCKIOS_H
+#define __ASM_GENERIC_SOCKIOS_H
+
+/* Socket-level I/O control calls. */
+#define FIOSETOWN 0x8901
+#define SIOCSPGRP 0x8902
+#define FIOGETOWN 0x8903
+#define SIOCGPGRP 0x8904
+#define SIOCATMARK 0x8905
+#define SIOCGSTAMP 0x8906 /* Get stamp (timeval) */
+#define SIOCGSTAMPNS 0x8907 /* Get stamp (timespec) */
+
+#endif /* __ASM_GENERIC_SOCKIOS_H */
diff --git a/include/asm-generic/spinlock.h b/include/asm-generic/spinlock.h
new file mode 100644
index 0000000..1547a03
--- /dev/null
+++ b/include/asm-generic/spinlock.h
@@ -0,0 +1,11 @@
+#ifndef __ASM_GENERIC_SPINLOCK_H
+#define __ASM_GENERIC_SPINLOCK_H
+/*
+ * You need to implement asm/spinlock.h for SMP support. The generic
+ * version does not handle SMP.
+ */
+#ifdef CONFIG_SMP
+#error need an architecture specific asm/spinlock.h
+#endif
+
+#endif /* __ASM_GENERIC_SPINLOCK_H */
diff --git a/include/asm-generic/stat.h b/include/asm-generic/stat.h
new file mode 100644
index 0000000..47e6417
--- /dev/null
+++ b/include/asm-generic/stat.h
@@ -0,0 +1,72 @@
+#ifndef __ASM_GENERIC_STAT_H
+#define __ASM_GENERIC_STAT_H
+
+/*
+ * Everybody gets this wrong and has to stick with it for all
+ * eternity. Hopefully, this version gets used by new architectures
+ * so they don't fall into the same traps.
+ *
+ * stat64 is copied from powerpc64, with explicit padding added.
+ * stat is the same structure layout on 64-bit, without the 'long long'
+ * types.
+ *
+ * By convention, 64 bit architectures use the stat interface, while
+ * 32 bit architectures use the stat64 interface. Note that we don't
+ * provide an __old_kernel_stat here, which new architectures should
+ * not have to start with.
+ */
+
+#include <asm/bitsperlong.h>
+
+#define STAT_HAVE_NSEC 1
+
+struct stat {
+ unsigned long st_dev; /* Device. */
+ unsigned long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long st_rdev; /* Device number, if device. */
+ unsigned long __pad1;
+ long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+ int __pad2;
+ long st_blocks; /* Number 512-byte blocks allocated. */
+ int st_atime; /* Time of last access. */
+ unsigned int st_atime_nsec;
+ int st_mtime; /* Time of last modification. */
+ unsigned int st_mtime_nsec;
+ int st_ctime; /* Time of last status change. */
+ unsigned int st_ctime_nsec;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+
+#if __BITS_PER_LONG != 64
+/* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
+struct stat64 {
+ unsigned long long st_dev; /* Device. */
+ unsigned long long st_ino; /* File serial number. */
+ unsigned int st_mode; /* File mode. */
+ unsigned int st_nlink; /* Link count. */
+ unsigned int st_uid; /* User ID of the file's owner. */
+ unsigned int st_gid; /* Group ID of the file's group. */
+ unsigned long long st_rdev; /* Device number, if device. */
+ unsigned long long __pad1;
+ long long st_size; /* Size of file, in bytes. */
+ int st_blksize; /* Optimal block size for I/O. */
+ int __pad2;
+ long long st_blocks; /* Number 512-byte blocks allocated. */
+ int st_atime; /* Time of last access. */
+ unsigned int st_atime_nsec;
+ int st_mtime; /* Time of last modification. */
+ unsigned int st_mtime_nsec;
+ int st_ctime; /* Time of last status change. */
+ unsigned int st_ctime_nsec;
+ unsigned int __unused4;
+ unsigned int __unused5;
+};
+#endif
+
+#endif /* __ASM_GENERIC_STAT_H */
diff --git a/include/asm-generic/string.h b/include/asm-generic/string.h
new file mode 100644
index 0000000..de5e020
--- /dev/null
+++ b/include/asm-generic/string.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_GENERIC_STRING_H
+#define __ASM_GENERIC_STRING_H
+/*
+ * The kernel provides all required functions in lib/string.c
+ *
+ * Architectures probably want to provide at least their own optimized
+ * memcpy and memset functions though.
+ */
+
+#endif /* __ASM_GENERIC_STRING_H */
diff --git a/include/asm-generic/swab.h b/include/asm-generic/swab.h
new file mode 100644
index 0000000..a8e9029
--- /dev/null
+++ b/include/asm-generic/swab.h
@@ -0,0 +1,18 @@
+#ifndef _ASM_GENERIC_SWAB_H
+#define _ASM_GENERIC_SWAB_H
+
+#include <asm/bitsperlong.h>
+
+/*
+ * 32 bit architectures typically (but not always) want to
+ * set __SWAB_64_THRU_32__. In user space, this is only
+ * valid if the compiler supports 64 bit data types.
+ */
+
+#if __BITS_PER_LONG == 32
+#if defined(__GNUC__) && !defined(__STRICT_ANSI__) || defined(__KERNEL__)
+#define __SWAB_64_THRU_32__
+#endif
+#endif
+
+#endif /* _ASM_GENERIC_SWAB_H */
diff --git a/include/asm-generic/syscalls.h b/include/asm-generic/syscalls.h
new file mode 100644
index 0000000..df84e3b
--- /dev/null
+++ b/include/asm-generic/syscalls.h
@@ -0,0 +1,60 @@
+#ifndef __ASM_GENERIC_SYSCALLS_H
+#define __ASM_GENERIC_SYSCALLS_H
+
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+
+/*
+ * Calling conventions for these system calls can differ, so
+ * it's possible to override them.
+ */
+#ifndef sys_clone
+asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
+ void __user *parent_tid, void __user *child_tid,
+ struct pt_regs *regs);
+#endif
+
+#ifndef sys_fork
+asmlinkage long sys_fork(struct pt_regs *regs);
+#endif
+
+#ifndef sys_vfork
+asmlinkage long sys_vfork(struct pt_regs *regs);
+#endif
+
+#ifndef sys_execve
+asmlinkage long sys_execve(char __user *filename, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs);
+#endif
+
+#ifndef sys_mmap2
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, unsigned long pgoff);
+#endif
+
+#ifndef sys_mmap
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t pgoff);
+#endif
+
+#ifndef sys_sigaltstack
+asmlinkage long sys_sigaltstack(const stack_t __user *, stack_t __user *,
+ struct pt_regs *);
+#endif
+
+#ifndef sys_rt_sigreturn
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs);
+#endif
+
+#ifndef sys_rt_sigsuspend
+asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize);
+#endif
+
+#ifndef sys_rt_sigaction
+asmlinkage long sys_rt_sigaction(int sig, const struct sigaction __user *act,
+ struct sigaction __user *oact, size_t sigsetsize);
+#endif
+
+#endif /* __ASM_GENERIC_SYSCALLS_H */
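As the comment notes, an architecture whose calling convention differs declares its own prototype and defines the matching macro before this header is pulled in, which suppresses the generic declaration. A hypothetical example (the argument order is invented):

/* in the architecture's asm/syscalls.h, before asm-generic/syscalls.h */
asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp,
			  struct pt_regs *regs, void __user *parent_tid,
			  void __user *child_tid);
#define sys_clone sys_clone

#include <asm-generic/syscalls.h>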
diff --git a/include/asm-generic/system.h b/include/asm-generic/system.h
new file mode 100644
index 0000000..efa403b
--- /dev/null
+++ b/include/asm-generic/system.h
@@ -0,0 +1,161 @@
+/* Generic system definitions, based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but they serve more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __ASM_GENERIC_SYSTEM_H
+#define __ASM_GENERIC_SYSTEM_H
+
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <linux/irqflags.h>
+
+#include <asm/cmpxchg-local.h>
+
+struct task_struct;
+
+/* context switching is now performed out-of-line in switch_to.S */
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+#define switch_to(prev, next, last) \
+ do { \
+ ((last) = __switch_to((prev), (next))); \
+ } while (0)
+
+#define arch_align_stack(x) (x)
+
+#define nop() asm volatile ("nop")
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ *
+ * This implementation only contains a compiler barrier.
+ */
+
+#define mb() asm volatile ("": : :"memory")
+#define rmb() mb()
+#define wmb() asm volatile ("": : :"memory")
+
+#ifdef CONFIG_SMP
+#define smp_mb() mb()
+#define smp_rmb() rmb()
+#define smp_wmb() wmb()
+#else
+#define smp_mb() barrier()
+#define smp_rmb() barrier()
+#define smp_wmb() barrier()
+#endif
+
+#define set_mb(var, value) do { var = value; mb(); } while (0)
+#define set_wmb(var, value) do { var = value; wmb(); } while (0)
+
+#define read_barrier_depends() do {} while (0)
+#define smp_read_barrier_depends() do {} while (0)
+
+/*
+ * we make sure local_irq_enable() doesn't cause priority inversion
+ */
+#ifndef __ASSEMBLY__
+
+/* This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid xchg(). */
+extern void __xchg_called_with_bad_pointer(void);
+
+static inline
+unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+{
+ unsigned long ret, flags;
+
+ switch (size) {
+ case 1:
+#ifdef __xchg_u8
+ return __xchg_u8(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u8 *)ptr;
+ *(volatile u8 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u8 */
+
+ case 2:
+#ifdef __xchg_u16
+ return __xchg_u16(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u16 *)ptr;
+ *(volatile u16 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u16 */
+
+ case 4:
+#ifdef __xchg_u32
+ return __xchg_u32(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u32 *)ptr;
+ *(volatile u32 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u32 */
+
+#ifdef CONFIG_64BIT
+ case 8:
+#ifdef __xchg_u64
+ return __xchg_u64(x, ptr);
+#else
+ local_irq_save(flags);
+ ret = *(volatile u64 *)ptr;
+ *(volatile u64 *)ptr = x;
+ local_irq_restore(flags);
+ return ret;
+#endif /* __xchg_u64 */
+#endif /* CONFIG_64BIT */
+
+ default:
+ __xchg_called_with_bad_pointer();
+ return x;
+ }
+}
+
+#define xchg(ptr, x) \
+ ((__typeof__(*(ptr))) __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
+
+static inline unsigned long __cmpxchg(volatile unsigned long *m,
+ unsigned long old, unsigned long new)
+{
+ unsigned long retval;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ retval = *m;
+ if (retval == old)
+ *m = new;
+ local_irq_restore(flags);
+ return retval;
+}
+
+#define cmpxchg(ptr, o, n) \
+ ((__typeof__(*(ptr))) __cmpxchg((unsigned long *)(ptr), \
+ (unsigned long)(o), \
+ (unsigned long)(n)))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_GENERIC_SYSTEM_H */
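For reference, a sketch (not from the patch) of how callers typically use the xchg() and cmpxchg() wrappers defined above:

static inline int example_trylock(unsigned long *lock)
{
	/* atomically set *lock to 1; the old value tells us whether we got it */
	return xchg(lock, 1UL) == 0;
}

static inline int example_inc_if_unchanged(unsigned long *counter)
{
	unsigned long old = *counter;

	/* only succeeds if nobody modified *counter in the meantime */
	return cmpxchg(counter, old, old + 1) == old;
}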
diff --git a/include/asm-generic/termbits.h b/include/asm-generic/termbits.h
new file mode 100644
index 0000000..1c9773d
--- /dev/null
+++ b/include/asm-generic/termbits.h
@@ -0,0 +1,198 @@
+#ifndef __ASM_GENERIC_TERMBITS_H
+#define __ASM_GENERIC_TERMBITS_H
+
+#include <linux/posix_types.h>
+
+typedef unsigned char cc_t;
+typedef unsigned int speed_t;
+typedef unsigned int tcflag_t;
+
+#define NCCS 19
+struct termios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+};
+
+struct termios2 {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+struct ktermios {
+ tcflag_t c_iflag; /* input mode flags */
+ tcflag_t c_oflag; /* output mode flags */
+ tcflag_t c_cflag; /* control mode flags */
+ tcflag_t c_lflag; /* local mode flags */
+ cc_t c_line; /* line discipline */
+ cc_t c_cc[NCCS]; /* control characters */
+ speed_t c_ispeed; /* input speed */
+ speed_t c_ospeed; /* output speed */
+};
+
+/* c_cc characters */
+#define VINTR 0
+#define VQUIT 1
+#define VERASE 2
+#define VKILL 3
+#define VEOF 4
+#define VTIME 5
+#define VMIN 6
+#define VSWTC 7
+#define VSTART 8
+#define VSTOP 9
+#define VSUSP 10
+#define VEOL 11
+#define VREPRINT 12
+#define VDISCARD 13
+#define VWERASE 14
+#define VLNEXT 15
+#define VEOL2 16
+
+/* c_iflag bits */
+#define IGNBRK 0000001
+#define BRKINT 0000002
+#define IGNPAR 0000004
+#define PARMRK 0000010
+#define INPCK 0000020
+#define ISTRIP 0000040
+#define INLCR 0000100
+#define IGNCR 0000200
+#define ICRNL 0000400
+#define IUCLC 0001000
+#define IXON 0002000
+#define IXANY 0004000
+#define IXOFF 0010000
+#define IMAXBEL 0020000
+#define IUTF8 0040000
+
+/* c_oflag bits */
+#define OPOST 0000001
+#define OLCUC 0000002
+#define ONLCR 0000004
+#define OCRNL 0000010
+#define ONOCR 0000020
+#define ONLRET 0000040
+#define OFILL 0000100
+#define OFDEL 0000200
+#define NLDLY 0000400
+#define NL0 0000000
+#define NL1 0000400
+#define CRDLY 0003000
+#define CR0 0000000
+#define CR1 0001000
+#define CR2 0002000
+#define CR3 0003000
+#define TABDLY 0014000
+#define TAB0 0000000
+#define TAB1 0004000
+#define TAB2 0010000
+#define TAB3 0014000
+#define XTABS 0014000
+#define BSDLY 0020000
+#define BS0 0000000
+#define BS1 0020000
+#define VTDLY 0040000
+#define VT0 0000000
+#define VT1 0040000
+#define FFDLY 0100000
+#define FF0 0000000
+#define FF1 0100000
+
+/* c_cflag bit meaning */
+#define CBAUD 0010017
+#define B0 0000000 /* hang up */
+#define B50 0000001
+#define B75 0000002
+#define B110 0000003
+#define B134 0000004
+#define B150 0000005
+#define B200 0000006
+#define B300 0000007
+#define B600 0000010
+#define B1200 0000011
+#define B1800 0000012
+#define B2400 0000013
+#define B4800 0000014
+#define B9600 0000015
+#define B19200 0000016
+#define B38400 0000017
+#define EXTA B19200
+#define EXTB B38400
+#define CSIZE 0000060
+#define CS5 0000000
+#define CS6 0000020
+#define CS7 0000040
+#define CS8 0000060
+#define CSTOPB 0000100
+#define CREAD 0000200
+#define PARENB 0000400
+#define PARODD 0001000
+#define HUPCL 0002000
+#define CLOCAL 0004000
+#define CBAUDEX 0010000
+#define BOTHER 0010000
+#define B57600 0010001
+#define B115200 0010002
+#define B230400 0010003
+#define B460800 0010004
+#define B500000 0010005
+#define B576000 0010006
+#define B921600 0010007
+#define B1000000 0010010
+#define B1152000 0010011
+#define B1500000 0010012
+#define B2000000 0010013
+#define B2500000 0010014
+#define B3000000 0010015
+#define B3500000 0010016
+#define B4000000 0010017
+#define CIBAUD 002003600000 /* input baud rate */
+#define CMSPAR 010000000000 /* mark or space (stick) parity */
+#define CRTSCTS 020000000000 /* flow control */
+
+#define IBSHIFT 16 /* Shift from CBAUD to CIBAUD */
+
+/* c_lflag bits */
+#define ISIG 0000001
+#define ICANON 0000002
+#define XCASE 0000004
+#define ECHO 0000010
+#define ECHOE 0000020
+#define ECHOK 0000040
+#define ECHONL 0000100
+#define NOFLSH 0000200
+#define TOSTOP 0000400
+#define ECHOCTL 0001000
+#define ECHOPRT 0002000
+#define ECHOKE 0004000
+#define FLUSHO 0010000
+#define PENDIN 0040000
+#define IEXTEN 0100000
+
+/* tcflow() and TCXONC use these */
+#define TCOOFF 0
+#define TCOON 1
+#define TCIOFF 2
+#define TCION 3
+
+/* tcflush() and TCFLSH use these */
+#define TCIFLUSH 0
+#define TCOFLUSH 1
+#define TCIOFLUSH 2
+
+/* tcsetattr uses these */
+#define TCSANOW 0
+#define TCSADRAIN 1
+#define TCSAFLUSH 2
+
+#endif /* __ASM_GENERIC_TERMBITS_H */
diff --git a/include/asm-generic/termios-base.h b/include/asm-generic/termios-base.h
new file mode 100644
index 0000000..0a769fe
--- /dev/null
+++ b/include/asm-generic/termios-base.h
@@ -0,0 +1,77 @@
+/* termios.h: generic termios/termio user copying/translation
+ */
+
+#ifndef _ASM_GENERIC_TERMIOS_BASE_H
+#define _ASM_GENERIC_TERMIOS_BASE_H
+
+#include <asm/uaccess.h>
+
+#ifndef __ARCH_TERMIO_GETPUT
+
+/*
+ * Translate a "termio" structure into a "termios". Ugh.
+ */
+static inline int user_termio_to_kernel_termios(struct ktermios *termios,
+ struct termio __user *termio)
+{
+ unsigned short tmp;
+
+ if (get_user(tmp, &termio->c_iflag) < 0)
+ goto fault;
+ termios->c_iflag = (0xffff0000 & termios->c_iflag) | tmp;
+
+ if (get_user(tmp, &termio->c_oflag) < 0)
+ goto fault;
+ termios->c_oflag = (0xffff0000 & termios->c_oflag) | tmp;
+
+ if (get_user(tmp, &termio->c_cflag) < 0)
+ goto fault;
+ termios->c_cflag = (0xffff0000 & termios->c_cflag) | tmp;
+
+ if (get_user(tmp, &termio->c_lflag) < 0)
+ goto fault;
+ termios->c_lflag = (0xffff0000 & termios->c_lflag) | tmp;
+
+ if (get_user(termios->c_line, &termio->c_line) < 0)
+ goto fault;
+
+ if (copy_from_user(termios->c_cc, termio->c_cc, NCC) != 0)
+ goto fault;
+
+ return 0;
+
+ fault:
+ return -EFAULT;
+}
+
+/*
+ * Translate a "termios" structure into a "termio". Ugh.
+ */
+static inline int kernel_termios_to_user_termio(struct termio __user *termio,
+ struct ktermios *termios)
+{
+ if (put_user(termios->c_iflag, &termio->c_iflag) < 0 ||
+ put_user(termios->c_oflag, &termio->c_oflag) < 0 ||
+ put_user(termios->c_cflag, &termio->c_cflag) < 0 ||
+ put_user(termios->c_lflag, &termio->c_lflag) < 0 ||
+ put_user(termios->c_line, &termio->c_line) < 0 ||
+ copy_to_user(termio->c_cc, termios->c_cc, NCC) != 0)
+ return -EFAULT;
+
+ return 0;
+}
+
+#ifndef user_termios_to_kernel_termios
+#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
+#endif
+
+#ifndef kernel_termios_to_user_termios
+#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
+#endif
+
+#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
+#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+
+#endif /* __ARCH_TERMIO_GETPUT */
+
+#endif /* _ASM_GENERIC_TERMIOS_BASE_H */
diff --git a/include/asm-generic/termios.h b/include/asm-generic/termios.h
index 7d39ecc..d0922ad 100644
--- a/include/asm-generic/termios.h
+++ b/include/asm-generic/termios.h
@@ -1,18 +1,68 @@
-/* termios.h: generic termios/termio user copying/translation
- */
-
#ifndef _ASM_GENERIC_TERMIOS_H
#define _ASM_GENERIC_TERMIOS_H
+/*
+ * Most architectures have straight copies of the x86 code, with
+ * varying levels of bug fixes on top. Usually it's a good idea
+ * to use this generic version instead, but be careful to avoid
+ * ABI changes.
+ * New architectures should not provide their own version.
+ */
+
+#include <asm/termbits.h>
+#include <asm/ioctls.h>
+
+struct winsize {
+ unsigned short ws_row;
+ unsigned short ws_col;
+ unsigned short ws_xpixel;
+ unsigned short ws_ypixel;
+};
+
+#define NCC 8
+struct termio {
+ unsigned short c_iflag; /* input mode flags */
+ unsigned short c_oflag; /* output mode flags */
+ unsigned short c_cflag; /* control mode flags */
+ unsigned short c_lflag; /* local mode flags */
+ unsigned char c_line; /* line discipline */
+ unsigned char c_cc[NCC]; /* control characters */
+};
+
+/* modem lines */
+#define TIOCM_LE 0x001
+#define TIOCM_DTR 0x002
+#define TIOCM_RTS 0x004
+#define TIOCM_ST 0x008
+#define TIOCM_SR 0x010
+#define TIOCM_CTS 0x020
+#define TIOCM_CAR 0x040
+#define TIOCM_RNG 0x080
+#define TIOCM_DSR 0x100
+#define TIOCM_CD TIOCM_CAR
+#define TIOCM_RI TIOCM_RNG
+#define TIOCM_OUT1 0x2000
+#define TIOCM_OUT2 0x4000
+#define TIOCM_LOOP 0x8000
+
+/* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */
+
+#ifdef __KERNEL__
#include <asm/uaccess.h>
-#ifndef __ARCH_TERMIO_GETPUT
+/* intr=^C quit=^\ erase=del kill=^U
+ eof=^D vtime=\0 vmin=\1 sxtc=\0
+ start=^Q stop=^S susp=^Z eol=\0
+ reprint=^R discard=^U werase=^W lnext=^V
+ eol2=\0
+*/
+#define INIT_C_CC "\003\034\177\025\004\0\1\0\021\023\032\0\022\017\027\026\0"
/*
* Translate a "termio" structure into a "termios". Ugh.
*/
static inline int user_termio_to_kernel_termios(struct ktermios *termios,
- struct termio __user *termio)
+ const struct termio __user *termio)
{
unsigned short tmp;
@@ -61,17 +111,44 @@ static inline int kernel_termios_to_user_termio(struct termio __user *termio,
return 0;
}
-#ifndef user_termios_to_kernel_termios
-#define user_termios_to_kernel_termios(k, u) copy_from_user(k, u, sizeof(struct termios))
-#endif
+#ifdef TCGETS2
+static inline int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios2 __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios2));
+}
-#ifndef kernel_termios_to_user_termios
-#define kernel_termios_to_user_termios(u, k) copy_to_user(u, k, sizeof(struct termios))
-#endif
+static inline int kernel_termios_to_user_termios(struct termios2 __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios2));
+}
-#define user_termios_to_kernel_termios_1(k, u) copy_from_user(k, u, sizeof(struct termios))
-#define kernel_termios_to_user_termios_1(u, k) copy_to_user(u, k, sizeof(struct termios))
+static inline int user_termios_to_kernel_termios_1(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+
+static inline int kernel_termios_to_user_termios_1(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
+#else /* TCGETS2 */
+static inline int user_termios_to_kernel_termios(struct ktermios *k,
+ struct termios __user *u)
+{
+ return copy_from_user(k, u, sizeof(struct termios));
+}
+
+static inline int kernel_termios_to_user_termios(struct termios __user *u,
+ struct ktermios *k)
+{
+ return copy_to_user(u, k, sizeof(struct termios));
+}
+#endif /* TCGETS2 */
-#endif /* __ARCH_TERMIO_GETPUT */
+#endif /* __KERNEL__ */
#endif /* _ASM_GENERIC_TERMIOS_H */
diff --git a/include/asm-generic/timex.h b/include/asm-generic/timex.h
new file mode 100644
index 0000000..b2243cb
--- /dev/null
+++ b/include/asm-generic/timex.h
@@ -0,0 +1,22 @@
+#ifndef __ASM_GENERIC_TIMEX_H
+#define __ASM_GENERIC_TIMEX_H
+
+/*
+ * If you have a cycle counter, return the value here.
+ */
+typedef unsigned long cycles_t;
+#ifndef get_cycles
+static inline cycles_t get_cycles(void)
+{
+ return 0;
+}
+#endif
+
+/*
+ * Architectures are encouraged to implement read_current_timer
+ * and define this in order to avoid the expensive delay loop
+ * calibration during boot.
+ */
+#undef ARCH_HAS_READ_CURRENT_TIMER
+
+#endif /* __ASM_GENERIC_TIMEX_H */
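An architecture with a real cycle counter would ship its own asm/timex.h instead of this file. A hypothetical sketch (register and instruction names invented):

#ifndef __MYARCH_TIMEX_H
#define __MYARCH_TIMEX_H

typedef unsigned long cycles_t;

static inline cycles_t get_cycles(void)
{
	cycles_t ret;

	/* read the cycle counter; the instruction is illustrative only */
	asm volatile("mfspr %0, cyclecount" : "=r" (ret));
	return ret;
}

/* only advertise this after also implementing read_current_timer() */
#define ARCH_HAS_READ_CURRENT_TIMER

#endif /* __MYARCH_TIMEX_H */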
diff --git a/include/asm-generic/tlbflush.h b/include/asm-generic/tlbflush.h
new file mode 100644
index 0000000..c7af037
--- /dev/null
+++ b/include/asm-generic/tlbflush.h
@@ -0,0 +1,18 @@
+#ifndef __ASM_GENERIC_TLBFLUSH_H
+#define __ASM_GENERIC_TLBFLUSH_H
+/*
+ * This is a dummy tlbflush implementation that can be used on all
+ * nommu architectures.
+ * If you have an MMU, you need to write your own functions.
+ */
+#ifdef CONFIG_MMU
+#error need to implement an architecture specific asm/tlbflush.h
+#endif
+
+static inline void flush_tlb_mm(struct mm_struct *mm)
+{
+ BUG();
+}
+
+
+#endif /* __ASM_GENERIC_TLBFLUSH_H */
diff --git a/include/asm-generic/types.h b/include/asm-generic/types.h
new file mode 100644
index 0000000..fba7d33
--- /dev/null
+++ b/include/asm-generic/types.h
@@ -0,0 +1,42 @@
+#ifndef _ASM_GENERIC_TYPES_H
+#define _ASM_GENERIC_TYPES_H
+/*
+ * int-ll64 is used practically everywhere now,
+ * so use it as a reasonable default.
+ */
+#include <asm-generic/int-ll64.h>
+
+#ifndef __ASSEMBLY__
+
+typedef unsigned short umode_t;
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * These aren't exported outside the kernel to avoid name space clashes
+ */
+#ifdef __KERNEL__
+#ifndef __ASSEMBLY__
+/*
+ * DMA addresses may be very different from physical addresses
+ * and pointers. i386 and powerpc may have 64 bit DMA on 32 bit
+ * systems, while sparc64 uses 32 bit DMA addresses for 64 bit
+ * physical addresses.
+ * This default defines dma_addr_t to have the same size as
+ * phys_addr_t, which is the most common way.
+ * Do not define the dma64_addr_t type, which never really
+ * worked.
+ */
+#ifndef dma_addr_t
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
+typedef u64 dma_addr_t;
+#else
+typedef u32 dma_addr_t;
+#endif /* CONFIG_PHYS_ADDR_T_64BIT */
+#endif /* dma_addr_t */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+
+#endif /* _ASM_GENERIC_TYPES_H */
diff --git a/include/asm-generic/uaccess-unaligned.h b/include/asm-generic/uaccess-unaligned.h
new file mode 100644
index 0000000..67deb89
--- /dev/null
+++ b/include/asm-generic/uaccess-unaligned.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_GENERIC_UACCESS_UNALIGNED_H
+#define __ASM_GENERIC_UACCESS_UNALIGNED_H
+
+/*
+ * This macro should be used instead of __get_user() when accessing
+ * values at locations that are not known to be aligned.
+ */
+#define __get_user_unaligned(x, ptr) \
+({ \
+ __typeof__ (*(ptr)) __x; \
+ __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
+ (x) = __x; \
+})
+
+
+/*
+ * This macro should be used instead of __put_user() when accessing
+ * values at locations that are not known to be aligned.
+ */
+#define __put_user_unaligned(x, ptr) \
+({ \
+ __typeof__ (*(ptr)) __x = (x); \
+ __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
+})
+
+#endif /* __ASM_GENERIC_UACCESS_UNALIGNED_H */
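A minimal usage sketch for the helpers above (the function and variable names are invented): as defined here, __put_user_unaligned() evaluates to 0 or -EFAULT, so it can be returned directly.

/* Hypothetical helper: store a 64-bit value at a user pointer that may
 * not be naturally aligned, e.g. a field of a packed user structure. */
static int example_store_stamp(u64 stamp, u64 __user *dst)
{
	return __put_user_unaligned(stamp, dst);
}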
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index 549cb3a..6d8cab2 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -1,26 +1,325 @@
-#ifndef _ASM_GENERIC_UACCESS_H_
-#define _ASM_GENERIC_UACCESS_H_
+#ifndef __ASM_GENERIC_UACCESS_H
+#define __ASM_GENERIC_UACCESS_H
/*
- * This macro should be used instead of __get_user() when accessing
- * values at locations that are not known to be aligned.
+ * User space memory access functions; these should work
+ * on any machine that has kernel and user data in the same
+ * address space, e.g. all NOMMU machines.
*/
-#define __get_user_unaligned(x, ptr) \
-({ \
- __typeof__ (*(ptr)) __x; \
- __copy_from_user(&__x, (ptr), sizeof(*(ptr))) ? -EFAULT : 0; \
- (x) = __x; \
-})
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/string.h>
+
+#include <asm/segment.h>
+
+#define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
+
+#ifndef KERNEL_DS
+#define KERNEL_DS MAKE_MM_SEG(~0UL)
+#endif
+
+#ifndef USER_DS
+#define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
+#endif
+
+#ifndef get_fs
+#define get_ds() (KERNEL_DS)
+#define get_fs() (current_thread_info()->addr_limit)
+
+static inline void set_fs(mm_segment_t fs)
+{
+ current_thread_info()->addr_limit = fs;
+}
+#endif
+#define segment_eq(a, b) ((a).seg == (b).seg)
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
+
+/*
+ * The architecture should really override this if possible, at least
+ * doing a check on the get_fs()
+ */
+#ifndef __access_ok
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+ return 1;
+}
+#endif
/*
- * This macro should be used instead of __put_user() when accessing
- * values at locations that are not known to be aligned.
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
*/
-#define __put_user_unaligned(x, ptr) \
-({ \
- __typeof__ (*(ptr)) __x = (x); \
- __copy_to_user((ptr), &__x, sizeof(*(ptr))) ? -EFAULT : 0; \
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
+
+/*
+ * architectures with an MMU should override these two
+ */
+#ifndef __copy_from_user
+static inline __must_check long __copy_from_user(void *to,
+ const void __user * from, unsigned long n)
+{
+ if (__builtin_constant_p(n)) {
+ switch(n) {
+ case 1:
+ *(u8 *)to = *(u8 __force *)from;
+ return 0;
+ case 2:
+ *(u16 *)to = *(u16 __force *)from;
+ return 0;
+ case 4:
+ *(u32 *)to = *(u32 __force *)from;
+ return 0;
+#ifdef CONFIG_64BIT
+ case 8:
+ *(u64 *)to = *(u64 __force *)from;
+ return 0;
+#endif
+ default:
+ break;
+ }
+ }
+
+ memcpy(to, (const void __force *)from, n);
+ return 0;
+}
+#endif
+
+#ifndef __copy_to_user
+static inline __must_check long __copy_to_user(void __user *to,
+ const void *from, unsigned long n)
+{
+ if (__builtin_constant_p(n)) {
+ switch(n) {
+ case 1:
+ *(u8 __force *)to = *(u8 *)from;
+ return 0;
+ case 2:
+ *(u16 __force *)to = *(u16 *)from;
+ return 0;
+ case 4:
+ *(u32 __force *)to = *(u32 *)from;
+ return 0;
+#ifdef CONFIG_64BIT
+ case 8:
+ *(u64 __force *)to = *(u64 *)from;
+ return 0;
+#endif
+ default:
+ break;
+ }
+ }
+
+ memcpy((void __force *)to, from, n);
+ return 0;
+}
+#endif
+
+/*
+ * These are the main single-value transfer routines. They automatically
+ * use the right size if we just have the right pointer type.
+ * This version just falls back to copy_{from,to}_user, which should
+ * provide a fast-path for small values.
+ */
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __x = (x); \
+ int __pu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof (*(ptr))) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ case 8: \
+ __pu_err = __put_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ break; \
+ default: \
+ __put_user_bad(); \
+ break; \
+ } \
+ __pu_err; \
+})
+
+#define put_user(x, ptr) \
+({ \
+ might_sleep(); \
+ __access_ok(ptr, sizeof (*ptr)) ? \
+ __put_user(x, ptr) : \
+ -EFAULT; \
})
-#endif /* _ASM_GENERIC_UACCESS_H */
+static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
+{
+ size = __copy_to_user(ptr, x, size);
+ return size ? -EFAULT : size;
+}
+
+extern int __put_user_bad(void) __attribute__((noreturn));
+
+#define __get_user(x, ptr) \
+({ \
+ int __gu_err = -EFAULT; \
+ __chk_user_ptr(ptr); \
+ switch (sizeof(*(ptr))) { \
+ case 1: { \
+ unsigned char __x; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 2: { \
+ unsigned short __x; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 4: { \
+ unsigned int __x; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ case 8: { \
+ unsigned long long __x; \
+ __gu_err = __get_user_fn(sizeof (*(ptr)), \
+ ptr, &__x); \
+ (x) = *(__force __typeof__(*(ptr)) *) &__x; \
+ break; \
+ }; \
+ default: \
+ __get_user_bad(); \
+ break; \
+ } \
+ __gu_err; \
+})
+
+#define get_user(x, ptr) \
+({ \
+ might_sleep(); \
+ __access_ok(ptr, sizeof (*ptr)) ? \
+ __get_user(x, ptr) : \
+ -EFAULT; \
+})
+
+static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
+{
+ size = __copy_from_user(x, ptr, size);
+ return size ? -EFAULT : size;
+}
+
+extern int __get_user_bad(void) __attribute__((noreturn));
+
+#ifndef __copy_from_user_inatomic
+#define __copy_from_user_inatomic __copy_from_user
+#endif
+
+#ifndef __copy_to_user_inatomic
+#define __copy_to_user_inatomic __copy_to_user
+#endif
+
+static inline long copy_from_user(void *to,
+ const void __user * from, unsigned long n)
+{
+ might_sleep();
+ if (__access_ok(from, n))
+ return __copy_from_user(to, from, n);
+ else
+ return n;
+}
+
+static inline long copy_to_user(void __user *to,
+ const void *from, unsigned long n)
+{
+ might_sleep();
+ if (__access_ok(to, n))
+ return __copy_to_user(to, from, n);
+ else
+ return n;
+}
+
+/*
+ * Copy a null terminated string from userspace.
+ */
+#ifndef __strncpy_from_user
+static inline long
+__strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ char *tmp;
+ strncpy(dst, (const char __force *)src, count);
+ for (tmp = dst; *tmp && count > 0; tmp++, count--)
+ ;
+ return (tmp - dst);
+}
+#endif
+
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ if (!__access_ok(src, 1))
+ return -EFAULT;
+ return __strncpy_from_user(dst, src, count);
+}
+
+/*
+ * Return the size of a string (including the ending 0)
+ *
+ * Return 0 on exception, a value greater than N if too long
+ */
+#ifndef strnlen_user
+static inline long strnlen_user(const char __user *src, long n)
+{
+ return strlen((void * __force)src) + 1;
+}
+#endif
+
+static inline long strlen_user(const char __user *src)
+{
+ return strnlen_user(src, 32767);
+}
+
+/*
+ * Zero Userspace
+ */
+#ifndef __clear_user
+static inline __must_check unsigned long
+__clear_user(void __user *to, unsigned long n)
+{
+ memset((void __force *)to, 0, n);
+ return 0;
+}
+#endif
+
+static inline __must_check unsigned long
+clear_user(void __user *to, unsigned long n)
+{
+ might_sleep();
+ if (!__access_ok(to, n))
+ return n;
+
+ return __clear_user(to, n);
+}
+
+#endif /* __ASM_GENERIC_UACCESS_H */
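The following caller sketch is illustrative only; the struct and function names are invented. It shows the two return conventions defined above: copy_from_user()/copy_to_user() return the number of bytes left uncopied, while get_user()/put_user() return 0 or -EFAULT.

/* Hypothetical ioctl-style argument fetch; all "example" names are
 * assumptions made for this sketch. */
struct example_args {
	int		flags;
	unsigned long	len;
};

static long example_fetch_args(void __user *uarg)
{
	struct example_args args;
	int first;

	/* bulk copy: a non-zero return means some bytes were not copied */
	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;

	/* single value: 0 on success, -EFAULT on a bad pointer */
	if (get_user(first, (int __user *)uarg))
		return -EFAULT;

	return first ? (long)args.len : 0;
}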
diff --git a/include/asm-generic/ucontext.h b/include/asm-generic/ucontext.h
new file mode 100644
index 0000000..ad77343
--- /dev/null
+++ b/include/asm-generic/ucontext.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_GENERIC_UCONTEXT_H
+#define __ASM_GENERIC_UCONTEXT_H
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+#endif /* __ASM_GENERIC_UCONTEXT_H */
diff --git a/include/asm-generic/unaligned.h b/include/asm-generic/unaligned.h
new file mode 100644
index 0000000..03cf593
--- /dev/null
+++ b/include/asm-generic/unaligned.h
@@ -0,0 +1,30 @@
+#ifndef __ASM_GENERIC_UNALIGNED_H
+#define __ASM_GENERIC_UNALIGNED_H
+
+/*
+ * This is the most generic implementation of unaligned accesses
+ * and should work almost anywhere.
+ *
+ * If an architecture can handle unaligned accesses in hardware,
+ * it may want to use the linux/unaligned/access_ok.h implementation
+ * instead.
+ */
+#include <asm/byteorder.h>
+
+#if defined(__LITTLE_ENDIAN)
+# include <linux/unaligned/le_struct.h>
+# include <linux/unaligned/be_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_le
+# define put_unaligned __put_unaligned_le
+#elif defined(__BIG_ENDIAN)
+# include <linux/unaligned/be_struct.h>
+# include <linux/unaligned/le_byteshift.h>
+# include <linux/unaligned/generic.h>
+# define get_unaligned __get_unaligned_be
+# define put_unaligned __put_unaligned_be
+#else
+# error need to define endianness
+#endif
+
+#endif /* __ASM_GENERIC_UNALIGNED_H */
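A short usage sketch (the function and buffer names are invented): get_unaligned() performs a CPU-endian load from a pointer that may sit at any byte offset, instead of a plain dereference that could fault on strict-alignment architectures.

#include <linux/types.h>
#include <asm/unaligned.h>

/* Hypothetical example: pull a 32-bit field out of a byte buffer at an
 * arbitrary offset. */
static u32 example_read_field(const u8 *buf, size_t offset)
{
	/* buf + offset is not guaranteed to be 4-byte aligned */
	return get_unaligned((const u32 *)(buf + offset));
}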
diff --git a/include/asm-generic/unistd.h b/include/asm-generic/unistd.h
new file mode 100644
index 0000000..5b34b62
--- /dev/null
+++ b/include/asm-generic/unistd.h
@@ -0,0 +1,854 @@
+#if !defined(_ASM_GENERIC_UNISTD_H) || defined(__SYSCALL)
+#define _ASM_GENERIC_UNISTD_H
+
+#include <asm/bitsperlong.h>
+
+/*
+ * This file contains the system call numbers, based on the
+ * layout of the x86-64 architecture, which embeds the
+ * pointer to the syscall in the table.
+ *
+ * As a basic principle, no duplication of functionality
+ * should be added, e.g. we don't use lseek when llseek
+ * is present. New architectures should use this file
+ * and implement the less featureful calls in user space.
+ */
+
+#ifndef __SYSCALL
+#define __SYSCALL(x, y)
+#endif
+
+#if __BITS_PER_LONG == 32
+#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _32)
+#else
+#define __SC_3264(_nr, _32, _64) __SYSCALL(_nr, _64)
+#endif
+
+#define __NR_io_setup 0
+__SYSCALL(__NR_io_setup, sys_io_setup)
+#define __NR_io_destroy 1
+__SYSCALL(__NR_io_destroy, sys_io_destroy)
+#define __NR_io_submit 2
+__SYSCALL(__NR_io_submit, sys_io_submit)
+#define __NR_io_cancel 3
+__SYSCALL(__NR_io_cancel, sys_io_cancel)
+#define __NR_io_getevents 4
+__SYSCALL(__NR_io_getevents, sys_io_getevents)
+
+/* fs/xattr.c */
+#define __NR_setxattr 5
+__SYSCALL(__NR_setxattr, sys_setxattr)
+#define __NR_lsetxattr 6
+__SYSCALL(__NR_lsetxattr, sys_lsetxattr)
+#define __NR_fsetxattr 7
+__SYSCALL(__NR_fsetxattr, sys_fsetxattr)
+#define __NR_getxattr 8
+__SYSCALL(__NR_getxattr, sys_getxattr)
+#define __NR_lgetxattr 9
+__SYSCALL(__NR_lgetxattr, sys_lgetxattr)
+#define __NR_fgetxattr 10
+__SYSCALL(__NR_fgetxattr, sys_fgetxattr)
+#define __NR_listxattr 11
+__SYSCALL(__NR_listxattr, sys_listxattr)
+#define __NR_llistxattr 12
+__SYSCALL(__NR_llistxattr, sys_llistxattr)
+#define __NR_flistxattr 13
+__SYSCALL(__NR_flistxattr, sys_flistxattr)
+#define __NR_removexattr 14
+__SYSCALL(__NR_removexattr, sys_removexattr)
+#define __NR_lremovexattr 15
+__SYSCALL(__NR_lremovexattr, sys_lremovexattr)
+#define __NR_fremovexattr 16
+__SYSCALL(__NR_fremovexattr, sys_fremovexattr)
+
+/* fs/dcache.c */
+#define __NR_getcwd 17
+__SYSCALL(__NR_getcwd, sys_getcwd)
+
+/* fs/cookies.c */
+#define __NR_lookup_dcookie 18
+__SYSCALL(__NR_lookup_dcookie, sys_lookup_dcookie)
+
+/* fs/eventfd.c */
+#define __NR_eventfd2 19
+__SYSCALL(__NR_eventfd2, sys_eventfd2)
+
+/* fs/eventpoll.c */
+#define __NR_epoll_create1 20
+__SYSCALL(__NR_epoll_create1, sys_epoll_create1)
+#define __NR_epoll_ctl 21
+__SYSCALL(__NR_epoll_ctl, sys_epoll_ctl)
+#define __NR_epoll_pwait 22
+__SYSCALL(__NR_epoll_pwait, sys_epoll_pwait)
+
+/* fs/fcntl.c */
+#define __NR_dup 23
+__SYSCALL(__NR_dup, sys_dup)
+#define __NR_dup3 24
+__SYSCALL(__NR_dup3, sys_dup3)
+#define __NR3264_fcntl 25
+__SC_3264(__NR3264_fcntl, sys_fcntl64, sys_fcntl)
+
+/* fs/inotify_user.c */
+#define __NR_inotify_init1 26
+__SYSCALL(__NR_inotify_init1, sys_inotify_init1)
+#define __NR_inotify_add_watch 27
+__SYSCALL(__NR_inotify_add_watch, sys_inotify_add_watch)
+#define __NR_inotify_rm_watch 28
+__SYSCALL(__NR_inotify_rm_watch, sys_inotify_rm_watch)
+
+/* fs/ioctl.c */
+#define __NR_ioctl 29
+__SYSCALL(__NR_ioctl, sys_ioctl)
+
+/* fs/ioprio.c */
+#define __NR_ioprio_set 30
+__SYSCALL(__NR_ioprio_set, sys_ioprio_set)
+#define __NR_ioprio_get 31
+__SYSCALL(__NR_ioprio_get, sys_ioprio_get)
+
+/* fs/locks.c */
+#define __NR_flock 32
+__SYSCALL(__NR_flock, sys_flock)
+
+/* fs/namei.c */
+#define __NR_mknodat 33
+__SYSCALL(__NR_mknodat, sys_mknodat)
+#define __NR_mkdirat 34
+__SYSCALL(__NR_mkdirat, sys_mkdirat)
+#define __NR_unlinkat 35
+__SYSCALL(__NR_unlinkat, sys_unlinkat)
+#define __NR_symlinkat 36
+__SYSCALL(__NR_symlinkat, sys_symlinkat)
+#define __NR_linkat 37
+__SYSCALL(__NR_linkat, sys_linkat)
+#define __NR_renameat 38
+__SYSCALL(__NR_renameat, sys_renameat)
+
+/* fs/namespace.c */
+#define __NR_umount2 39
+__SYSCALL(__NR_umount2, sys_umount)
+#define __NR_mount 40
+__SYSCALL(__NR_mount, sys_mount)
+#define __NR_pivot_root 41
+__SYSCALL(__NR_pivot_root, sys_pivot_root)
+
+/* fs/nfsctl.c */
+#define __NR_nfsservctl 42
+__SYSCALL(__NR_nfsservctl, sys_nfsservctl)
+
+/* fs/open.c */
+#define __NR3264_statfs 43
+__SC_3264(__NR3264_statfs, sys_statfs64, sys_statfs)
+#define __NR3264_fstatfs 44
+__SC_3264(__NR3264_fstatfs, sys_fstatfs64, sys_fstatfs)
+#define __NR3264_truncate 45
+__SC_3264(__NR3264_truncate, sys_truncate64, sys_truncate)
+#define __NR3264_ftruncate 46
+__SC_3264(__NR3264_ftruncate, sys_ftruncate64, sys_ftruncate)
+
+#define __NR_fallocate 47
+__SYSCALL(__NR_fallocate, sys_fallocate)
+#define __NR_faccessat 48
+__SYSCALL(__NR_faccessat, sys_faccessat)
+#define __NR_chdir 49
+__SYSCALL(__NR_chdir, sys_chdir)
+#define __NR_fchdir 50
+__SYSCALL(__NR_fchdir, sys_fchdir)
+#define __NR_chroot 51
+__SYSCALL(__NR_chroot, sys_chroot)
+#define __NR_fchmod 52
+__SYSCALL(__NR_fchmod, sys_fchmod)
+#define __NR_fchmodat 53
+__SYSCALL(__NR_fchmodat, sys_fchmodat)
+#define __NR_fchownat 54
+__SYSCALL(__NR_fchownat, sys_fchownat)
+#define __NR_fchown 55
+__SYSCALL(__NR_fchown, sys_fchown)
+#define __NR_openat 56
+__SYSCALL(__NR_openat, sys_openat)
+#define __NR_close 57
+__SYSCALL(__NR_close, sys_close)
+#define __NR_vhangup 58
+__SYSCALL(__NR_vhangup, sys_vhangup)
+
+/* fs/pipe.c */
+#define __NR_pipe2 59
+__SYSCALL(__NR_pipe2, sys_pipe2)
+
+/* fs/quota.c */
+#define __NR_quotactl 60
+__SYSCALL(__NR_quotactl, sys_quotactl)
+
+/* fs/readdir.c */
+#define __NR_getdents64 61
+__SYSCALL(__NR_getdents64, sys_getdents64)
+
+/* fs/read_write.c */
+#define __NR3264_lseek 62
+__SC_3264(__NR3264_lseek, sys_llseek, sys_lseek)
+#define __NR_read 63
+__SYSCALL(__NR_read, sys_read)
+#define __NR_write 64
+__SYSCALL(__NR_write, sys_write)
+#define __NR_readv 65
+__SYSCALL(__NR_readv, sys_readv)
+#define __NR_writev 66
+__SYSCALL(__NR_writev, sys_writev)
+#define __NR_pread64 67
+__SYSCALL(__NR_pread64, sys_pread64)
+#define __NR_pwrite64 68
+__SYSCALL(__NR_pwrite64, sys_pwrite64)
+#define __NR_preadv 69
+__SYSCALL(__NR_preadv, sys_preadv)
+#define __NR_pwritev 70
+__SYSCALL(__NR_pwritev, sys_pwritev)
+
+/* fs/sendfile.c */
+#define __NR3264_sendfile 71
+__SC_3264(__NR3264_sendfile, sys_sendfile64, sys_sendfile)
+
+/* fs/select.c */
+#define __NR_pselect6 72
+__SYSCALL(__NR_pselect6, sys_pselect6)
+#define __NR_ppoll 73
+__SYSCALL(__NR_ppoll, sys_ppoll)
+
+/* fs/signalfd.c */
+#define __NR_signalfd4 74
+__SYSCALL(__NR_signalfd4, sys_signalfd4)
+
+/* fs/splice.c */
+#define __NR_vmsplice 75
+__SYSCALL(__NR_vmsplice, sys_vmsplice)
+#define __NR_splice 76
+__SYSCALL(__NR_splice, sys_splice)
+#define __NR_tee 77
+__SYSCALL(__NR_tee, sys_tee)
+
+/* fs/stat.c */
+#define __NR_readlinkat 78
+__SYSCALL(__NR_readlinkat, sys_readlinkat)
+#define __NR3264_fstatat 79
+__SC_3264(__NR3264_fstatat, sys_fstatat64, sys_newfstatat)
+#define __NR3264_fstat 80
+__SC_3264(__NR3264_fstat, sys_fstat64, sys_newfstat)
+
+/* fs/sync.c */
+#define __NR_sync 81
+__SYSCALL(__NR_sync, sys_sync)
+#define __NR_fsync 82
+__SYSCALL(__NR_fsync, sys_fsync)
+#define __NR_fdatasync 83
+__SYSCALL(__NR_fdatasync, sys_fdatasync)
+#define __NR_sync_file_range 84
+__SYSCALL(__NR_sync_file_range, sys_sync_file_range) /* .long sys_sync_file_range2, */
+
+/* fs/timerfd.c */
+#define __NR_timerfd_create 85
+__SYSCALL(__NR_timerfd_create, sys_timerfd_create)
+#define __NR_timerfd_settime 86
+__SYSCALL(__NR_timerfd_settime, sys_timerfd_settime)
+#define __NR_timerfd_gettime 87
+__SYSCALL(__NR_timerfd_gettime, sys_timerfd_gettime)
+
+/* fs/utimes.c */
+#define __NR_utimensat 88
+__SYSCALL(__NR_utimensat, sys_utimensat)
+
+/* kernel/acct.c */
+#define __NR_acct 89
+__SYSCALL(__NR_acct, sys_acct)
+
+/* kernel/capability.c */
+#define __NR_capget 90
+__SYSCALL(__NR_capget, sys_capget)
+#define __NR_capset 91
+__SYSCALL(__NR_capset, sys_capset)
+
+/* kernel/exec_domain.c */
+#define __NR_personality 92
+__SYSCALL(__NR_personality, sys_personality)
+
+/* kernel/exit.c */
+#define __NR_exit 93
+__SYSCALL(__NR_exit, sys_exit)
+#define __NR_exit_group 94
+__SYSCALL(__NR_exit_group, sys_exit_group)
+#define __NR_waitid 95
+__SYSCALL(__NR_waitid, sys_waitid)
+
+/* kernel/fork.c */
+#define __NR_set_tid_address 96
+__SYSCALL(__NR_set_tid_address, sys_set_tid_address)
+#define __NR_unshare 97
+__SYSCALL(__NR_unshare, sys_unshare)
+
+/* kernel/futex.c */
+#define __NR_futex 98
+__SYSCALL(__NR_futex, sys_futex)
+#define __NR_set_robust_list 99
+__SYSCALL(__NR_set_robust_list, sys_set_robust_list)
+#define __NR_get_robust_list 100
+__SYSCALL(__NR_get_robust_list, sys_get_robust_list)
+
+/* kernel/hrtimer.c */
+#define __NR_nanosleep 101
+__SYSCALL(__NR_nanosleep, sys_nanosleep)
+
+/* kernel/itimer.c */
+#define __NR_getitimer 102
+__SYSCALL(__NR_getitimer, sys_getitimer)
+#define __NR_setitimer 103
+__SYSCALL(__NR_setitimer, sys_setitimer)
+
+/* kernel/kexec.c */
+#define __NR_kexec_load 104
+__SYSCALL(__NR_kexec_load, sys_kexec_load)
+
+/* kernel/module.c */
+#define __NR_init_module 105
+__SYSCALL(__NR_init_module, sys_init_module)
+#define __NR_delete_module 106
+__SYSCALL(__NR_delete_module, sys_delete_module)
+
+/* kernel/posix-timers.c */
+#define __NR_timer_create 107
+__SYSCALL(__NR_timer_create, sys_timer_create)
+#define __NR_timer_gettime 108
+__SYSCALL(__NR_timer_gettime, sys_timer_gettime)
+#define __NR_timer_getoverrun 109
+__SYSCALL(__NR_timer_getoverrun, sys_timer_getoverrun)
+#define __NR_timer_settime 110
+__SYSCALL(__NR_timer_settime, sys_timer_settime)
+#define __NR_timer_delete 111
+__SYSCALL(__NR_timer_delete, sys_timer_delete)
+#define __NR_clock_settime 112
+__SYSCALL(__NR_clock_settime, sys_clock_settime)
+#define __NR_clock_gettime 113
+__SYSCALL(__NR_clock_gettime, sys_clock_gettime)
+#define __NR_clock_getres 114
+__SYSCALL(__NR_clock_getres, sys_clock_getres)
+#define __NR_clock_nanosleep 115
+__SYSCALL(__NR_clock_nanosleep, sys_clock_nanosleep)
+
+/* kernel/printk.c */
+#define __NR_syslog 116
+__SYSCALL(__NR_syslog, sys_syslog)
+
+/* kernel/ptrace.c */
+#define __NR_ptrace 117
+__SYSCALL(__NR_ptrace, sys_ptrace)
+
+/* kernel/sched.c */
+#define __NR_sched_setparam 118
+__SYSCALL(__NR_sched_setparam, sys_sched_setparam)
+#define __NR_sched_setscheduler 119
+__SYSCALL(__NR_sched_setscheduler, sys_sched_setscheduler)
+#define __NR_sched_getscheduler 120
+__SYSCALL(__NR_sched_getscheduler, sys_sched_getscheduler)
+#define __NR_sched_getparam 121
+__SYSCALL(__NR_sched_getparam, sys_sched_getparam)
+#define __NR_sched_setaffinity 122
+__SYSCALL(__NR_sched_setaffinity, sys_sched_setaffinity)
+#define __NR_sched_getaffinity 123
+__SYSCALL(__NR_sched_getaffinity, sys_sched_getaffinity)
+#define __NR_sched_yield 124
+__SYSCALL(__NR_sched_yield, sys_sched_yield)
+#define __NR_sched_get_priority_max 125
+__SYSCALL(__NR_sched_get_priority_max, sys_sched_get_priority_max)
+#define __NR_sched_get_priority_min 126
+__SYSCALL(__NR_sched_get_priority_min, sys_sched_get_priority_min)
+#define __NR_sched_rr_get_interval 127
+__SYSCALL(__NR_sched_rr_get_interval, sys_sched_rr_get_interval)
+
+/* kernel/signal.c */
+#define __NR_restart_syscall 128
+__SYSCALL(__NR_restart_syscall, sys_restart_syscall)
+#define __NR_kill 129
+__SYSCALL(__NR_kill, sys_kill)
+#define __NR_tkill 130
+__SYSCALL(__NR_tkill, sys_tkill)
+#define __NR_tgkill 131
+__SYSCALL(__NR_tgkill, sys_tgkill)
+#define __NR_sigaltstack 132
+__SYSCALL(__NR_sigaltstack, sys_sigaltstack)
+#define __NR_rt_sigsuspend 133
+__SYSCALL(__NR_rt_sigsuspend, sys_rt_sigsuspend) /* __ARCH_WANT_SYS_RT_SIGSUSPEND */
+#define __NR_rt_sigaction 134
+__SYSCALL(__NR_rt_sigaction, sys_rt_sigaction) /* __ARCH_WANT_SYS_RT_SIGACTION */
+#define __NR_rt_sigprocmask 135
+__SYSCALL(__NR_rt_sigprocmask, sys_rt_sigprocmask)
+#define __NR_rt_sigpending 136
+__SYSCALL(__NR_rt_sigpending, sys_rt_sigpending)
+#define __NR_rt_sigtimedwait 137
+__SYSCALL(__NR_rt_sigtimedwait, sys_rt_sigtimedwait)
+#define __NR_rt_sigqueueinfo 138
+__SYSCALL(__NR_rt_sigqueueinfo, sys_rt_sigqueueinfo)
+#define __NR_rt_sigreturn 139
+__SYSCALL(__NR_rt_sigreturn, sys_rt_sigreturn) /* sys_rt_sigreturn_wrapper, */
+
+/* kernel/sys.c */
+#define __NR_setpriority 140
+__SYSCALL(__NR_setpriority, sys_setpriority)
+#define __NR_getpriority 141
+__SYSCALL(__NR_getpriority, sys_getpriority)
+#define __NR_reboot 142
+__SYSCALL(__NR_reboot, sys_reboot)
+#define __NR_setregid 143
+__SYSCALL(__NR_setregid, sys_setregid)
+#define __NR_setgid 144
+__SYSCALL(__NR_setgid, sys_setgid)
+#define __NR_setreuid 145
+__SYSCALL(__NR_setreuid, sys_setreuid)
+#define __NR_setuid 146
+__SYSCALL(__NR_setuid, sys_setuid)
+#define __NR_setresuid 147
+__SYSCALL(__NR_setresuid, sys_setresuid)
+#define __NR_getresuid 148
+__SYSCALL(__NR_getresuid, sys_getresuid)
+#define __NR_setresgid 149
+__SYSCALL(__NR_setresgid, sys_setresgid)
+#define __NR_getresgid 150
+__SYSCALL(__NR_getresgid, sys_getresgid)
+#define __NR_setfsuid 151
+__SYSCALL(__NR_setfsuid, sys_setfsuid)
+#define __NR_setfsgid 152
+__SYSCALL(__NR_setfsgid, sys_setfsgid)
+#define __NR_times 153
+__SYSCALL(__NR_times, sys_times)
+#define __NR_setpgid 154
+__SYSCALL(__NR_setpgid, sys_setpgid)
+#define __NR_getpgid 155
+__SYSCALL(__NR_getpgid, sys_getpgid)
+#define __NR_getsid 156
+__SYSCALL(__NR_getsid, sys_getsid)
+#define __NR_setsid 157
+__SYSCALL(__NR_setsid, sys_setsid)
+#define __NR_getgroups 158
+__SYSCALL(__NR_getgroups, sys_getgroups)
+#define __NR_setgroups 159
+__SYSCALL(__NR_setgroups, sys_setgroups)
+#define __NR_uname 160
+__SYSCALL(__NR_uname, sys_newuname)
+#define __NR_sethostname 161
+__SYSCALL(__NR_sethostname, sys_sethostname)
+#define __NR_setdomainname 162
+__SYSCALL(__NR_setdomainname, sys_setdomainname)
+#define __NR_getrlimit 163
+__SYSCALL(__NR_getrlimit, sys_getrlimit)
+#define __NR_setrlimit 164
+__SYSCALL(__NR_setrlimit, sys_setrlimit)
+#define __NR_getrusage 165
+__SYSCALL(__NR_getrusage, sys_getrusage)
+#define __NR_umask 166
+__SYSCALL(__NR_umask, sys_umask)
+#define __NR_prctl 167
+__SYSCALL(__NR_prctl, sys_prctl)
+#define __NR_getcpu 168
+__SYSCALL(__NR_getcpu, sys_getcpu)
+
+/* kernel/time.c */
+#define __NR_gettimeofday 169
+__SYSCALL(__NR_gettimeofday, sys_gettimeofday)
+#define __NR_settimeofday 170
+__SYSCALL(__NR_settimeofday, sys_settimeofday)
+#define __NR_adjtimex 171
+__SYSCALL(__NR_adjtimex, sys_adjtimex)
+
+/* kernel/timer.c */
+#define __NR_getpid 172
+__SYSCALL(__NR_getpid, sys_getpid)
+#define __NR_getppid 173
+__SYSCALL(__NR_getppid, sys_getppid)
+#define __NR_getuid 174
+__SYSCALL(__NR_getuid, sys_getuid)
+#define __NR_geteuid 175
+__SYSCALL(__NR_geteuid, sys_geteuid)
+#define __NR_getgid 176
+__SYSCALL(__NR_getgid, sys_getgid)
+#define __NR_getegid 177
+__SYSCALL(__NR_getegid, sys_getegid)
+#define __NR_gettid 178
+__SYSCALL(__NR_gettid, sys_gettid)
+#define __NR_sysinfo 179
+__SYSCALL(__NR_sysinfo, sys_sysinfo)
+
+/* ipc/mqueue.c */
+#define __NR_mq_open 180
+__SYSCALL(__NR_mq_open, sys_mq_open)
+#define __NR_mq_unlink 181
+__SYSCALL(__NR_mq_unlink, sys_mq_unlink)
+#define __NR_mq_timedsend 182
+__SYSCALL(__NR_mq_timedsend, sys_mq_timedsend)
+#define __NR_mq_timedreceive 183
+__SYSCALL(__NR_mq_timedreceive, sys_mq_timedreceive)
+#define __NR_mq_notify 184
+__SYSCALL(__NR_mq_notify, sys_mq_notify)
+#define __NR_mq_getsetattr 185
+__SYSCALL(__NR_mq_getsetattr, sys_mq_getsetattr)
+
+/* ipc/msg.c */
+#define __NR_msgget 186
+__SYSCALL(__NR_msgget, sys_msgget)
+#define __NR_msgctl 187
+__SYSCALL(__NR_msgctl, sys_msgctl)
+#define __NR_msgrcv 188
+__SYSCALL(__NR_msgrcv, sys_msgrcv)
+#define __NR_msgsnd 189
+__SYSCALL(__NR_msgsnd, sys_msgsnd)
+
+/* ipc/sem.c */
+#define __NR_semget 190
+__SYSCALL(__NR_semget, sys_semget)
+#define __NR_semctl 191
+__SYSCALL(__NR_semctl, sys_semctl)
+#define __NR_semtimedop 192
+__SYSCALL(__NR_semtimedop, sys_semtimedop)
+#define __NR_semop 193
+__SYSCALL(__NR_semop, sys_semop)
+
+/* ipc/shm.c */
+#define __NR_shmget 194
+__SYSCALL(__NR_shmget, sys_shmget)
+#define __NR_shmctl 195
+__SYSCALL(__NR_shmctl, sys_shmctl)
+#define __NR_shmat 196
+__SYSCALL(__NR_shmat, sys_shmat)
+#define __NR_shmdt 197
+__SYSCALL(__NR_shmdt, sys_shmdt)
+
+/* net/socket.c */
+#define __NR_socket 198
+__SYSCALL(__NR_socket, sys_socket)
+#define __NR_socketpair 199
+__SYSCALL(__NR_socketpair, sys_socketpair)
+#define __NR_bind 200
+__SYSCALL(__NR_bind, sys_bind)
+#define __NR_listen 201
+__SYSCALL(__NR_listen, sys_listen)
+#define __NR_accept 202
+__SYSCALL(__NR_accept, sys_accept)
+#define __NR_connect 203
+__SYSCALL(__NR_connect, sys_connect)
+#define __NR_getsockname 204
+__SYSCALL(__NR_getsockname, sys_getsockname)
+#define __NR_getpeername 205
+__SYSCALL(__NR_getpeername, sys_getpeername)
+#define __NR_sendto 206
+__SYSCALL(__NR_sendto, sys_sendto)
+#define __NR_recvfrom 207
+__SYSCALL(__NR_recvfrom, sys_recvfrom)
+#define __NR_setsockopt 208
+__SYSCALL(__NR_setsockopt, sys_setsockopt)
+#define __NR_getsockopt 209
+__SYSCALL(__NR_getsockopt, sys_getsockopt)
+#define __NR_shutdown 210
+__SYSCALL(__NR_shutdown, sys_shutdown)
+#define __NR_sendmsg 211
+__SYSCALL(__NR_sendmsg, sys_sendmsg)
+#define __NR_recvmsg 212
+__SYSCALL(__NR_recvmsg, sys_recvmsg)
+
+/* mm/filemap.c */
+#define __NR_readahead 213
+__SYSCALL(__NR_readahead, sys_readahead)
+
+/* mm/nommu.c, also with MMU */
+#define __NR_brk 214
+__SYSCALL(__NR_brk, sys_brk)
+#define __NR_munmap 215
+__SYSCALL(__NR_munmap, sys_munmap)
+#define __NR_mremap 216
+__SYSCALL(__NR_mremap, sys_mremap)
+
+/* security/keys/keyctl.c */
+#define __NR_add_key 217
+__SYSCALL(__NR_add_key, sys_add_key)
+#define __NR_request_key 218
+__SYSCALL(__NR_request_key, sys_request_key)
+#define __NR_keyctl 219
+__SYSCALL(__NR_keyctl, sys_keyctl)
+
+/* arch/example/kernel/sys_example.c */
+#define __NR_clone 220
+__SYSCALL(__NR_clone, sys_clone) /* .long sys_clone_wrapper */
+#define __NR_execve 221
+__SYSCALL(__NR_execve, sys_execve) /* .long sys_execve_wrapper */
+
+#define __NR3264_mmap 222
+__SC_3264(__NR3264_mmap, sys_mmap2, sys_mmap)
+/* mm/fadvise.c */
+#define __NR3264_fadvise64 223
+__SC_3264(__NR3264_fadvise64, sys_fadvise64_64, sys_fadvise64)
+
+/* mm/, CONFIG_MMU only */
+#ifndef __ARCH_NOMMU
+#define __NR_swapon 224
+__SYSCALL(__NR_swapon, sys_swapon)
+#define __NR_swapoff 225
+__SYSCALL(__NR_swapoff, sys_swapoff)
+#define __NR_mprotect 226
+__SYSCALL(__NR_mprotect, sys_mprotect)
+#define __NR_msync 227
+__SYSCALL(__NR_msync, sys_msync)
+#define __NR_mlock 228
+__SYSCALL(__NR_mlock, sys_mlock)
+#define __NR_munlock 229
+__SYSCALL(__NR_munlock, sys_munlock)
+#define __NR_mlockall 230
+__SYSCALL(__NR_mlockall, sys_mlockall)
+#define __NR_munlockall 231
+__SYSCALL(__NR_munlockall, sys_munlockall)
+#define __NR_mincore 232
+__SYSCALL(__NR_mincore, sys_mincore)
+#define __NR_madvise 233
+__SYSCALL(__NR_madvise, sys_madvise)
+#define __NR_remap_file_pages 234
+__SYSCALL(__NR_remap_file_pages, sys_remap_file_pages)
+#define __NR_mbind 235
+__SYSCALL(__NR_mbind, sys_mbind)
+#define __NR_get_mempolicy 236
+__SYSCALL(__NR_get_mempolicy, sys_get_mempolicy)
+#define __NR_set_mempolicy 237
+__SYSCALL(__NR_set_mempolicy, sys_set_mempolicy)
+#define __NR_migrate_pages 238
+__SYSCALL(__NR_migrate_pages, sys_migrate_pages)
+#define __NR_move_pages 239
+__SYSCALL(__NR_move_pages, sys_move_pages)
+#endif
+
+#undef __NR_syscalls
+#define __NR_syscalls 240
+
+/*
+ * All syscalls below here should really go away;
+ * these are provided both for review and as a porting
+ * help for the C library version.
+ *
+ * Last chance: are any of these important enough to
+ * enable by default?
+ */
+#ifdef __ARCH_WANT_SYSCALL_NO_AT
+#define __NR_open 1024
+__SYSCALL(__NR_open, sys_open)
+#define __NR_link 1025
+__SYSCALL(__NR_link, sys_link)
+#define __NR_unlink 1026
+__SYSCALL(__NR_unlink, sys_unlink)
+#define __NR_mknod 1027
+__SYSCALL(__NR_mknod, sys_mknod)
+#define __NR_chmod 1028
+__SYSCALL(__NR_chmod, sys_chmod)
+#define __NR_chown 1029
+__SYSCALL(__NR_chown, sys_chown)
+#define __NR_mkdir 1030
+__SYSCALL(__NR_mkdir, sys_mkdir)
+#define __NR_rmdir 1031
+__SYSCALL(__NR_rmdir, sys_rmdir)
+#define __NR_lchown 1032
+__SYSCALL(__NR_lchown, sys_lchown)
+#define __NR_access 1033
+__SYSCALL(__NR_access, sys_access)
+#define __NR_rename 1034
+__SYSCALL(__NR_rename, sys_rename)
+#define __NR_readlink 1035
+__SYSCALL(__NR_readlink, sys_readlink)
+#define __NR_symlink 1036
+__SYSCALL(__NR_symlink, sys_symlink)
+#define __NR_utimes 1037
+__SYSCALL(__NR_utimes, sys_utimes)
+#define __NR3264_stat 1038
+__SC_3264(__NR3264_stat, sys_stat64, sys_newstat)
+#define __NR3264_lstat 1039
+__SC_3264(__NR3264_lstat, sys_lstat64, sys_newlstat)
+
+#undef __NR_syscalls
+#define __NR_syscalls (__NR3264_lstat+1)
+#endif /* __ARCH_WANT_SYSCALL_NO_AT */
+
+#ifdef __ARCH_WANT_SYSCALL_NO_FLAGS
+#define __NR_pipe 1040
+__SYSCALL(__NR_pipe, sys_pipe)
+#define __NR_dup2 1041
+__SYSCALL(__NR_dup2, sys_dup2)
+#define __NR_epoll_create 1042
+__SYSCALL(__NR_epoll_create, sys_epoll_create)
+#define __NR_inotify_init 1043
+__SYSCALL(__NR_inotify_init, sys_inotify_init)
+#define __NR_eventfd 1044
+__SYSCALL(__NR_eventfd, sys_eventfd)
+#define __NR_signalfd 1045
+__SYSCALL(__NR_signalfd, sys_signalfd)
+
+#undef __NR_syscalls
+#define __NR_syscalls (__NR_signalfd+1)
+#endif /* __ARCH_WANT_SYSCALL_NO_FLAGS */
+
+#if __BITS_PER_LONG == 32 && defined(__ARCH_WANT_SYSCALL_OFF_T)
+#define __NR_sendfile 1046
+__SYSCALL(__NR_sendfile, sys_sendfile)
+#define __NR_ftruncate 1047
+__SYSCALL(__NR_ftruncate, sys_ftruncate)
+#define __NR_truncate 1048
+__SYSCALL(__NR_truncate, sys_truncate)
+#define __NR_stat 1049
+__SYSCALL(__NR_stat, sys_newstat)
+#define __NR_lstat 1050
+__SYSCALL(__NR_lstat, sys_newlstat)
+#define __NR_fstat 1051
+__SYSCALL(__NR_fstat, sys_newfstat)
+#define __NR_fcntl 1052
+__SYSCALL(__NR_fcntl, sys_fcntl)
+#define __NR_fadvise64 1053
+#define __ARCH_WANT_SYS_FADVISE64
+__SYSCALL(__NR_fadvise64, sys_fadvise64)
+#define __NR_newfstatat 1054
+#define __ARCH_WANT_SYS_NEWFSTATAT
+__SYSCALL(__NR_newfstatat, sys_newfstatat)
+#define __NR_fstatfs 1055
+__SYSCALL(__NR_fstatfs, sys_fstatfs)
+#define __NR_statfs 1056
+__SYSCALL(__NR_statfs, sys_statfs)
+#define __NR_lseek 1057
+__SYSCALL(__NR_lseek, sys_lseek)
+#define __NR_mmap 1058
+__SYSCALL(__NR_mmap, sys_mmap)
+
+#undef __NR_syscalls
+#define __NR_syscalls (__NR_mmap+1)
+#endif /* 32 bit off_t syscalls */
+
+#ifdef __ARCH_WANT_SYSCALL_DEPRECATED
+#define __NR_alarm 1059
+#define __ARCH_WANT_SYS_ALARM
+__SYSCALL(__NR_alarm, sys_alarm)
+#define __NR_getpgrp 1060
+#define __ARCH_WANT_SYS_GETPGRP
+__SYSCALL(__NR_getpgrp, sys_getpgrp)
+#define __NR_pause 1061
+#define __ARCH_WANT_SYS_PAUSE
+__SYSCALL(__NR_pause, sys_pause)
+#define __NR_time 1062
+#define __ARCH_WANT_SYS_TIME
+__SYSCALL(__NR_time, sys_time)
+#define __NR_utime 1063
+#define __ARCH_WANT_SYS_UTIME
+__SYSCALL(__NR_utime, sys_utime)
+
+#define __NR_creat 1064
+__SYSCALL(__NR_creat, sys_creat)
+#define __NR_getdents 1065
+#define __ARCH_WANT_SYS_GETDENTS
+__SYSCALL(__NR_getdents, sys_getdents)
+#define __NR_futimesat 1066
+__SYSCALL(__NR_futimesat, sys_futimesat)
+#define __NR_select 1067
+#define __ARCH_WANT_SYS_SELECT
+__SYSCALL(__NR_select, sys_select)
+#define __NR_poll 1068
+__SYSCALL(__NR_poll, sys_poll)
+#define __NR_epoll_wait 1069
+__SYSCALL(__NR_epoll_wait, sys_epoll_wait)
+#define __NR_ustat 1070
+__SYSCALL(__NR_ustat, sys_ustat)
+#define __NR_vfork 1071
+__SYSCALL(__NR_vfork, sys_vfork)
+#define __NR_wait4 1072
+__SYSCALL(__NR_wait4, sys_wait4)
+#define __NR_recv 1073
+__SYSCALL(__NR_recv, sys_recv)
+#define __NR_send 1074
+__SYSCALL(__NR_send, sys_send)
+#define __NR_bdflush 1075
+__SYSCALL(__NR_bdflush, sys_bdflush)
+#define __NR_umount 1076
+__SYSCALL(__NR_umount, sys_oldumount)
+#define __ARCH_WANT_SYS_OLDUMOUNT
+#define __NR_uselib 1077
+__SYSCALL(__NR_uselib, sys_uselib)
+#define __NR__sysctl 1078
+__SYSCALL(__NR__sysctl, sys_sysctl)
+
+#define __NR_fork 1079
+#ifdef CONFIG_MMU
+__SYSCALL(__NR_fork, sys_fork)
+#else
+__SYSCALL(__NR_fork, sys_ni_syscall)
+#endif /* CONFIG_MMU */
+
+#undef __NR_syscalls
+#define __NR_syscalls (__NR_fork+1)
+
+#endif /* __ARCH_WANT_SYSCALL_DEPRECATED */
+
+/*
+ * 32 bit systems traditionally used different
+ * syscalls for off_t and loff_t arguments, while
+ * 64 bit systems only need the off_t version.
+ * For new 32 bit platforms, there is no need to
+ * implement the old 32 bit off_t syscalls, so
+ * they take different names.
+ * Here we map the numbers so that both versions
+ * use the same syscall table layout.
+ */
+#if __BITS_PER_LONG == 64
+#define __NR_fcntl __NR3264_fcntl
+#define __NR_statfs __NR3264_statfs
+#define __NR_fstatfs __NR3264_fstatfs
+#define __NR_truncate __NR3264_truncate
+#define __NR_ftruncate __NR3264_truncate
+#define __NR_lseek __NR3264_lseek
+#define __NR_sendfile __NR3264_sendfile
+#define __NR_newfstatat __NR3264_fstatat
+#define __NR_fstat __NR3264_fstat
+#define __NR_mmap __NR3264_mmap
+#define __NR_fadvise64 __NR3264_fadvise64
+#ifdef __NR3264_stat
+#define __NR_stat __NR3264_stat
+#define __NR_lstat __NR3264_lstat
+#endif
+#else
+#define __NR_fcntl64 __NR3264_fcntl
+#define __NR_statfs64 __NR3264_statfs
+#define __NR_fstatfs64 __NR3264_fstatfs
+#define __NR_truncate64 __NR3264_truncate
+#define __NR_ftruncate64 __NR3264_truncate
+#define __NR_llseek __NR3264_lseek
+#define __NR_sendfile64 __NR3264_sendfile
+#define __NR_fstatat64 __NR3264_fstatat
+#define __NR_fstat64 __NR3264_fstat
+#define __NR_mmap2 __NR3264_mmap
+#define __NR_fadvise64_64 __NR3264_fadvise64
+#ifdef __NR3264_stat
+#define __NR_stat64 __NR3264_stat
+#define __NR_lstat64 __NR3264_lstat
+#endif
+#endif
+
+#ifdef __KERNEL__
+
+/*
+ * These are required system calls; we should
+ * invert the logic eventually and let them
+ * be selected by default.
+ */
+#if __BITS_PER_LONG == 32
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_SYS_LLSEEK
+#endif
+#define __ARCH_WANT_SYS_RT_SIGACTION
+#define __ARCH_WANT_SYS_RT_SIGSUSPEND
+
+/*
+ * "Conditional" syscalls
+ *
+ * What we want is __attribute__((weak,alias("sys_ni_syscall"))),
+ * but it doesn't work on all toolchains, so we just do it by hand
+ */
+#ifndef cond_syscall
+#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall")
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_GENERIC_UNISTD_H */
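A sketch of how an architecture is expected to consume this header (the file and array names are assumptions, not part of this merge): include it once for the __NR_* constants, then redefine __SYSCALL and include it again to emit the syscall table. The "#if !defined(...) || defined(__SYSCALL)" guard at the top exists precisely to allow this second inclusion.

/* Hypothetical arch/example/kernel/syscall_table.c -- illustrative only. */
#include <linux/syscalls.h>
#include <asm/unistd.h>

asmlinkage long sys_ni_syscall(void);	/* "not implemented" fallback */

#undef __SYSCALL
#define __SYSCALL(nr, call)	[nr] = (call),

void *sys_call_table[__NR_syscalls] = {
	[0 ... __NR_syscalls - 1] = sys_ni_syscall,	/* default every slot */
#include <asm/unistd.h>
};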
diff --git a/include/asm-generic/user.h b/include/asm-generic/user.h
new file mode 100644
index 0000000..8b9c3c9
--- /dev/null
+++ b/include/asm-generic/user.h
@@ -0,0 +1,8 @@
+#ifndef __ASM_GENERIC_USER_H
+#define __ASM_GENERIC_USER_H
+/*
+ * This file may define a 'struct user' structure. However, it is only
+ * used for a.out files, which are not supported on new architectures.
+ */
+
+#endif /* __ASM_GENERIC_USER_H */
diff --git a/include/asm-generic/vga.h b/include/asm-generic/vga.h
new file mode 100644
index 0000000..36c8ff5
--- /dev/null
+++ b/include/asm-generic/vga.h
@@ -0,0 +1,24 @@
+/*
+ * Access to VGA videoram
+ *
+ * (c) 1998 Martin Mares <mj@ucw.cz>
+ */
+#ifndef __ASM_GENERIC_VGA_H
+#define __ASM_GENERIC_VGA_H
+
+/*
+ * On most architectures that support VGA, we can just
+ * recalculate addresses and then access the videoram
+ * directly without any black magic.
+ *
+ * Everyone else needs to ioremap the address and use
+ * proper I/O accesses.
+ */
+#ifndef VGA_MAP_MEM
+#define VGA_MAP_MEM(x, s) (unsigned long)phys_to_virt(x)
+#endif
+
+#define vga_readb(x) (*(x))
+#define vga_writeb(x, y) (*(y) = (x))
+
+#endif /* __ASM_GENERIC_VGA_H */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 89853bc..6bdba10 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -1,4 +1,57 @@
-#include <linux/section-names.h>
+/*
+ * Helper macros to support writing architecture specific
+ * linker scripts.
+ *
+ * A minimal linker script has the following content:
+ * [This is a sample, architectures may have special requirements]
+ *
+ * OUTPUT_FORMAT(...)
+ * OUTPUT_ARCH(...)
+ * ENTRY(...)
+ * SECTIONS
+ * {
+ * . = START;
+ * __init_begin = .;
+ * HEAD_TEXT_SECTION
+ * INIT_TEXT_SECTION(PAGE_SIZE)
+ * INIT_DATA_SECTION(...)
+ * PERCPU(PAGE_SIZE)
+ * __init_end = .;
+ *
+ * _stext = .;
+ * TEXT_SECTION = 0
+ * _etext = .;
+ *
+ * _sdata = .;
+ * RO_DATA_SECTION(PAGE_SIZE)
+ * RW_DATA_SECTION(...)
+ * _edata = .;
+ *
+ * EXCEPTION_TABLE(...)
+ * NOTES
+ *
+ * __bss_start = .;
+ * BSS_SECTION(0, 0)
+ * __bss_stop = .;
+ * _end = .;
+ *
+ * /DISCARD/ : {
+ * EXIT_TEXT
+ * EXIT_DATA
+ * EXIT_CALL
+ * }
+ * STABS_DEBUG
+ * DWARF_DEBUG
+ * }
+ *
+ * [__init_begin, __init_end] is the init section that may be freed after init
+ * [_stext, _etext] is the text section
+ * [_sdata, _edata] is the data section
+ *
+ * Some of the included output sections have their own set of constants.
+ * Examples are: [__initramfs_start, __initramfs_end] for initramfs and
+ * [__nosave_begin, __nosave_end] for the nosave data
+ */
#ifndef LOAD_OFFSET
#define LOAD_OFFSET 0
@@ -63,7 +116,7 @@
#define BRANCH_PROFILE()
#endif
-#ifdef CONFIG_EVENT_TRACER
+#ifdef CONFIG_EVENT_TRACING
#define FTRACE_EVENTS() VMLINUX_SYMBOL(__start_ftrace_events) = .; \
*(_ftrace_events) \
VMLINUX_SYMBOL(__stop_ftrace_events) = .;
@@ -116,7 +169,36 @@
FTRACE_EVENTS() \
TRACE_SYSCALLS()
-#define RO_DATA(align) \
+/*
+ * Data section helpers
+ */
+#define NOSAVE_DATA \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__nosave_begin) = .; \
+ *(.data.nosave) \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__nosave_end) = .;
+
+#define PAGE_ALIGNED_DATA(page_align) \
+ . = ALIGN(page_align); \
+ *(.data.page_aligned)
+
+#define READ_MOSTLY_DATA(align) \
+ . = ALIGN(align); \
+ *(.data.read_mostly)
+
+#define CACHELINE_ALIGNED_DATA(align) \
+ . = ALIGN(align); \
+ *(.data.cacheline_aligned)
+
+#define INIT_TASK(align) \
+ . = ALIGN(align); \
+ *(.data.init_task)
+
+/*
+ * Read only Data
+ */
+#define RO_DATA_SECTION(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_rodata) = .; \
@@ -270,9 +352,10 @@
} \
. = ALIGN((align));
-/* RODATA provided for backward compatibility.
+/* RODATA & RO_DATA provided for backward compatibility.
* All archs are supposed to use RO_DATA() */
-#define RODATA RO_DATA(4096)
+#define RODATA RO_DATA_SECTION(4096)
+#define RO_DATA(align) RO_DATA_SECTION(align)
#define SECURITY_INIT \
.security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) { \
@@ -330,16 +413,42 @@
#endif
/* Section used for early init (in .S files) */
-#define HEAD_TEXT *(HEAD_TEXT_SECTION)
+#define HEAD_TEXT *(.head.text)
+
+#define HEAD_TEXT_SECTION \
+ .head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
+ HEAD_TEXT \
+ }
+
+/*
+ * Exception table
+ */
+#define EXCEPTION_TABLE(align) \
+ . = ALIGN(align); \
+ __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__start___ex_table) = .; \
+ *(__ex_table) \
+ VMLINUX_SYMBOL(__stop___ex_table) = .; \
+ }
+
+/*
+ * Init task
+ */
+#define INIT_TASK_DATA(align) \
+ . = ALIGN(align); \
+ .data.init_task : { \
+ INIT_TASK \
+ }
/* init and exit section handling */
#define INIT_DATA \
*(.init.data) \
DEV_DISCARD(init.data) \
- DEV_DISCARD(init.rodata) \
CPU_DISCARD(init.data) \
- CPU_DISCARD(init.rodata) \
MEM_DISCARD(init.data) \
+ *(.init.rodata) \
+ DEV_DISCARD(init.rodata) \
+ CPU_DISCARD(init.rodata) \
MEM_DISCARD(init.rodata)
#define INIT_TEXT \
@@ -363,9 +472,35 @@
CPU_DISCARD(exit.text) \
MEM_DISCARD(exit.text)
- /* DWARF debug sections.
- Symbols in the DWARF debugging sections are relative to
- the beginning of the section so we begin them at 0. */
+#define EXIT_CALL \
+ *(.exitcall.exit)
+
+/*
+ * bss (Block Started by Symbol) - uninitialized data
+ * zeroed during startup
+ */
+#define SBSS \
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) { \
+ *(.sbss) \
+ *(.scommon) \
+ }
+
+#define BSS(bss_align) \
+ . = ALIGN(bss_align); \
+ .bss : AT(ADDR(.bss) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(__bss_start) = .; \
+ *(.bss.page_aligned) \
+ *(.dynbss) \
+ *(.bss) \
+ *(COMMON) \
+ VMLINUX_SYMBOL(__bss_stop) = .; \
+ }
+
+/*
+ * DWARF debug sections.
+ * Symbols in the DWARF debugging sections are relative to
+ * the beginning of the section so we begin them at 0.
+ */
#define DWARF_DEBUG \
/* DWARF 1 */ \
.debug 0 : { *(.debug) } \
@@ -432,6 +567,12 @@
VMLINUX_SYMBOL(__stop_notes) = .; \
}
+#define INIT_SETUP(initsetup_align) \
+ . = ALIGN(initsetup_align); \
+ VMLINUX_SYMBOL(__setup_start) = .; \
+ *(.init.setup) \
+ VMLINUX_SYMBOL(__setup_end) = .;
+
#define INITCALLS \
*(.initcallearly.init) \
VMLINUX_SYMBOL(__early_initcall_end) = .; \
@@ -453,6 +594,31 @@
*(.initcall7.init) \
*(.initcall7s.init)
+#define INIT_CALLS \
+ VMLINUX_SYMBOL(__initcall_start) = .; \
+ INITCALLS \
+ VMLINUX_SYMBOL(__initcall_end) = .;
+
+#define CON_INITCALL \
+ VMLINUX_SYMBOL(__con_initcall_start) = .; \
+ *(.con_initcall.init) \
+ VMLINUX_SYMBOL(__con_initcall_end) = .;
+
+#define SECURITY_INITCALL \
+ VMLINUX_SYMBOL(__security_initcall_start) = .; \
+ *(.security_initcall.init) \
+ VMLINUX_SYMBOL(__security_initcall_end) = .;
+
+#ifdef CONFIG_BLK_DEV_INITRD
+#define INIT_RAM_FS \
+ . = ALIGN(PAGE_SIZE); \
+ VMLINUX_SYMBOL(__initramfs_start) = .; \
+ *(.init.ramfs) \
+ VMLINUX_SYMBOL(__initramfs_end) = .;
+#else
+#define INIT_RAM_FS
+#endif
+
/**
* PERCPU_VADDR - define output section for percpu area
* @vaddr: explicit base address (optional)
@@ -509,3 +675,58 @@
*(.data.percpu.shared_aligned) \
VMLINUX_SYMBOL(__per_cpu_end) = .; \
}
+
+
+/*
+ * Definition of the high level *_SECTION macros
+ * They will fit only a subset of the architectures
+ */
+
+
+/*
+ * Writeable data.
+ * All sections are combined in a single .data section.
+ * The sections following CONSTRUCTORS are arranged so their
+ * typical alignment matches.
+ * A cacheline is typically (if not always) smaller than a PAGE_SIZE, so
+ * the sections that have this restriction (or a similar one)
+ * are located before the ones requiring PAGE_SIZE alignment.
+ * NOSAVE_DATA starts and ends with a PAGE_SIZE alignment, which
+ * matches the requirement of PAGE_ALIGNED_DATA.
+ *
+ * Use 0 as page_align if page-aligned data is not used. */
+#define RW_DATA_SECTION(cacheline, nosave, pagealigned, inittask) \
+ . = ALIGN(PAGE_SIZE); \
+ .data : AT(ADDR(.data) - LOAD_OFFSET) { \
+ INIT_TASK(inittask) \
+ CACHELINE_ALIGNED_DATA(cacheline) \
+ READ_MOSTLY_DATA(cacheline) \
+ DATA_DATA \
+ CONSTRUCTORS \
+ NOSAVE_DATA(nosave) \
+ PAGE_ALIGNED_DATA(pagealigned) \
+ }
+
+#define INIT_TEXT_SECTION(inittext_align) \
+ . = ALIGN(inittext_align); \
+ .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) { \
+ VMLINUX_SYMBOL(_sinittext) = .; \
+ INIT_TEXT \
+ VMLINUX_SYMBOL(_einittext) = .; \
+ }
+
+#define INIT_DATA_SECTION(initsetup_align) \
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) { \
+ INIT_DATA \
+ INIT_SETUP(initsetup_align) \
+ INIT_CALLS \
+ CON_INITCALL \
+ SECURITY_INITCALL \
+ INIT_RAM_FS \
+ }
+
+#define BSS_SECTION(sbss_align, bss_align) \
+ SBSS \
+ BSS(bss_align) \
+ . = ALIGN(4);
+
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index c8c4221..d4ddc22 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -86,7 +86,17 @@ struct drm_device;
#include "drm_os_linux.h"
#include "drm_hashtab.h"
+#include "drm_mm.h"
+#define DRM_UT_CORE 0x01
+#define DRM_UT_DRIVER 0x02
+#define DRM_UT_KMS 0x04
+#define DRM_UT_MODE 0x08
+
+extern void drm_ut_debug_printk(unsigned int request_level,
+ const char *prefix,
+ const char *function_name,
+ const char *format, ...);
/***********************************************************************/
/** \name DRM template customization defaults */
/*@{*/
@@ -186,15 +196,57 @@ struct drm_device;
* \param arg arguments
*/
#if DRM_DEBUG_CODE
-#define DRM_DEBUG(fmt, arg...) \
+#define DRM_DEBUG(fmt, args...) \
+ do { \
+ drm_ut_debug_printk(DRM_UT_CORE, DRM_NAME, \
+ __func__, fmt, ##args); \
+ } while (0)
+
+#define DRM_DEBUG_DRIVER(prefix, fmt, args...) \
+ do { \
+ drm_ut_debug_printk(DRM_UT_DRIVER, prefix, \
+ __func__, fmt, ##args); \
+ } while (0)
+#define DRM_DEBUG_KMS(prefix, fmt, args...) \
+ do { \
+ drm_ut_debug_printk(DRM_UT_KMS, prefix, \
+ __func__, fmt, ##args); \
+ } while (0)
+#define DRM_DEBUG_MODE(prefix, fmt, args...) \
+ do { \
+ drm_ut_debug_printk(DRM_UT_MODE, prefix, \
+ __func__, fmt, ##args); \
+ } while (0)
+#define DRM_LOG(fmt, args...) \
do { \
- if ( drm_debug ) \
- printk(KERN_DEBUG \
- "[" DRM_NAME ":%s] " fmt , \
- __func__ , ##arg); \
+ drm_ut_debug_printk(DRM_UT_CORE, NULL, \
+ NULL, fmt, ##args); \
+ } while (0)
+#define DRM_LOG_KMS(fmt, args...) \
+ do { \
+ drm_ut_debug_printk(DRM_UT_KMS, NULL, \
+ NULL, fmt, ##args); \
+ } while (0)
+#define DRM_LOG_MODE(fmt, args...) \
+ do { \
+ drm_ut_debug_printk(DRM_UT_MODE, NULL, \
+ NULL, fmt, ##args); \
+ } while (0)
+#define DRM_LOG_DRIVER(fmt, args...) \
+ do { \
+ drm_ut_debug_printk(DRM_UT_DRIVER, NULL, \
+ NULL, fmt, ##args); \
} while (0)
#else
+#define DRM_DEBUG_DRIVER(prefix, fmt, args...) do { } while (0)
+#define DRM_DEBUG_KMS(prefix, fmt, args...) do { } while (0)
+#define DRM_DEBUG_MODE(prefix, fmt, args...) do { } while (0)
#define DRM_DEBUG(fmt, arg...) do { } while (0)
+#define DRM_LOG(fmt, arg...) do { } while (0)
+#define DRM_LOG_KMS(fmt, args...) do { } while (0)
+#define DRM_LOG_MODE(fmt, arg...) do { } while (0)
+#define DRM_LOG_DRIVER(fmt, arg...) do { } while (0)
+
#endif
#define DRM_PROC_LIMIT (PAGE_SIZE-80)
@@ -237,15 +289,15 @@ struct drm_device;
* \param dev DRM device.
* \param filp file pointer of the caller.
*/
-#define LOCK_TEST_WITH_RETURN( dev, file_priv ) \
-do { \
- if (!_DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock) || \
- file_priv->master->lock.file_priv != file_priv) { \
+#define LOCK_TEST_WITH_RETURN( dev, _file_priv ) \
+do { \
+ if (!_DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock) || \
+ _file_priv->master->lock.file_priv != _file_priv) { \
DRM_ERROR( "%s called without lock held, held %d owner %p %p\n",\
- __func__, _DRM_LOCK_IS_HELD(file_priv->master->lock.hw_lock->lock),\
- file_priv->master->lock.file_priv, file_priv); \
- return -EINVAL; \
- } \
+ __func__, _DRM_LOCK_IS_HELD(_file_priv->master->lock.hw_lock->lock),\
+ _file_priv->master->lock.file_priv, _file_priv); \
+ return -EINVAL; \
+ } \
} while (0)
/**
@@ -502,26 +554,6 @@ struct drm_sigdata {
};
-/*
- * Generic memory manager structs
- */
-
-struct drm_mm_node {
- struct list_head fl_entry;
- struct list_head ml_entry;
- int free;
- unsigned long start;
- unsigned long size;
- struct drm_mm *mm;
- void *private;
-};
-
-struct drm_mm {
- struct list_head fl_entry;
- struct list_head ml_entry;
-};
-
-
/**
* Kernel side of a mapping
*/
@@ -1385,22 +1417,6 @@ extern char *drm_get_connector_status_name(enum drm_connector_status status);
extern int drm_sysfs_connector_add(struct drm_connector *connector);
extern void drm_sysfs_connector_remove(struct drm_connector *connector);
-/*
- * Basic memory manager support (drm_mm.c)
- */
-extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
- unsigned long size,
- unsigned alignment);
-extern void drm_mm_put_block(struct drm_mm_node * cur);
-extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm, unsigned long size,
- unsigned alignment, int best_match);
-extern int drm_mm_init(struct drm_mm *mm, unsigned long start, unsigned long size);
-extern void drm_mm_takedown(struct drm_mm *mm);
-extern int drm_mm_clean(struct drm_mm *mm);
-extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
-extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size);
-extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size);
-
/* Graphics Execution Manager library functions (drm_gem.c) */
int drm_gem_init(struct drm_device *dev);
void drm_gem_destroy(struct drm_device *dev);
@@ -1519,6 +1535,26 @@ static __inline__ void *drm_calloc(size_t nmemb, size_t size, int area)
{
return kcalloc(nmemb, size, GFP_KERNEL);
}
+
+static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+{
+ if (size * nmemb <= PAGE_SIZE)
+ return kcalloc(nmemb, size, GFP_KERNEL);
+
+ if (size != 0 && nmemb > ULONG_MAX / size)
+ return NULL;
+
+ return __vmalloc(size * nmemb,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+}
+
+static __inline void drm_free_large(void *ptr)
+{
+ if (!is_vmalloc_addr(ptr))
+ return kfree(ptr);
+
+ vfree(ptr);
+}
#else
extern void *drm_alloc(size_t size, int area);
extern void drm_free(void *pt, size_t size, int area);
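The converted DRM_DEBUG() above keeps its old calling convention, while the new categorized variants take an explicit prefix string; a hypothetical call site might look like the sketch below (the prefix and function names are invented).

/* Hypothetical driver code -- "example" is a made-up prefix. */
static void example_set_mode(int pipe)
{
	DRM_DEBUG("core debug message, pipe %d\n", pipe);
	DRM_DEBUG_KMS("example", "mode set requested on pipe %d\n", pipe);
	DRM_LOG_DRIVER("driver log line without prefix, pipe %d\n", pipe);
}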
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 3c1924c..7300fb8 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -471,6 +471,9 @@ struct drm_connector {
u32 property_ids[DRM_CONNECTOR_MAX_PROPERTY];
uint64_t property_values[DRM_CONNECTOR_MAX_PROPERTY];
+ /* requested DPMS state */
+ int dpms;
+
void *helper_private;
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index ec073d8..6769ff6 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -99,6 +99,8 @@ extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
struct drm_framebuffer *old_fb);
extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+extern void drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+
extern int drm_helper_mode_fill_fb_struct(struct drm_framebuffer *fb,
struct drm_mode_fb_cmd *mode_cmd);
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index cd2b189..0af087a 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -35,6 +35,8 @@
#ifndef DRM_HASHTAB_H
#define DRM_HASHTAB_H
+#include <linux/list.h>
+
#define drm_hash_entry(_ptr, _type, _member) container_of(_ptr, _type, _member)
struct drm_hash_item {
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
new file mode 100644
index 0000000..5662f42
--- /dev/null
+++ b/include/drm/drm_mm.h
@@ -0,0 +1,90 @@
+/**************************************************************************
+ *
+ * Copyright 2006-2008 Tungsten Graphics, Inc., Cedar Park, TX. USA.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ * Thomas Hellstrom <thomas-at-tungstengraphics-dot-com>
+ */
+
+#ifndef _DRM_MM_H_
+#define _DRM_MM_H_
+
+/*
+ * Generic range manager structs
+ */
+#include <linux/list.h>
+
+struct drm_mm_node {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+ int free;
+ unsigned long start;
+ unsigned long size;
+ struct drm_mm *mm;
+ void *private;
+};
+
+struct drm_mm {
+ struct list_head fl_entry;
+ struct list_head ml_entry;
+ struct list_head unused_nodes;
+ int num_unused;
+ spinlock_t unused_lock;
+};
+
+/*
+ * Basic range manager support (drm_mm.c)
+ */
+
+extern struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment);
+extern struct drm_mm_node *drm_mm_get_block_atomic(struct drm_mm_node *parent,
+ unsigned long size,
+ unsigned alignment);
+extern void drm_mm_put_block(struct drm_mm_node *cur);
+extern struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
+ unsigned long size,
+ unsigned alignment,
+ int best_match);
+extern int drm_mm_init(struct drm_mm *mm, unsigned long start,
+ unsigned long size);
+extern void drm_mm_takedown(struct drm_mm *mm);
+extern int drm_mm_clean(struct drm_mm *mm);
+extern unsigned long drm_mm_tail_space(struct drm_mm *mm);
+extern int drm_mm_remove_space_from_tail(struct drm_mm *mm,
+ unsigned long size);
+extern int drm_mm_add_space_to_tail(struct drm_mm *mm,
+ unsigned long size, int atomic);
+extern int drm_mm_pre_get(struct drm_mm *mm);
+
+static inline struct drm_mm *drm_get_mm(struct drm_mm_node *block)
+{
+ return block->mm;
+}
+
+#endif
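A hedged sketch of the allocation flow with this range manager (sizes and error handling are illustrative assumptions): initialize a manager over an offset range, search for a free hole, carve a block out of it, and hand the block back when done.

/* Hypothetical use of the drm_mm range manager; all sizes are examples. */
static struct drm_mm_node *example_alloc_4k(struct drm_mm *mm)
{
	struct drm_mm_node *hole;

	/* find a hole that can hold a 4 KB block with 4 KB alignment */
	hole = drm_mm_search_free(mm, 4096, 4096, 0);
	if (!hole)
		return NULL;

	/* carve the block out of the hole */
	return drm_mm_get_block(hole, 4096, 4096);
}

static void example_lifecycle(void)
{
	struct drm_mm mm;
	struct drm_mm_node *node;

	drm_mm_init(&mm, 0, 16 * 1024 * 1024);	/* manage a 16 MB range */
	node = example_alloc_4k(&mm);
	if (node)
		drm_mm_put_block(node);		/* return the block */
	drm_mm_takedown(&mm);
}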
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index fc55db7..f8634ab 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -254,6 +254,11 @@
{0x1002, 0x940A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
{0x1002, 0x940F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R600|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94A1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B1, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x94B5, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV740|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9440, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9441, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x9442, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
@@ -268,6 +273,8 @@
{0x1002, 0x9456, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_NEW_MEMMAP}, \
{0x1002, 0x945A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x945B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9460, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
+ {0x1002, 0x9462, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x946A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x946B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
{0x1002, 0x947A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV770|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -536,4 +543,6 @@
{0x8086, 0xa001, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0xa011, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0x8086, 0x35e8, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
+ {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \
{0, 0, 0}
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 7e09c5c..a2df703 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -135,6 +135,7 @@ header-y += posix_types.h
header-y += ppdev.h
header-y += prctl.h
header-y += qnxtypes.h
+header-y += qnx4_fs.h
header-y += radeonfb.h
header-y += raw.h
header-y += resource.h
@@ -308,7 +309,6 @@ unifdef-y += poll.h
unifdef-y += ppp_defs.h
unifdef-y += ppp-comp.h
unifdef-y += ptrace.h
-unifdef-y += qnx4_fs.h
unifdef-y += quota.h
unifdef-y += random.h
unifdef-y += rfkill.h
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 88be890..51b4b0a 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -119,7 +119,7 @@ extern int pci_mmcfg_config_num;
extern int sbf_port;
extern unsigned long acpi_realmode_flags;
-int acpi_register_gsi (u32 gsi, int triggering, int polarity);
+int acpi_register_gsi (struct device *dev, u32 gsi, int triggering, int polarity);
int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
#ifdef CONFIG_X86_IO_APIC
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 51e6e54..9b93caf 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -28,7 +28,7 @@ struct amba_id {
struct amba_driver {
struct device_driver drv;
- int (*probe)(struct amba_device *, void *);
+ int (*probe)(struct amba_device *, struct amba_id *);
int (*remove)(struct amba_device *);
void (*shutdown)(struct amba_device *);
int (*suspend)(struct amba_device *, pm_message_t);
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
new file mode 100644
index 0000000..dcad0ff
--- /dev/null
+++ b/include/linux/amba/pl022.h
@@ -0,0 +1,264 @@
+/*
+ * include/linux/amba/pl022.h
+ *
+ * Copyright (C) 2008-2009 ST-Ericsson AB
+ * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * Initial version inspired by:
+ * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
+ * Initial adoption to PL022 by:
+ * Sachin Verma <sachin.verma@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SSP_PL022_H
+#define _SSP_PL022_H
+
+#include <linux/device.h>
+
+/**
+ * enum ssp_loopback - whether SSP is in loopback mode or not
+ */
+enum ssp_loopback {
+ LOOPBACK_DISABLED,
+ LOOPBACK_ENABLED
+};
+
+/**
+ * enum ssp_interface - interfaces allowed for this SSP Controller
+ * @SSP_INTERFACE_MOTOROLA_SPI: Motorola Interface
+ * @SSP_INTERFACE_TI_SYNC_SERIAL: Texas Instrument Synchronous Serial
+ * interface
+ * @SSP_INTERFACE_NATIONAL_MICROWIRE: National Semiconductor Microwire
+ * interface
+ * @SSP_INTERFACE_UNIDIRECTIONAL: Unidirectional interface (STn8810
+ * &STn8815 only)
+ */
+enum ssp_interface {
+ SSP_INTERFACE_MOTOROLA_SPI,
+ SSP_INTERFACE_TI_SYNC_SERIAL,
+ SSP_INTERFACE_NATIONAL_MICROWIRE,
+ SSP_INTERFACE_UNIDIRECTIONAL
+};
+
+/**
+ * enum ssp_hierarchy - whether SSP is configured as Master or Slave
+ */
+enum ssp_hierarchy {
+ SSP_MASTER,
+ SSP_SLAVE
+};
+
+/**
+ * struct ssp_clock_params - clock parameters, to set the SSP clock at a
+ * desired frequency
+ */
+struct ssp_clock_params {
+ u8 cpsdvsr; /* value from 2 to 254 (even only!) */
+ u8 scr; /* value from 0 to 255 */
+};
+
+/**
+ * enum ssp_rx_endian - endianness of Rx FIFO data
+ */
+enum ssp_rx_endian {
+ SSP_RX_MSB,
+ SSP_RX_LSB
+};
+
+/**
+ * enum ssp_tx_endian - endianness of Tx FIFO data
+ */
+enum ssp_tx_endian {
+ SSP_TX_MSB,
+ SSP_TX_LSB
+};
+
+/**
+ * enum ssp_data_size - number of bits in one data element
+ */
+enum ssp_data_size {
+ SSP_DATA_BITS_4 = 0x03, SSP_DATA_BITS_5, SSP_DATA_BITS_6,
+ SSP_DATA_BITS_7, SSP_DATA_BITS_8, SSP_DATA_BITS_9,
+ SSP_DATA_BITS_10, SSP_DATA_BITS_11, SSP_DATA_BITS_12,
+ SSP_DATA_BITS_13, SSP_DATA_BITS_14, SSP_DATA_BITS_15,
+ SSP_DATA_BITS_16, SSP_DATA_BITS_17, SSP_DATA_BITS_18,
+ SSP_DATA_BITS_19, SSP_DATA_BITS_20, SSP_DATA_BITS_21,
+ SSP_DATA_BITS_22, SSP_DATA_BITS_23, SSP_DATA_BITS_24,
+ SSP_DATA_BITS_25, SSP_DATA_BITS_26, SSP_DATA_BITS_27,
+ SSP_DATA_BITS_28, SSP_DATA_BITS_29, SSP_DATA_BITS_30,
+ SSP_DATA_BITS_31, SSP_DATA_BITS_32
+};
+
+/**
+ * enum ssp_mode - SSP mode of operation (Communication modes)
+ */
+enum ssp_mode {
+ INTERRUPT_TRANSFER,
+ POLLING_TRANSFER,
+ DMA_TRANSFER
+};
+
+/**
+ * enum ssp_rx_level_trig - receive FIFO watermark level which triggers
+ * an interrupt (IT): the interrupt fires when _N_ or more elements are in the RX FIFO.
+ */
+enum ssp_rx_level_trig {
+ SSP_RX_1_OR_MORE_ELEM,
+ SSP_RX_4_OR_MORE_ELEM,
+ SSP_RX_8_OR_MORE_ELEM,
+ SSP_RX_16_OR_MORE_ELEM,
+ SSP_RX_32_OR_MORE_ELEM
+};
+
+/**
+ * enum ssp_tx_level_trig - transmit FIFO watermark level which triggers
+ * an interrupt (IT): the interrupt fires when _N_ or more empty locations are in the TX FIFO.
+ */
+enum ssp_tx_level_trig {
+ SSP_TX_1_OR_MORE_EMPTY_LOC,
+ SSP_TX_4_OR_MORE_EMPTY_LOC,
+ SSP_TX_8_OR_MORE_EMPTY_LOC,
+ SSP_TX_16_OR_MORE_EMPTY_LOC,
+ SSP_TX_32_OR_MORE_EMPTY_LOC
+};
+
+/**
+ * enum ssp_spi_clk_phase - clock phase (Motorola SPI interface only)
+ * @SSP_CLK_RISING_EDGE: Receive data on rising edge
+ * @SSP_CLK_FALLING_EDGE: Receive data on falling edge
+ */
+enum ssp_spi_clk_phase {
+ SSP_CLK_RISING_EDGE,
+ SSP_CLK_FALLING_EDGE
+};
+
+/**
+ * enum ssp_spi_clk_pol - clock polarity (Motorola SPI interface only)
+ * @SSP_CLK_POL_IDLE_LOW: Low inactive level
+ * @SSP_CLK_POL_IDLE_HIGH: High inactive level
+ */
+enum ssp_spi_clk_pol {
+ SSP_CLK_POL_IDLE_LOW,
+ SSP_CLK_POL_IDLE_HIGH
+};
+
+/**
+ * enum ssp_microwire_ctrl_len - Microwire control lengths: command size in Microwire format
+ */
+enum ssp_microwire_ctrl_len {
+ SSP_BITS_4 = 0x03, SSP_BITS_5, SSP_BITS_6,
+ SSP_BITS_7, SSP_BITS_8, SSP_BITS_9,
+ SSP_BITS_10, SSP_BITS_11, SSP_BITS_12,
+ SSP_BITS_13, SSP_BITS_14, SSP_BITS_15,
+ SSP_BITS_16, SSP_BITS_17, SSP_BITS_18,
+ SSP_BITS_19, SSP_BITS_20, SSP_BITS_21,
+ SSP_BITS_22, SSP_BITS_23, SSP_BITS_24,
+ SSP_BITS_25, SSP_BITS_26, SSP_BITS_27,
+ SSP_BITS_28, SSP_BITS_29, SSP_BITS_30,
+ SSP_BITS_31, SSP_BITS_32
+};
+
+/**
+ * enum ssp_microwire_wait_state - Microwire wait state
+ * @SSP_MWIRE_WAIT_ZERO: No wait state inserted after last command bit
+ * @SSP_MWIRE_WAIT_ONE: One wait state inserted after last command bit
+ */
+enum ssp_microwire_wait_state {
+ SSP_MWIRE_WAIT_ZERO,
+ SSP_MWIRE_WAIT_ONE
+};
+
+/**
+ * enum ssp_duplex - Microwire channel duplex (full or half)
+ * @SSP_MICROWIRE_CHANNEL_FULL_DUPLEX: SSPTXD becomes bi-directional,
+ * SSPRXD not used
+ * @SSP_MICROWIRE_CHANNEL_HALF_DUPLEX: SSPTXD is an output, SSPRXD is
+ * an input.
+ */
+enum ssp_duplex {
+ SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ SSP_MICROWIRE_CHANNEL_HALF_DUPLEX
+};
+
+/**
+ * enum ssp_chip_select - chip select/deselect commands
+ */
+enum ssp_chip_select {
+ SSP_CHIP_SELECT,
+ SSP_CHIP_DESELECT
+};
+
+
+/**
+ * struct pl022_ssp_controller - device.platform_data for SPI controller devices.
+ * @num_chipselect: chipselects are used to distinguish individual
+ * SPI slaves, and are numbered from zero to num_chipselect - 1.
+ * Each slave has a chipselect signal, but it's common that not
+ * every chipselect is connected to a slave.
+ * @enable_dma: if true enables DMA driven transfers.
+ */
+struct pl022_ssp_controller {
+ u16 bus_id;
+ u8 num_chipselect;
+ u8 enable_dma:1;
+};
+
+/**
+ * struct pl022_config_chip - spi_board_info.controller_data for SPI
+ * slave devices, copied to spi_device.controller_data.
+ *
+ * @lbm: used for test purposes to internally connect RX and TX
+ * @iface: Interface type (Motorola, TI, Microwire, Universal)
+ * @hierarchy: sets whether the interface is master or slave
+ * @slave_tx_disable: SSPTXD is disconnected (in slave mode only)
+ * @clk_freq: Tune freq parameters of SSP (when in master mode)
+ * @endian_rx: Endianness of data in the Rx FIFO
+ * @endian_tx: Endianness of data in the Tx FIFO
+ * @data_size: Width of data element (4 to 32 bits)
+ * @com_mode: communication mode: polling, interrupt or DMA
+ * @rx_lev_trig: Rx FIFO watermark level (for IT & DMA mode)
+ * @tx_lev_trig: Tx FIFO watermark level (for IT & DMA mode)
+ * @clk_phase: Motorola SPI interface clock phase
+ * @clk_pol: Motorola SPI interface clock polarity
+ * @ctrl_len: Microwire interface: control length
+ * @wait_state: Microwire interface: wait state
+ * @duplex: Microwire interface: full/half duplex
+ * @cs_control: function pointer to a board-specific function that
+ * asserts/deasserts an I/O port to control HW generation of the device's chip select.
+ * @dma_xfer_type: Type of DMA xfer (Mem-to-periph or Periph-to-Periph)
+ * @dma_config: DMA configuration for SSP controller and peripheral
+ */
+struct pl022_config_chip {
+ struct device *dev;
+ enum ssp_loopback lbm;
+ enum ssp_interface iface;
+ enum ssp_hierarchy hierarchy;
+ bool slave_tx_disable;
+ struct ssp_clock_params clk_freq;
+ enum ssp_rx_endian endian_rx;
+ enum ssp_tx_endian endian_tx;
+ enum ssp_data_size data_size;
+ enum ssp_mode com_mode;
+ enum ssp_rx_level_trig rx_lev_trig;
+ enum ssp_tx_level_trig tx_lev_trig;
+ enum ssp_spi_clk_phase clk_phase;
+ enum ssp_spi_clk_pol clk_pol;
+ enum ssp_microwire_ctrl_len ctrl_len;
+ enum ssp_microwire_wait_state wait_state;
+ enum ssp_duplex duplex;
+ void (*cs_control) (u32 control);
+};
+
+#endif /* _SSP_PL022_H */
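
A board port would normally feed the two structures above in through platform data and spi_board_info.controller_data, as the kerneldoc describes. A sketch under that assumption (all names and values below are illustrative):

	static void board_cs_control(u32 command)
	{
		/* drive a GPIO according to SSP_CHIP_SELECT / SSP_CHIP_DESELECT */
	}

	static struct pl022_config_chip board_spi_chip_info = {
		.lbm		= LOOPBACK_DISABLED,
		.iface		= SSP_INTERFACE_MOTOROLA_SPI,
		.hierarchy	= SSP_MASTER,
		.slave_tx_disable = false,
		.clk_freq	= { .cpsdvsr = 2, .scr = 8 },
		.endian_rx	= SSP_RX_MSB,
		.endian_tx	= SSP_TX_MSB,
		.data_size	= SSP_DATA_BITS_8,
		.com_mode	= INTERRUPT_TRANSFER,
		.rx_lev_trig	= SSP_RX_1_OR_MORE_ELEM,
		.tx_lev_trig	= SSP_TX_1_OR_MORE_EMPTY_LOC,
		.clk_phase	= SSP_CLK_RISING_EDGE,
		.clk_pol	= SSP_CLK_POL_IDLE_LOW,
		.cs_control	= board_cs_control,
	};

	static struct pl022_ssp_controller board_ssp_plat_data = {
		.bus_id		= 0,
		.num_chipselect	= 1,
		.enable_dma	= 0,
	};

The chip_info structure would then be referenced from the board's spi_board_info entry, and the controller structure passed as the AMBA device's platform data.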
diff --git a/include/linux/amba/serial.h b/include/linux/amba/serial.h
index 48ee32a..5a5a7fd 100644
--- a/include/linux/amba/serial.h
+++ b/include/linux/amba/serial.h
@@ -114,6 +114,9 @@
#define UART011_IFLS_TX4_8 (2 << 0)
#define UART011_IFLS_TX6_8 (3 << 0)
#define UART011_IFLS_TX7_8 (4 << 0)
+/* special values for ST vendor with deeper fifo */
+#define UART011_IFLS_RX_HALF (5 << 3)
+#define UART011_IFLS_TX_HALF (5 << 0)
#define UART011_OEIM (1 << 10) /* overrun error interrupt mask */
#define UART011_BEIM (1 << 9) /* break error interrupt mask */
@@ -159,6 +162,7 @@
#define UART01x_FR_MODEM_ANY (UART01x_FR_DCD|UART01x_FR_DSR|UART01x_FR_CTS)
#ifndef __ASSEMBLY__
+struct amba_device; /* in uncompress this is included but amba/bus.h is not */
struct amba_pl010_data {
void (*set_mctrl)(struct amba_device *dev, void __iomem *base, unsigned int mctrl);
};
diff --git a/include/linux/atmel-mci.h b/include/linux/atmel-mci.h
index 2f1f957..57b1846 100644
--- a/include/linux/atmel-mci.h
+++ b/include/linux/atmel-mci.h
@@ -10,6 +10,7 @@
* @bus_width: Number of data lines wired up the slot
* @detect_pin: GPIO pin wired to the card detect switch
* @wp_pin: GPIO pin wired to the write protect sensor
+ * @detect_is_active_high: The state of the detect pin when it is active
*
* If a given slot is not present on the board, @bus_width should be
* set to 0. The other fields are ignored in this case.
@@ -24,6 +25,7 @@ struct mci_slot_pdata {
unsigned int bus_width;
int detect_pin;
int wp_pin;
+ bool detect_is_active_high;
};
/**
diff --git a/include/linux/auto_fs.h b/include/linux/auto_fs.h
index 6326585..7b09c83 100644
--- a/include/linux/auto_fs.h
+++ b/include/linux/auto_fs.h
@@ -14,13 +14,12 @@
#ifndef _LINUX_AUTO_FS_H
#define _LINUX_AUTO_FS_H
+#include <linux/types.h>
#ifdef __KERNEL__
#include <linux/fs.h>
#include <linux/limits.h>
-#include <linux/types.h>
#include <linux/ioctl.h>
#else
-#include <asm/types.h>
#include <sys/ioctl.h>
#endif /* __KERNEL__ */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 7b214fd..12737be 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -218,12 +218,12 @@ struct bio {
#define bio_sectors(bio) ((bio)->bi_size >> 9)
#define bio_empty_barrier(bio) (bio_barrier(bio) && !bio_has_data(bio) && !bio_discard(bio))
-static inline unsigned int bio_cur_sectors(struct bio *bio)
+static inline unsigned int bio_cur_bytes(struct bio *bio)
{
if (bio->bi_vcnt)
- return bio_iovec(bio)->bv_len >> 9;
+ return bio_iovec(bio)->bv_len;
else /* dataless requests such as discard */
- return bio->bi_size >> 9;
+ return bio->bi_size;
}
static inline void *bio_data(struct bio *bio)
@@ -279,7 +279,7 @@ static inline int bio_has_allocated_vec(struct bio *bio)
#define __BIO_SEG_BOUNDARY(addr1, addr2, mask) \
(((addr1) | (mask)) == (((addr2) - 1) | (mask)))
#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
- __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, (q)->seg_boundary_mask)
+ __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
#define BIO_SEG_BOUNDARY(q, b1, b2) \
BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
@@ -506,7 +506,7 @@ static inline int bio_has_data(struct bio *bio)
}
/*
- * BIO list managment for use by remapping drivers (e.g. DM or MD).
+ * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
*
* A bio_list anchors a singly-linked list of bios chained through the bi_next
* member of the bio. The bio_list also caches the last list member to allow
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index b4f71f1..0b1a6ca 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -166,19 +166,9 @@ struct request {
enum rq_cmd_type_bits cmd_type;
unsigned long atomic_flags;
- /* Maintain bio traversal state for part by part I/O submission.
- * hard_* are block layer internals, no driver should touch them!
- */
-
- sector_t sector; /* next sector to submit */
- sector_t hard_sector; /* next sector to complete */
- unsigned long nr_sectors; /* no. of sectors left to submit */
- unsigned long hard_nr_sectors; /* no. of sectors left to complete */
- /* no. of sectors left to submit in the current segment */
- unsigned int current_nr_sectors;
-
- /* no. of sectors left to complete in the current segment */
- unsigned int hard_cur_sectors;
+ /* the following two fields are internal, NEVER access directly */
+ sector_t __sector; /* sector cursor */
+ unsigned int __data_len; /* total data len */
struct bio *bio;
struct bio *biotail;
@@ -211,8 +201,8 @@ struct request {
unsigned short ioprio;
- void *special;
- char *buffer;
+ void *special; /* opaque pointer available for LLD use */
+ char *buffer; /* kaddr of the current segment if available */
int tag;
int errors;
@@ -226,10 +216,9 @@ struct request {
unsigned char __cmd[BLK_MAX_CDB];
unsigned char *cmd;
- unsigned int data_len;
unsigned int extra_len; /* length of alignment and padding */
unsigned int sense_len;
- void *data;
+ unsigned int resid_len; /* residual count */
void *sense;
unsigned long deadline;
@@ -318,6 +307,26 @@ struct blk_cmd_filter {
struct kobject kobj;
};
+struct queue_limits {
+ unsigned long bounce_pfn;
+ unsigned long seg_boundary_mask;
+
+ unsigned int max_hw_sectors;
+ unsigned int max_sectors;
+ unsigned int max_segment_size;
+ unsigned int physical_block_size;
+ unsigned int alignment_offset;
+ unsigned int io_min;
+ unsigned int io_opt;
+
+ unsigned short logical_block_size;
+ unsigned short max_hw_segments;
+ unsigned short max_phys_segments;
+
+ unsigned char misaligned;
+ unsigned char no_cluster;
+};
+
struct request_queue
{
/*
@@ -369,7 +378,6 @@ struct request_queue
/*
* queue needs bounce pages for pages above this limit
*/
- unsigned long bounce_pfn;
gfp_t bounce_gfp;
/*
@@ -398,14 +406,6 @@ struct request_queue
unsigned int nr_congestion_off;
unsigned int nr_batching;
- unsigned int max_sectors;
- unsigned int max_hw_sectors;
- unsigned short max_phys_segments;
- unsigned short max_hw_segments;
- unsigned short hardsect_size;
- unsigned int max_segment_size;
-
- unsigned long seg_boundary_mask;
void *dma_drain_buffer;
unsigned int dma_drain_size;
unsigned int dma_pad_mask;
@@ -415,12 +415,14 @@ struct request_queue
struct list_head tag_busy_list;
unsigned int nr_sorted;
- unsigned int in_flight;
+ unsigned int in_flight[2];
unsigned int rq_timeout;
struct timer_list timeout;
struct list_head timeout_list;
+ struct queue_limits limits;
+
/*
* sg stuff
*/
@@ -522,6 +524,11 @@ static inline void queue_flag_clear_unlocked(unsigned int flag,
__clear_bit(flag, &q->queue_flags);
}
+static inline int queue_in_flight(struct request_queue *q)
+{
+ return q->in_flight[0] + q->in_flight[1];
+}
+
static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
WARN_ON_ONCE(!queue_is_locked(q));
@@ -752,10 +759,17 @@ extern void blk_rq_init(struct request_queue *q, struct request *rq);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
+extern struct request *blk_make_request(struct request_queue *, struct bio *,
+ gfp_t);
extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
extern void blk_requeue_request(struct request_queue *, struct request *);
extern int blk_rq_check_limits(struct request_queue *q, struct request *rq);
extern int blk_lld_busy(struct request_queue *q);
+extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
+ struct bio_set *bs, gfp_t gfp_mask,
+ int (*bio_ctr)(struct bio *, struct bio *, void *),
+ void *data);
+extern void blk_rq_unprep_clone(struct request *rq);
extern int blk_insert_cloned_request(struct request_queue *q,
struct request *rq);
extern void blk_plug_device(struct request_queue *);
@@ -768,12 +782,6 @@ extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
/*
- * Temporary export, until SCSI gets fixed up.
- */
-extern int blk_rq_append_bio(struct request_queue *q, struct request *rq,
- struct bio *bio);
-
-/*
* A queue has just exitted congestion. Note this in the global counter of
* congested queues, and wake up anyone who was waiting for requests to be
* put back.
@@ -798,7 +806,6 @@ extern void blk_sync_queue(struct request_queue *q);
extern void __blk_stop_queue(struct request_queue *q);
extern void __blk_run_queue(struct request_queue *);
extern void blk_run_queue(struct request_queue *);
-extern void blk_start_queueing(struct request_queue *);
extern int blk_rq_map_user(struct request_queue *, struct request *,
struct rq_map_data *, void __user *, unsigned long,
gfp_t);
@@ -831,41 +838,73 @@ static inline void blk_run_address_space(struct address_space *mapping)
blk_run_backing_dev(mapping->backing_dev_info, NULL);
}
-extern void blkdev_dequeue_request(struct request *req);
+/*
+ * blk_rq_pos() : the current sector
+ * blk_rq_bytes() : bytes left in the entire request
+ * blk_rq_cur_bytes() : bytes left in the current segment
+ * blk_rq_sectors() : sectors left in the entire request
+ * blk_rq_cur_sectors() : sectors left in the current segment
+ */
+static inline sector_t blk_rq_pos(const struct request *rq)
+{
+ return rq->__sector;
+}
+
+static inline unsigned int blk_rq_bytes(const struct request *rq)
+{
+ return rq->__data_len;
+}
+
+static inline int blk_rq_cur_bytes(const struct request *rq)
+{
+ return rq->bio ? bio_cur_bytes(rq->bio) : 0;
+}
+
+static inline unsigned int blk_rq_sectors(const struct request *rq)
+{
+ return blk_rq_bytes(rq) >> 9;
+}
+
+static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
+{
+ return blk_rq_cur_bytes(rq) >> 9;
+}
+
+/*
+ * Request issue related functions.
+ */
+extern struct request *blk_peek_request(struct request_queue *q);
+extern void blk_start_request(struct request *rq);
+extern struct request *blk_fetch_request(struct request_queue *q);
/*
- * blk_end_request() and friends.
- * __blk_end_request() and end_request() must be called with
- * the request queue spinlock acquired.
+ * Request completion related functions.
+ *
+ * blk_update_request() completes given number of bytes and updates
+ * the request without completing it.
+ *
+ * blk_end_request() and friends. __blk_end_request() must be called
+ * with the request queue spinlock acquired.
*
* Several drivers define their own end_request and call
* blk_end_request() for parts of the original function.
* This prevents code duplication in drivers.
*/
-extern int blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern int __blk_end_request(struct request *rq, int error,
- unsigned int nr_bytes);
-extern int blk_end_bidi_request(struct request *rq, int error,
- unsigned int nr_bytes, unsigned int bidi_bytes);
-extern void end_request(struct request *, int);
-extern int blk_end_request_callback(struct request *rq, int error,
- unsigned int nr_bytes,
- int (drv_callback)(struct request *));
+extern bool blk_update_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern bool blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void blk_end_request_all(struct request *rq, int error);
+extern bool blk_end_request_cur(struct request *rq, int error);
+extern bool __blk_end_request(struct request *rq, int error,
+ unsigned int nr_bytes);
+extern void __blk_end_request_all(struct request *rq, int error);
+extern bool __blk_end_request_cur(struct request *rq, int error);
+
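Together with blk_fetch_request() above, these accessors replace the old elv_next_request()/rq->sector/end_request() pattern. A hedged sketch of the new fetch/complete loop inside a simple driver's request function; my_xfer() is a hypothetical helper that moves one segment:

	static void my_request_fn(struct request_queue *q)
	{
		struct request *rq = blk_fetch_request(q);

		while (rq) {
			int err = 0;

			/* Start sector and segment length now come from accessors
			 * instead of rq->sector / rq->current_nr_sectors. */
			if (my_xfer(blk_rq_pos(rq), blk_rq_cur_bytes(rq), rq->buffer))
				err = -EIO;

			/* Complete the current chunk; when the whole request is
			 * finished, fetch the next one. */
			if (!__blk_end_request_cur(rq, err))
				rq = blk_fetch_request(q);
		}
	}

The __blk_end_request_* variants assume the queue lock is already held, which is the case inside a request_fn.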
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
extern void blk_abort_request(struct request *);
extern void blk_abort_queue(struct request_queue *);
-extern void blk_update_request(struct request *rq, int error,
- unsigned int nr_bytes);
-
-/*
- * blk_end_request() takes bytes instead of sectors as a complete size.
- * blk_rq_bytes() returns bytes left to complete in the entire request.
- * blk_rq_cur_bytes() returns bytes left to complete in the current segment.
- */
-extern unsigned int blk_rq_bytes(struct request *rq);
-extern unsigned int blk_rq_cur_bytes(struct request *rq);
/*
* Access functions for manipulating queue properties
@@ -877,10 +916,20 @@ extern void blk_cleanup_queue(struct request_queue *);
extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_max_phys_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_hw_segments(struct request_queue *, unsigned short);
extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
-extern void blk_queue_hardsect_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_alignment_offset(struct request_queue *q,
+ unsigned int alignment);
+extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
+extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
+extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
+ sector_t offset);
+extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
+ sector_t offset);
extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
extern void blk_queue_dma_pad(struct request_queue *, unsigned int);
extern void blk_queue_update_dma_pad(struct request_queue *, unsigned int);
@@ -967,19 +1016,87 @@ extern void blk_set_cmd_filter_defaults(struct blk_cmd_filter *filter);
#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-static inline int queue_hardsect_size(struct request_queue *q)
+static inline unsigned long queue_bounce_pfn(struct request_queue *q)
+{
+ return q->limits.bounce_pfn;
+}
+
+static inline unsigned long queue_segment_boundary(struct request_queue *q)
+{
+ return q->limits.seg_boundary_mask;
+}
+
+static inline unsigned int queue_max_sectors(struct request_queue *q)
+{
+ return q->limits.max_sectors;
+}
+
+static inline unsigned int queue_max_hw_sectors(struct request_queue *q)
+{
+ return q->limits.max_hw_sectors;
+}
+
+static inline unsigned short queue_max_hw_segments(struct request_queue *q)
+{
+ return q->limits.max_hw_segments;
+}
+
+static inline unsigned short queue_max_phys_segments(struct request_queue *q)
+{
+ return q->limits.max_phys_segments;
+}
+
+static inline unsigned int queue_max_segment_size(struct request_queue *q)
+{
+ return q->limits.max_segment_size;
+}
+
+static inline unsigned short queue_logical_block_size(struct request_queue *q)
{
int retval = 512;
- if (q && q->hardsect_size)
- retval = q->hardsect_size;
+ if (q && q->limits.logical_block_size)
+ retval = q->limits.logical_block_size;
return retval;
}
-static inline int bdev_hardsect_size(struct block_device *bdev)
+static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+{
+ return queue_logical_block_size(bdev_get_queue(bdev));
+}
+
+static inline unsigned int queue_physical_block_size(struct request_queue *q)
+{
+ return q->limits.physical_block_size;
+}
+
+static inline unsigned int queue_io_min(struct request_queue *q)
+{
+ return q->limits.io_min;
+}
+
+static inline unsigned int queue_io_opt(struct request_queue *q)
+{
+ return q->limits.io_opt;
+}
+
+static inline int queue_alignment_offset(struct request_queue *q)
+{
+ if (q && q->limits.misaligned)
+ return -1;
+
+ if (q && q->limits.alignment_offset)
+ return q->limits.alignment_offset;
+
+ return 0;
+}
+
+static inline int queue_sector_alignment_offset(struct request_queue *q,
+ sector_t sector)
{
- return queue_hardsect_size(bdev_get_queue(bdev));
+ return ((sector << 9) - q->limits.alignment_offset)
+ & (q->limits.io_min - 1);
}
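
With the limits gathered into struct queue_limits, drivers keep using the setter functions while readers go through the accessors above. An illustrative setup, assuming q is the driver's request queue and the values are examples only:

	blk_queue_logical_block_size(q, 512);	/* replaces blk_queue_hardsect_size() */
	blk_queue_physical_block_size(q, 4096);
	blk_queue_io_min(q, 4096);
	blk_queue_io_opt(q, 64 * 1024);
	blk_queue_max_sectors(q, 256);

	/* readers use accessors instead of poking q->hardsect_size and friends */
	if (queue_physical_block_size(q) > queue_logical_block_size(q))
		printk(KERN_INFO "device prefers %u-byte aligned I/O\n",
		       queue_io_min(q));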
static inline int queue_dma_alignment(struct request_queue *q)
@@ -1109,6 +1226,8 @@ struct block_device_operations {
int (*direct_access) (struct block_device *, sector_t,
void **, unsigned long *);
int (*media_changed) (struct gendisk *);
+ unsigned long long (*set_capacity) (struct gendisk *,
+ unsigned long long);
int (*revalidate_disk) (struct gendisk *);
int (*getgeo)(struct block_device *, struct hd_geometry *);
struct module *owner;
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index d960889..7e4350e 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -116,9 +116,9 @@ struct blk_io_trace {
* The remap event
*/
struct blk_io_trace_remap {
- __be32 device;
__be32 device_from;
- __be64 sector;
+ __be32 device_to;
+ __be64 sector_from;
};
enum {
@@ -165,8 +165,9 @@ struct blk_trace {
extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
extern void blk_trace_shutdown(struct request_queue *);
-extern int do_blk_trace_setup(struct request_queue *q,
- char *name, dev_t dev, struct blk_user_trace_setup *buts);
+extern int do_blk_trace_setup(struct request_queue *q, char *name,
+ dev_t dev, struct block_device *bdev,
+ struct blk_user_trace_setup *buts);
extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
/**
@@ -193,22 +194,42 @@ extern void __trace_note_message(struct blk_trace *, const char *fmt, ...);
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
void *data, size_t len);
extern int blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
+ struct block_device *bdev,
char __user *arg);
extern int blk_trace_startstop(struct request_queue *q, int start);
extern int blk_trace_remove(struct request_queue *q);
+extern int blk_trace_init_sysfs(struct device *dev);
extern struct attribute_group blk_trace_attr_group;
#else /* !CONFIG_BLK_DEV_IO_TRACE */
-#define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
-#define blk_trace_shutdown(q) do { } while (0)
-#define do_blk_trace_setup(q, name, dev, buts) (-ENOTTY)
-#define blk_add_driver_data(q, rq, data, len) do {} while (0)
-#define blk_trace_setup(q, name, dev, arg) (-ENOTTY)
-#define blk_trace_startstop(q, start) (-ENOTTY)
-#define blk_trace_remove(q) (-ENOTTY)
-#define blk_add_trace_msg(q, fmt, ...) do { } while (0)
+# define blk_trace_ioctl(bdev, cmd, arg) (-ENOTTY)
+# define blk_trace_shutdown(q) do { } while (0)
+# define do_blk_trace_setup(q, name, dev, bdev, buts) (-ENOTTY)
+# define blk_add_driver_data(q, rq, data, len) do {} while (0)
+# define blk_trace_setup(q, name, dev, bdev, arg) (-ENOTTY)
+# define blk_trace_startstop(q, start) (-ENOTTY)
+# define blk_trace_remove(q) (-ENOTTY)
+# define blk_add_trace_msg(q, fmt, ...) do { } while (0)
+static inline int blk_trace_init_sysfs(struct device *dev)
+{
+ return 0;
+}
#endif /* CONFIG_BLK_DEV_IO_TRACE */
+
+#if defined(CONFIG_EVENT_TRACING) && defined(CONFIG_BLOCK)
+
+static inline int blk_cmd_buf_len(struct request *rq)
+{
+ return blk_pc_request(rq) ? rq->cmd_len * 3 : 1;
+}
+
+extern void blk_dump_cmd(char *buf, struct request *rq);
+extern void blk_fill_rwbs(char *rwbs, u32 rw, int bytes);
+extern void blk_fill_rwbs_rq(char *rwbs, struct request *rq);
+
+#endif /* CONFIG_EVENT_TRACING && CONFIG_BLOCK */
+
#endif /* __KERNEL__ */
#endif
diff --git a/include/linux/cb710.h b/include/linux/cb710.h
new file mode 100644
index 0000000..63bc9a4
--- /dev/null
+++ b/include/linux/cb710.h
@@ -0,0 +1,231 @@
+/*
+ * cb710/cb710.h
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_CB710_DRIVER_H
+#define LINUX_CB710_DRIVER_H
+
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/mmc/host.h>
+
+struct cb710_slot;
+
+typedef int (*cb710_irq_handler_t)(struct cb710_slot *);
+
+/* per-virtual-slot structure */
+struct cb710_slot {
+ struct platform_device pdev;
+ void __iomem *iobase;
+ cb710_irq_handler_t irq_handler;
+};
+
+/* per-device structure */
+struct cb710_chip {
+ struct pci_dev *pdev;
+ void __iomem *iobase;
+ unsigned platform_id;
+#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
+ atomic_t slot_refs_count;
+#endif
+ unsigned slot_mask;
+ unsigned slots;
+ spinlock_t irq_lock;
+ struct cb710_slot slot[0];
+};
+
+/* NOTE: cb710_chip.slots is modified only during device init/exit, and
+ * those paths are all serialized with respect to each other */
+
+/* cb710_chip.slot_mask values */
+#define CB710_SLOT_MMC 1
+#define CB710_SLOT_MS 2
+#define CB710_SLOT_SM 4
+
+/* slot port accessors - so the logic is more clear in the code */
+#define CB710_PORT_ACCESSORS(t) \
+static inline void cb710_write_port_##t(struct cb710_slot *slot, \
+ unsigned port, u##t value) \
+{ \
+ iowrite##t(value, slot->iobase + port); \
+} \
+ \
+static inline u##t cb710_read_port_##t(struct cb710_slot *slot, \
+ unsigned port) \
+{ \
+ return ioread##t(slot->iobase + port); \
+} \
+ \
+static inline void cb710_modify_port_##t(struct cb710_slot *slot, \
+ unsigned port, u##t set, u##t clear) \
+{ \
+ iowrite##t( \
+ (ioread##t(slot->iobase + port) & ~clear)|set, \
+ slot->iobase + port); \
+}
+
+CB710_PORT_ACCESSORS(8)
+CB710_PORT_ACCESSORS(16)
+CB710_PORT_ACCESSORS(32)
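
CB710_PORT_ACCESSORS(t) above expands into cb710_read_port_t(), cb710_write_port_t() and cb710_modify_port_t() for 8-, 16- and 32-bit accesses. A hypothetical slot driver would use them like this (the register offset and bits are made up for illustration):

	#define MY_CTRL_PORT	0x20	/* illustrative offset */
	#define MY_CTRL_ENABLE	0x01
	#define MY_CTRL_RESET	0x80

	static void my_enable_slot(struct cb710_slot *slot)
	{
		/* set ENABLE and clear RESET in one read-modify-write */
		cb710_modify_port_8(slot, MY_CTRL_PORT, MY_CTRL_ENABLE, MY_CTRL_RESET);
	}

	static u16 my_read_status(struct cb710_slot *slot)
	{
		return cb710_read_port_16(slot, MY_CTRL_PORT + 2);
	}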
+
+void cb710_pci_update_config_reg(struct pci_dev *pdev,
+ int reg, uint32_t and, uint32_t xor);
+void cb710_set_irq_handler(struct cb710_slot *slot,
+ cb710_irq_handler_t handler);
+
+/* some device struct walking */
+
+static inline struct cb710_slot *cb710_pdev_to_slot(
+ struct platform_device *pdev)
+{
+ return container_of(pdev, struct cb710_slot, pdev);
+}
+
+static inline struct cb710_chip *cb710_slot_to_chip(struct cb710_slot *slot)
+{
+ return dev_get_drvdata(slot->pdev.dev.parent);
+}
+
+static inline struct device *cb710_slot_dev(struct cb710_slot *slot)
+{
+ return &slot->pdev.dev;
+}
+
+static inline struct device *cb710_chip_dev(struct cb710_chip *chip)
+{
+ return &chip->pdev->dev;
+}
+
+/* debugging aids */
+
+#ifdef CONFIG_CB710_DEBUG
+void cb710_dump_regs(struct cb710_chip *chip, unsigned dump);
+#else
+#define cb710_dump_regs(c, d) do {} while (0)
+#endif
+
+#define CB710_DUMP_REGS_MMC 0x0F
+#define CB710_DUMP_REGS_MS 0x30
+#define CB710_DUMP_REGS_SM 0xC0
+#define CB710_DUMP_REGS_ALL 0xFF
+#define CB710_DUMP_REGS_MASK 0xFF
+
+#define CB710_DUMP_ACCESS_8 0x100
+#define CB710_DUMP_ACCESS_16 0x200
+#define CB710_DUMP_ACCESS_32 0x400
+#define CB710_DUMP_ACCESS_ALL 0x700
+#define CB710_DUMP_ACCESS_MASK 0x700
+
+#endif /* LINUX_CB710_DRIVER_H */
+/*
+ * cb710/sgbuf2.h
+ *
+ * Copyright by Michał Mirosław, 2008-2009
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef LINUX_CB710_SG_H
+#define LINUX_CB710_SG_H
+
+#include <linux/highmem.h>
+#include <linux/scatterlist.h>
+
+/**
+ * cb710_sg_miter_stop_writing - stop mapping iteration after writing
+ * @miter: sg mapping iter to be stopped
+ *
+ * Description:
+ * Stops mapping iterator @miter. @miter should have been started
+ * started using sg_miter_start(). A stopped iteration can be
+ * resumed by calling sg_miter_next() on it. This is useful when
+ * resources (kmap) need to be released during iteration.
+ *
+ * This is a convenience wrapper that will be optimized out for arches
+ * that don't need flush_kernel_dcache_page().
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+static inline void cb710_sg_miter_stop_writing(struct sg_mapping_iter *miter)
+{
+ if (miter->page)
+ flush_kernel_dcache_page(miter->page);
+ sg_miter_stop(miter);
+}
+
+/*
+ * 32-bit PIO mapping sg iterator
+ *
+ * Hides scatterlist access issues - fragment boundaries, alignment, page
+ * mapping - for drivers using 32-bit-word-at-a-time PIO (i.e. PCI devices
+ * without DMA support).
+ *
+ * Best-case reading (transfer from device):
+ * sg_miter_start();
+ * cb710_sg_dwiter_write_from_io();
+ * cb710_sg_miter_stop_writing();
+ *
+ * Best-case writing (transfer to device):
+ * sg_miter_start();
+ * cb710_sg_dwiter_read_to_io();
+ * sg_miter_stop();
+ */
+
+uint32_t cb710_sg_dwiter_read_next_block(struct sg_mapping_iter *miter);
+void cb710_sg_dwiter_write_next_block(struct sg_mapping_iter *miter, uint32_t data);
+
+/**
+ * cb710_sg_dwiter_write_from_io - transfer data to mapped buffer from 32-bit IO port
+ * @miter: sg mapping iter
+ * @port: PIO port - IO or MMIO address
+ * @count: number of 32-bit words to transfer
+ *
+ * Description:
+ * Reads @count 32-bit words from register @port and stores them in
+ * the buffer iterated by @miter. Data that would overflow the buffer
+ * is silently ignored. The iterator is advanced by 4*@count bytes
+ * or to the buffer's end, whichever is closer.
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+static inline void cb710_sg_dwiter_write_from_io(struct sg_mapping_iter *miter,
+ void __iomem *port, size_t count)
+{
+ while (count-- > 0)
+ cb710_sg_dwiter_write_next_block(miter, ioread32(port));
+}
+
+/**
+ * cb710_sg_dwiter_read_to_io - transfer data to 32-bit IO port from mapped buffer
+ * @miter: sg mapping iter
+ * @port: PIO port - IO or MMIO address
+ * @count: number of 32-bit words to transfer
+ *
+ * Description:
+ * Writes @count 32-bit words to register @port from the buffer iterated
+ * through @miter. If the buffer ends before @count words are written,
+ * the missing data is replaced by zeroes. @miter is advanced by 4*@count
+ * bytes or to the buffer's end, whichever is closer.
+ *
+ * Context:
+ * IRQ disabled if the SG_MITER_ATOMIC is set. Don't care otherwise.
+ */
+static inline void cb710_sg_dwiter_read_to_io(struct sg_mapping_iter *miter,
+ void __iomem *port, size_t count)
+{
+ while (count-- > 0)
+ iowrite32(cb710_sg_dwiter_read_next_block(miter), port);
+}
+
+#endif /* LINUX_CB710_SG_H */
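
Putting the helpers above together, the documented best-case read (device to memory) could look like this sketch, where sg, sg_len, fifo and nr_words are assumed to be supplied by the driver:

	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sg, sg_len, SG_MITER_ATOMIC);
	cb710_sg_dwiter_write_from_io(&miter, fifo, nr_words);	/* device -> buffer */
	cb710_sg_miter_stop_writing(&miter);

The write direction is symmetric: cb710_sg_dwiter_read_to_io() followed by a plain sg_miter_stop().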
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index fb45919..f389e31 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -28,6 +28,8 @@ int cdev_add(struct cdev *, dev_t, unsigned);
void cdev_del(struct cdev *);
+int cdev_index(struct inode *inode);
+
void cd_forget(struct inode *);
extern struct backing_dev_info directly_mappable_cdev_bdi;
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 1db9bbf..1d37f42 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -142,4 +142,17 @@ struct clk *clk_get_parent(struct clk *clk);
*/
struct clk *clk_get_sys(const char *dev_id, const char *con_id);
+/**
+ * clk_add_alias - add a new clock alias
+ * @alias: name for clock alias
+ * @alias_dev_name: device name
+ * @id: platform specific clock name
+ * @dev: device
+ *
+ * Allows using generic clock names for drivers by adding a new alias.
+ * Assumes clkdev; see clkdev.h for more info.
+ */
+int clk_add_alias(const char *alias, const char *alias_dev_name, char *id,
+ struct device *dev);
+
#endif
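
A board file could use clk_add_alias() to let a driver look its clock up under a generic name. A sketch with illustrative device and clock names:

	int ret;

	/* expose the platform clock "clk_mmc" to device "mmc0" as "mclk" */
	ret = clk_add_alias("mclk", "mmc0", "clk_mmc", NULL);
	if (ret)
		pr_warning("failed to add mmc clock alias\n");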
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 5a40d14..c56457c 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -288,7 +288,15 @@ static inline cycle_t clocksource_read(struct clocksource *cs)
*/
static inline int clocksource_enable(struct clocksource *cs)
{
- return cs->enable ? cs->enable(cs) : 0;
+ int ret = 0;
+
+ if (cs->enable)
+ ret = cs->enable(cs);
+
+ /* save mult_orig on enable */
+ cs->mult_orig = cs->mult;
+
+ return ret;
}
/**
diff --git a/include/linux/compat.h b/include/linux/compat.h
index f2ded21..af931ee 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -222,6 +222,8 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from);
int copy_siginfo_to_user32(struct compat_siginfo __user *to, siginfo_t *from);
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
+long compat_sys_rt_tgsigqueueinfo(compat_pid_t tgid, compat_pid_t pid, int sig,
+ struct compat_siginfo __user *uinfo);
static inline int compat_timeval_compare(struct compat_timeval *lhs,
struct compat_timeval *rhs)
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 37bcb50..04fb513 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -261,6 +261,11 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define __section(S) __attribute__ ((__section__(#S)))
#endif
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#ifndef __same_type
+# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#endif
+
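__same_type() is handy for compile-time type checks in macros. For example, a check that an argument is a true array (not a pointer) can be built with BUILD_BUG_ON_ZERO() from <linux/kernel.h>; the macro name below is illustrative, mirroring the kernel's __must_be_array():

	#define my_must_be_array(a) \
		BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))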
/*
* Prevent the compiler from merging or refetching accesses. The compiler
* is also forbidden from reordering successive instances of ACCESS_ONCE(),
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 9f31538..c5ac87c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1022,6 +1022,8 @@ typedef struct cpumask *cpumask_var_t;
bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
+bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node);
+bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags);
void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
@@ -1040,6 +1042,19 @@ static inline bool alloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
return true;
}
+static inline bool zalloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
+{
+ cpumask_clear(*mask);
+ return true;
+}
+
+static inline bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags,
+ int node)
+{
+ cpumask_clear(*mask);
+ return true;
+}
+
static inline void alloc_bootmem_cpumask_var(cpumask_var_t *mask)
{
}
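
The zalloc variants behave like alloc_cpumask_var()/alloc_cpumask_var_node() followed by cpumask_clear(); the inline stubs above for the non-offstack case simply clear the mask and return true. A minimal sketch (cpu is assumed to come from the caller):

	cpumask_var_t mask;

	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, mask);	/* starts from an all-clear mask */
	/* ... use mask ... */
	free_cpumask_var(mask);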
diff --git a/include/linux/cramfs_fs.h b/include/linux/cramfs_fs.h
index 3be4e5a..6fc2bed 100644
--- a/include/linux/cramfs_fs.h
+++ b/include/linux/cramfs_fs.h
@@ -2,9 +2,8 @@
#define __CRAMFS_H
#include <linux/types.h>
+#include <linux/magic.h>
-#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
-#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianess */
#define CRAMFS_SIGNATURE "Compressed ROMFS"
/*
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 3282ee4..4fa9996 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -13,6 +13,7 @@
#define _LINUX_CRED_H
#include <linux/capability.h>
+#include <linux/init.h>
#include <linux/key.h>
#include <asm/atomic.h>
diff --git a/include/linux/cyclades.h b/include/linux/cyclades.h
index 788850b..1fbdea4 100644
--- a/include/linux/cyclades.h
+++ b/include/linux/cyclades.h
@@ -142,19 +142,6 @@ struct CYZ_BOOT_CTRL {
#ifndef DP_WINDOW_SIZE
-/* #include "cyclomz.h" */
-/****************** ****************** *******************/
-/*
- * The data types defined below are used in all ZFIRM interface
- * data structures. They accomodate differences between HW
- * architectures and compilers.
- */
-
-typedef __u64 ucdouble; /* 64 bits, unsigned */
-typedef __u32 uclong; /* 32 bits, unsigned */
-typedef __u16 ucshort; /* 16 bits, unsigned */
-typedef __u8 ucchar; /* 8 bits, unsigned */
-
/*
* Memory Window Sizes
*/
@@ -507,16 +494,20 @@ struct ZFW_CTRL {
/* Per card data structure */
struct cyclades_card {
- void __iomem *base_addr;
- void __iomem *ctl_addr;
- int irq;
- unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
- unsigned int first_line; /* minor number of first channel on card */
- unsigned int nports; /* Number of ports in the card */
- int bus_index; /* address shift - 0 for ISA, 1 for PCI */
- int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
- spinlock_t card_lock;
- struct cyclades_port *ports;
+ void __iomem *base_addr;
+ union {
+ void __iomem *p9050;
+ struct RUNTIME_9060 __iomem *p9060;
+ } ctl_addr;
+ int irq;
+ unsigned int num_chips; /* 0 if card absent, -1 if Z/PCI, else Y */
+ unsigned int first_line; /* minor number of first channel on card */
+ unsigned int nports; /* Number of ports in the card */
+ int bus_index; /* address shift - 0 for ISA, 1 for PCI */
+ int intr_enabled; /* FW Interrupt flag - 0 disabled, 1 enabled */
+ u32 hw_ver;
+ spinlock_t card_lock;
+ struct cyclades_port *ports;
};
/***************************************
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 1515636..30b93b2 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -180,10 +180,12 @@ d_iput: no no no yes
#define DCACHE_REFERENCED 0x0008 /* Recently used, don't discard. */
#define DCACHE_UNHASHED 0x0010
-#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched */
+#define DCACHE_INOTIFY_PARENT_WATCHED 0x0020 /* Parent inode is watched by inotify */
#define DCACHE_COOKIE 0x0040 /* For use by dcookie subsystem */
+#define DCACHE_FSNOTIFY_PARENT_WATCHED 0x0080 /* Parent inode is watched by some fsnotify listener */
+
extern spinlock_t dcache_lock;
extern seqlock_t rename_lock;
@@ -351,6 +353,11 @@ static inline int d_unhashed(struct dentry *dentry)
return (dentry->d_flags & DCACHE_UNHASHED);
}
+static inline int d_unlinked(struct dentry *dentry)
+{
+ return d_unhashed(dentry) && !IS_ROOT(dentry);
+}
+
static inline struct dentry *dget_parent(struct dentry *dentry)
{
struct dentry *ret;
@@ -368,7 +375,7 @@ static inline int d_mountpoint(struct dentry *dentry)
return dentry->d_mounted;
}
-extern struct vfsmount *lookup_mnt(struct vfsmount *, struct dentry *);
+extern struct vfsmount *lookup_mnt(struct path *);
extern struct dentry *lookup_create(struct nameidata *nd, int is_dir);
extern int sysctl_vfs_cache_pressure;
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index ded2d7c..49c2362 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -149,7 +149,7 @@ struct io_restrictions {
unsigned max_hw_sectors;
unsigned max_sectors;
unsigned max_segment_size;
- unsigned short hardsect_size;
+ unsigned short logical_block_size;
unsigned short max_hw_segments;
unsigned short max_phys_segments;
unsigned char no_cluster; /* inverted so that 0 is default */
diff --git a/include/linux/device.h b/include/linux/device.h
index 5d5c197..a4a7b10 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -62,8 +62,6 @@ struct bus_type {
void (*shutdown)(struct device *dev);
int (*suspend)(struct device *dev, pm_message_t state);
- int (*suspend_late)(struct device *dev, pm_message_t state);
- int (*resume_early)(struct device *dev);
int (*resume)(struct device *dev);
struct dev_pm_ops *pm;
@@ -291,9 +289,6 @@ struct device_type {
int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
void (*release)(struct device *dev);
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
-
struct dev_pm_ops *pm;
};
diff --git a/include/linux/dlm.h b/include/linux/dlm.h
index b9cd386..0b3518c 100644
--- a/include/linux/dlm.h
+++ b/include/linux/dlm.h
@@ -81,8 +81,8 @@ struct dlm_lksb {
* the cluster, the calling node joins it.
*/
-int dlm_new_lockspace(char *name, int namelen, dlm_lockspace_t **lockspace,
- uint32_t flags, int lvblen);
+int dlm_new_lockspace(const char *name, int namelen,
+ dlm_lockspace_t **lockspace, uint32_t flags, int lvblen);
/*
* dlm_release_lockspace
diff --git a/include/linux/dma-debug.h b/include/linux/dma-debug.h
index 28d53cb..171ad8a 100644
--- a/include/linux/dma-debug.h
+++ b/include/linux/dma-debug.h
@@ -32,6 +32,8 @@ extern void dma_debug_add_bus(struct bus_type *bus);
extern void dma_debug_init(u32 num_entries);
+extern int dma_debug_resize_entries(u32 num_entries);
+
extern void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
@@ -91,6 +93,11 @@ static inline void dma_debug_init(u32 num_entries)
{
}
+static inline int dma_debug_resize_entries(u32 num_entries)
+{
+ return 0;
+}
+
static inline void debug_dma_map_page(struct device *dev, struct page *page,
size_t offset, size_t size,
int direction, dma_addr_t dma_addr,
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e397dc3..10ff5c4 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -108,6 +108,7 @@ struct irte {
};
#ifdef CONFIG_INTR_REMAP
extern int intr_remapping_enabled;
+extern int intr_remapping_supported(void);
extern int enable_intr_remapping(int);
extern void disable_intr_remapping(void);
extern int reenable_intr_remapping(int);
@@ -157,6 +158,8 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
}
#define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1)
+#define disable_intr_remapping() (0)
+#define reenable_intr_remapping(mode) (0)
#define intr_remapping_enabled (0)
#endif
diff --git a/include/linux/dnotify.h b/include/linux/dnotify.h
index 102a902..ecc0628 100644
--- a/include/linux/dnotify.h
+++ b/include/linux/dnotify.h
@@ -10,7 +10,7 @@
struct dnotify_struct {
struct dnotify_struct * dn_next;
- unsigned long dn_mask;
+ __u32 dn_mask;
int dn_fd;
struct file * dn_filp;
fl_owner_t dn_owner;
@@ -21,23 +21,18 @@ struct dnotify_struct {
#ifdef CONFIG_DNOTIFY
-extern void __inode_dir_notify(struct inode *, unsigned long);
+#define DNOTIFY_ALL_EVENTS (FS_DELETE | FS_DELETE_CHILD |\
+ FS_MODIFY | FS_MODIFY_CHILD |\
+ FS_ACCESS | FS_ACCESS_CHILD |\
+ FS_ATTRIB | FS_ATTRIB_CHILD |\
+ FS_CREATE | FS_DN_RENAME |\
+ FS_MOVED_FROM | FS_MOVED_TO)
+
extern void dnotify_flush(struct file *, fl_owner_t);
extern int fcntl_dirnotify(int, struct file *, unsigned long);
-extern void dnotify_parent(struct dentry *, unsigned long);
-
-static inline void inode_dir_notify(struct inode *inode, unsigned long event)
-{
- if (inode->i_dnotify_mask & (event))
- __inode_dir_notify(inode, event);
-}
#else
-static inline void __inode_dir_notify(struct inode *inode, unsigned long event)
-{
-}
-
static inline void dnotify_flush(struct file *filp, fl_owner_t id)
{
}
@@ -47,14 +42,6 @@ static inline int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)
return -EINVAL;
}
-static inline void dnotify_parent(struct dentry *dentry, unsigned long event)
-{
-}
-
-static inline void inode_dir_notify(struct inode *inode, unsigned long event)
-{
-}
-
#endif /* CONFIG_DNOTIFY */
#endif /* __KERNEL __ */
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index c59b769..1cb3372 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -103,10 +103,8 @@ extern int elv_merge(struct request_queue *, struct request **, struct bio *);
extern void elv_merge_requests(struct request_queue *, struct request *,
struct request *);
extern void elv_merged_request(struct request_queue *, struct request *, int);
-extern void elv_dequeue_request(struct request_queue *, struct request *);
extern void elv_requeue_request(struct request_queue *, struct request *);
extern int elv_queue_empty(struct request_queue *);
-extern struct request *elv_next_request(struct request_queue *q);
extern struct request *elv_former_request(struct request_queue *, struct request *);
extern struct request *elv_latter_request(struct request_queue *, struct request *);
extern int elv_register_queue(struct request_queue *q);
@@ -171,7 +169,7 @@ enum {
ELV_MQUEUE_MUST,
};
-#define rq_end_sector(rq) ((rq)->sector + (rq)->nr_sectors)
+#define rq_end_sector(rq) (blk_rq_pos(rq) + blk_rq_sectors(rq))
#define rb_entry_rq(node) rb_entry((node), struct request, rb_node)
/*
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 3b534e5..ede84fa 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -729,8 +729,8 @@ struct inode {
struct timespec i_atime;
struct timespec i_mtime;
struct timespec i_ctime;
- unsigned int i_blkbits;
blkcnt_t i_blocks;
+ unsigned int i_blkbits;
unsigned short i_bytes;
umode_t i_mode;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
@@ -751,13 +751,12 @@ struct inode {
struct block_device *i_bdev;
struct cdev *i_cdev;
};
- int i_cindex;
__u32 i_generation;
-#ifdef CONFIG_DNOTIFY
- unsigned long i_dnotify_mask; /* Directory notify events */
- struct dnotify_struct *i_dnotify; /* for directory notifications */
+#ifdef CONFIG_FSNOTIFY
+ __u32 i_fsnotify_mask; /* all events this inode cares about */
+ struct hlist_head i_fsnotify_mark_entries; /* fsnotify mark entries */
#endif
#ifdef CONFIG_INOTIFY
@@ -1321,7 +1320,7 @@ struct super_block {
struct rw_semaphore s_umount;
struct mutex s_lock;
int s_count;
- int s_need_sync_fs;
+ int s_need_sync;
atomic_t s_active;
#ifdef CONFIG_SECURITY
void *s_security;
@@ -1372,11 +1371,6 @@ struct super_block {
* generic_show_options()
*/
char *s_options;
-
- /*
- * storage for asynchronous operations
- */
- struct list_head s_async_list;
};
extern struct timespec current_fs_time(struct super_block *sb);
@@ -1800,7 +1794,7 @@ extern struct vfsmount *kern_mount_data(struct file_system_type *, void *data);
extern int may_umount_tree(struct vfsmount *);
extern int may_umount(struct vfsmount *);
extern long do_mount(char *, char *, char *, unsigned long, void *);
-extern struct vfsmount *collect_mounts(struct vfsmount *, struct dentry *);
+extern struct vfsmount *collect_mounts(struct path *);
extern void drop_collected_mounts(struct vfsmount *);
extern int vfs_statfs(struct dentry *, struct kstatfs *);
@@ -1947,8 +1941,6 @@ extern struct super_block *freeze_bdev(struct block_device *);
extern void emergency_thaw_all(void);
extern int thaw_bdev(struct block_device *bdev, struct super_block *sb);
extern int fsync_bdev(struct block_device *);
-extern int fsync_super(struct super_block *);
-extern int fsync_no_super(struct block_device *);
#else
static inline void bd_forget(struct inode *inode) {}
static inline int sync_blockdev(struct block_device *bdev) { return 0; }
@@ -1964,6 +1956,7 @@ static inline int thaw_bdev(struct block_device *bdev, struct super_block *sb)
return 0;
}
#endif
+extern int sync_filesystem(struct super_block *);
extern const struct file_operations def_blk_fops;
extern const struct file_operations def_chr_fops;
extern const struct file_operations bad_sock_fops;
@@ -2082,12 +2075,8 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int vfs_fsync(struct file *file, struct dentry *dentry, int datasync);
extern void sync_supers(void);
-extern void sync_filesystems(int wait);
-extern void __fsync_super(struct super_block *sb);
extern void emergency_sync(void);
extern void emergency_remount(void);
-extern int do_remount_sb(struct super_block *sb, int flags,
- void *data, int force);
#ifdef CONFIG_BLOCK
extern sector_t bmap(struct inode *, sector_t);
#endif
@@ -2205,6 +2194,8 @@ extern int generic_segment_checks(const struct iovec *iov,
/* fs/splice.c */
extern ssize_t generic_file_splice_read(struct file *, loff_t *,
struct pipe_inode_info *, size_t, unsigned int);
+extern ssize_t default_file_splice_read(struct file *, loff_t *,
+ struct pipe_inode_info *, size_t, unsigned int);
extern ssize_t generic_file_splice_write(struct pipe_inode_info *,
struct file *, loff_t *, size_t, unsigned int);
extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
@@ -2354,6 +2345,8 @@ extern void simple_release_fs(struct vfsmount **mount, int *count);
extern ssize_t simple_read_from_buffer(void __user *to, size_t count,
loff_t *ppos, const void *from, size_t available);
+extern int simple_fsync(struct file *, struct dentry *, int);
+
#ifdef CONFIG_MIGRATION
extern int buffer_migrate_page(struct address_space *,
struct page *, struct page *);
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index 00fbd5b..936f9aa 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -13,6 +13,7 @@
#include <linux/dnotify.h>
#include <linux/inotify.h>
+#include <linux/fsnotify_backend.h>
#include <linux/audit.h>
/*
@@ -22,19 +23,45 @@
static inline void fsnotify_d_instantiate(struct dentry *entry,
struct inode *inode)
{
+ __fsnotify_d_instantiate(entry, inode);
+
inotify_d_instantiate(entry, inode);
}
+/* Notify this dentry's parent about a child's events. */
+static inline void fsnotify_parent(struct dentry *dentry, __u32 mask)
+{
+ __fsnotify_parent(dentry, mask);
+
+ inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
+}
+
/*
* fsnotify_d_move - entry has been moved
* Called with dcache_lock and entry->d_lock held.
*/
static inline void fsnotify_d_move(struct dentry *entry)
{
+ /*
+ * On move we need to update entry->d_flags to indicate if the new parent
+ * cares about events from this entry.
+ */
+ __fsnotify_update_dcache_flags(entry);
+
inotify_d_move(entry);
}
/*
+ * fsnotify_link_count - inode's link count changed
+ */
+static inline void fsnotify_link_count(struct inode *inode)
+{
+ inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
+
+ fsnotify(inode, FS_ATTRIB, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+}
+
+/*
* fsnotify_move - file old_name at old_dir was moved to new_name at new_dir
*/
static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
@@ -42,42 +69,62 @@ static inline void fsnotify_move(struct inode *old_dir, struct inode *new_dir,
int isdir, struct inode *target, struct dentry *moved)
{
struct inode *source = moved->d_inode;
- u32 cookie = inotify_get_cookie();
+ u32 in_cookie = inotify_get_cookie();
+ u32 fs_cookie = fsnotify_get_cookie();
+ __u32 old_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_FROM);
+ __u32 new_dir_mask = (FS_EVENT_ON_CHILD | FS_MOVED_TO);
if (old_dir == new_dir)
- inode_dir_notify(old_dir, DN_RENAME);
- else {
- inode_dir_notify(old_dir, DN_DELETE);
- inode_dir_notify(new_dir, DN_CREATE);
- }
+ old_dir_mask |= FS_DN_RENAME;
- if (isdir)
+ if (isdir) {
isdir = IN_ISDIR;
- inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir,cookie,old_name,
+ old_dir_mask |= FS_IN_ISDIR;
+ new_dir_mask |= FS_IN_ISDIR;
+ }
+
+ inotify_inode_queue_event(old_dir, IN_MOVED_FROM|isdir, in_cookie, old_name,
source);
- inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, cookie, new_name,
+ inotify_inode_queue_event(new_dir, IN_MOVED_TO|isdir, in_cookie, new_name,
source);
+ fsnotify(old_dir, old_dir_mask, old_dir, FSNOTIFY_EVENT_INODE, old_name, fs_cookie);
+ fsnotify(new_dir, new_dir_mask, new_dir, FSNOTIFY_EVENT_INODE, new_name, fs_cookie);
+
if (target) {
inotify_inode_queue_event(target, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(target);
+
+ /* this is really a link_count change not a removal */
+ fsnotify_link_count(target);
}
if (source) {
inotify_inode_queue_event(source, IN_MOVE_SELF, 0, NULL, NULL);
+ fsnotify(source, FS_MOVE_SELF, moved->d_inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
audit_inode_child(new_name, moved, new_dir);
}
/*
+ * fsnotify_inode_delete - an inode is being evicted from the cache; clean up is needed
+ */
+static inline void fsnotify_inode_delete(struct inode *inode)
+{
+ __fsnotify_inode_delete(inode);
+}
+
+/*
* fsnotify_nameremove - a filename was removed from a directory
*/
static inline void fsnotify_nameremove(struct dentry *dentry, int isdir)
{
+ __u32 mask = FS_DELETE;
+
if (isdir)
- isdir = IN_ISDIR;
- dnotify_parent(dentry, DN_DELETE);
- inotify_dentry_parent_queue_event(dentry, IN_DELETE|isdir, 0, dentry->d_name.name);
+ mask |= FS_IN_ISDIR;
+
+ fsnotify_parent(dentry, mask);
}
/*
@@ -87,14 +134,9 @@ static inline void fsnotify_inoderemove(struct inode *inode)
{
inotify_inode_queue_event(inode, IN_DELETE_SELF, 0, NULL, NULL);
inotify_inode_is_dead(inode);
-}
-/*
- * fsnotify_link_count - inode's link count changed
- */
-static inline void fsnotify_link_count(struct inode *inode)
-{
- inotify_inode_queue_event(inode, IN_ATTRIB, 0, NULL, NULL);
+ fsnotify(inode, FS_DELETE_SELF, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
+ __fsnotify_inode_delete(inode);
}
/*
@@ -102,10 +144,11 @@ static inline void fsnotify_link_count(struct inode *inode)
*/
static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
{
- inode_dir_notify(inode, DN_CREATE);
inotify_inode_queue_event(inode, IN_CREATE, 0, dentry->d_name.name,
dentry->d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode);
+
+ fsnotify(inode, FS_CREATE, dentry->d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
}
/*
@@ -115,11 +158,12 @@ static inline void fsnotify_create(struct inode *inode, struct dentry *dentry)
*/
static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct dentry *new_dentry)
{
- inode_dir_notify(dir, DN_CREATE);
inotify_inode_queue_event(dir, IN_CREATE, 0, new_dentry->d_name.name,
inode);
fsnotify_link_count(inode);
audit_inode_child(new_dentry->d_name.name, new_dentry, dir);
+
+ fsnotify(dir, FS_CREATE, inode, FSNOTIFY_EVENT_INODE, new_dentry->d_name.name, 0);
}
/*
@@ -127,10 +171,13 @@ static inline void fsnotify_link(struct inode *dir, struct inode *inode, struct
*/
static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
{
- inode_dir_notify(inode, DN_CREATE);
- inotify_inode_queue_event(inode, IN_CREATE | IN_ISDIR, 0,
- dentry->d_name.name, dentry->d_inode);
+ __u32 mask = (FS_CREATE | FS_IN_ISDIR);
+ struct inode *d_inode = dentry->d_inode;
+
+ inotify_inode_queue_event(inode, mask, 0, dentry->d_name.name, d_inode);
audit_inode_child(dentry->d_name.name, dentry, inode);
+
+ fsnotify(inode, mask, d_inode, FSNOTIFY_EVENT_INODE, dentry->d_name.name, 0);
}
/*
@@ -139,14 +186,15 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
static inline void fsnotify_access(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- u32 mask = IN_ACCESS;
+ __u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- dnotify_parent(dentry, DN_ACCESS);
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -155,14 +203,15 @@ static inline void fsnotify_access(struct dentry *dentry)
static inline void fsnotify_modify(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- u32 mask = IN_MODIFY;
+ __u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- dnotify_parent(dentry, DN_MODIFY);
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -171,13 +220,15 @@ static inline void fsnotify_modify(struct dentry *dentry)
static inline void fsnotify_open(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- u32 mask = IN_OPEN;
+ __u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -187,15 +238,16 @@ static inline void fsnotify_close(struct file *file)
{
struct dentry *dentry = file->f_path.dentry;
struct inode *inode = dentry->d_inode;
- const char *name = dentry->d_name.name;
fmode_t mode = file->f_mode;
- u32 mask = (mode & FMODE_WRITE) ? IN_CLOSE_WRITE : IN_CLOSE_NOWRITE;
+ __u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- inotify_dentry_parent_queue_event(dentry, mask, 0, name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, file, FSNOTIFY_EVENT_FILE, NULL, 0);
}
/*
@@ -204,13 +256,15 @@ static inline void fsnotify_close(struct file *file)
static inline void fsnotify_xattr(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
- u32 mask = IN_ATTRIB;
+ __u32 mask = FS_ATTRIB;
if (S_ISDIR(inode->i_mode))
- mask |= IN_ISDIR;
+ mask |= FS_IN_ISDIR;
- inotify_dentry_parent_queue_event(dentry, mask, 0, dentry->d_name.name);
inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
/*
@@ -220,50 +274,37 @@ static inline void fsnotify_xattr(struct dentry *dentry)
static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
{
struct inode *inode = dentry->d_inode;
- int dn_mask = 0;
- u32 in_mask = 0;
+ __u32 mask = 0;
+
+ if (ia_valid & ATTR_UID)
+ mask |= FS_ATTRIB;
+ if (ia_valid & ATTR_GID)
+ mask |= FS_ATTRIB;
+ if (ia_valid & ATTR_SIZE)
+ mask |= FS_MODIFY;
- if (ia_valid & ATTR_UID) {
- in_mask |= IN_ATTRIB;
- dn_mask |= DN_ATTRIB;
- }
- if (ia_valid & ATTR_GID) {
- in_mask |= IN_ATTRIB;
- dn_mask |= DN_ATTRIB;
- }
- if (ia_valid & ATTR_SIZE) {
- in_mask |= IN_MODIFY;
- dn_mask |= DN_MODIFY;
- }
/* both times implies a utime(s) call */
if ((ia_valid & (ATTR_ATIME | ATTR_MTIME)) == (ATTR_ATIME | ATTR_MTIME))
- {
- in_mask |= IN_ATTRIB;
- dn_mask |= DN_ATTRIB;
- } else if (ia_valid & ATTR_ATIME) {
- in_mask |= IN_ACCESS;
- dn_mask |= DN_ACCESS;
- } else if (ia_valid & ATTR_MTIME) {
- in_mask |= IN_MODIFY;
- dn_mask |= DN_MODIFY;
- }
- if (ia_valid & ATTR_MODE) {
- in_mask |= IN_ATTRIB;
- dn_mask |= DN_ATTRIB;
- }
+ mask |= FS_ATTRIB;
+ else if (ia_valid & ATTR_ATIME)
+ mask |= FS_ACCESS;
+ else if (ia_valid & ATTR_MTIME)
+ mask |= FS_MODIFY;
+
+ if (ia_valid & ATTR_MODE)
+ mask |= FS_ATTRIB;
- if (dn_mask)
- dnotify_parent(dentry, dn_mask);
- if (in_mask) {
+ if (mask) {
if (S_ISDIR(inode->i_mode))
- in_mask |= IN_ISDIR;
- inotify_inode_queue_event(inode, in_mask, 0, NULL, NULL);
- inotify_dentry_parent_queue_event(dentry, in_mask, 0,
- dentry->d_name.name);
+ mask |= FS_IN_ISDIR;
+ inotify_inode_queue_event(inode, mask, 0, NULL, NULL);
+
+ fsnotify_parent(dentry, mask);
+ fsnotify(inode, mask, inode, FSNOTIFY_EVENT_INODE, NULL, 0);
}
}
-#ifdef CONFIG_INOTIFY /* inotify helpers */
+#if defined(CONFIG_INOTIFY) || defined(CONFIG_FSNOTIFY) /* notify helpers */
/*
* fsnotify_oldname_init - save off the old filename before we change it
@@ -281,7 +322,7 @@ static inline void fsnotify_oldname_free(const char *old_name)
kfree(old_name);
}
-#else /* CONFIG_INOTIFY */
+#else /* CONFIG_INOTIFY || CONFIG_FSNOTIFY */
static inline const char *fsnotify_oldname_init(const char *name)
{
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
new file mode 100644
index 0000000..44848aa
--- /dev/null
+++ b/include/linux/fsnotify_backend.h
@@ -0,0 +1,387 @@
+/*
+ * Filesystem access notification for Linux
+ *
+ * Copyright (C) 2008 Red Hat, Inc., Eric Paris <eparis@redhat.com>
+ */
+
+#ifndef __LINUX_FSNOTIFY_BACKEND_H
+#define __LINUX_FSNOTIFY_BACKEND_H
+
+#ifdef __KERNEL__
+
+#include <linux/idr.h> /* inotify uses this */
+#include <linux/fs.h> /* struct inode */
+#include <linux/list.h>
+#include <linux/path.h> /* struct path */
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/atomic.h>
+
+/*
+ * IN_* from inotify.h lines up EXACTLY with FS_*, this is so we can easily
+ * convert between them. dnotify only needs conversion at watch creation
+ * so no perf loss there. fanotify isn't defined yet, so it can use the
+ * holes if it needs more events.
+ */
+#define FS_ACCESS 0x00000001 /* File was accessed */
+#define FS_MODIFY 0x00000002 /* File was modified */
+#define FS_ATTRIB 0x00000004 /* Metadata changed */
+#define FS_CLOSE_WRITE 0x00000008 /* Writable file was closed */
+#define FS_CLOSE_NOWRITE 0x00000010 /* Unwritable file closed */
+#define FS_OPEN 0x00000020 /* File was opened */
+#define FS_MOVED_FROM 0x00000040 /* File was moved from X */
+#define FS_MOVED_TO 0x00000080 /* File was moved to Y */
+#define FS_CREATE 0x00000100 /* Subfile was created */
+#define FS_DELETE 0x00000200 /* Subfile was deleted */
+#define FS_DELETE_SELF 0x00000400 /* Self was deleted */
+#define FS_MOVE_SELF 0x00000800 /* Self was moved */
+
+#define FS_UNMOUNT 0x00002000 /* inode on umount fs */
+#define FS_Q_OVERFLOW 0x00004000 /* Event queued overflowed */
+#define FS_IN_IGNORED 0x00008000 /* last inotify event here */
+
+#define FS_IN_ISDIR 0x40000000 /* event occurred against dir */
+#define FS_IN_ONESHOT 0x80000000 /* only send event once */
+
+#define FS_DN_RENAME 0x10000000 /* file renamed */
+#define FS_DN_MULTISHOT 0x20000000 /* dnotify multishot */
+
+/* This inode cares about things that happen to its children. Always set for
+ * dnotify and inotify. */
+#define FS_EVENT_ON_CHILD 0x08000000
+
+/* This is a list of all events that may get sent to a parent based on fs events
+ * happening to inodes inside that directory */
+#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
+ FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
+ FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
+ FS_DELETE)
+
+/* listeners that hard code group numbers near the top */
+#define DNOTIFY_GROUP_NUM UINT_MAX
+#define INOTIFY_GROUP_NUM (DNOTIFY_GROUP_NUM-1)
+
+struct fsnotify_group;
+struct fsnotify_event;
+struct fsnotify_mark_entry;
+struct fsnotify_event_private_data;
+
+/*
+ * Each group must define these ops. The fsnotify infrastructure will call
+ * these operations for each relevant group.
+ *
+ * should_send_event - given a group, inode, and mask this function determines
+ * if the group is interested in this event.
+ * handle_event - main call for a group to handle an fs event
+ * free_group_priv - called when a group refcnt hits 0 to clean up the private union
+ * freeing_mark - this means that a mark has been flagged to die when everything
+ * finishes using it. The function is supplied with what must be a
+ * valid group and inode to use to clean up.
+ */
+struct fsnotify_ops {
+ bool (*should_send_event)(struct fsnotify_group *group, struct inode *inode, __u32 mask);
+ int (*handle_event)(struct fsnotify_group *group, struct fsnotify_event *event);
+ void (*free_group_priv)(struct fsnotify_group *group);
+ void (*freeing_mark)(struct fsnotify_mark_entry *entry, struct fsnotify_group *group);
+ void (*free_event_priv)(struct fsnotify_event_private_data *priv);
+};
+
+/*
+ * A group is a "thing" that wants to receive notification about filesystem
+ * events. The mask holds the subset of event types this group cares about.
+ * refcnt on a group is up to the implementor; the moment it drops to 0
+ * everything will be cleaned up.
+ */
+struct fsnotify_group {
+ /*
+ * global list of all groups receiving events from fsnotify.
+ * anchored by fsnotify_groups and protected by either fsnotify_grp_mutex
+ * or fsnotify_grp_srcu depending on write vs read.
+ */
+ struct list_head group_list;
+
+ /*
+ * Defines all of the event types in which this group is interested.
+ * This mask is a bitwise OR of the FS_* events from above. Each time
+ * this mask changes for a group (if it changes) the correct functions
+ * must be called to update the global structures which indicate global
+ * interest in event types.
+ */
+ __u32 mask;
+
+ /*
+ * How the refcnt is used is up to each group. When the refcnt hits 0
+ * fsnotify will clean up all of the resources associated with this group.
+ * As an example, the dnotify group will always have a refcnt=1 and that
+ * will never change. Inotify, on the other hand, has a group per
+ * inotify_init() and the refcnt will hit 0 only when that fd has been
+ * closed.
+ */
+ atomic_t refcnt; /* things with interest in this group */
+ unsigned int group_num; /* simply prevents accidental group collision */
+
+ const struct fsnotify_ops *ops; /* how this group handles things */
+
+ /* needed to send notification to userspace */
+ struct mutex notification_mutex; /* protect the notification_list */
+ struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
+ wait_queue_head_t notification_waitq; /* read() on the notification file blocks on this waitq */
+ unsigned int q_len; /* events on the queue */
+ unsigned int max_events; /* maximum events allowed on the list */
+
+ /* stores all fastpath entries associated with this group so they can be cleaned on unregister */
+ spinlock_t mark_lock; /* protect mark_entries list */
+ atomic_t num_marks; /* 1 for each mark entry and 1 for not being
+ * past the point of no return when freeing
+ * a group */
+ struct list_head mark_entries; /* all inode mark entries for this group */
+
+ /* prevents double list_del of group_list. protected by global fsnotify_grp_mutex */
+ bool on_group_list;
+
+ /* groups can define private fields here or use the void *private */
+ union {
+ void *private;
+#ifdef CONFIG_INOTIFY_USER
+ struct inotify_group_private_data {
+ spinlock_t idr_lock;
+ struct idr idr;
+ u32 last_wd;
+ struct fasync_struct *fa; /* async notification */
+ struct user_struct *user;
+ } inotify_data;
+#endif
+ };
+};
+
+/*
+ * A single event can be queued in multiple group->notification_lists.
+ *
+ * Each group->notification_list will point to an event_holder which in turn points
+ * to the actual event that needs to be sent to userspace.
+ *
+ * It seemed cheaper to create a refcnt'd event and a small holder for every group
+ * than to create a different event for every group.
+ *
+ */
+struct fsnotify_event_holder {
+ struct fsnotify_event *event;
+ struct list_head event_list;
+};
+
+/*
+ * Inotify needs to tack data onto an event. This struct lets us later find the
+ * correct private data of the correct group.
+ */
+struct fsnotify_event_private_data {
+ struct fsnotify_group *group;
+ struct list_head event_list;
+};
+
+/*
+ * all of the information about the original object we now want to send to
+ * a group. If you want to carry more info from the accessing task to the
+ * listener this structure is where you need to be adding fields.
+ */
+struct fsnotify_event {
+ /*
+ * If we create an event we are also likely going to need a holder
+ * to link to a group. So embed one holder in the event. Means only
+ * one allocation for the common case where we only have one group
+ */
+ struct fsnotify_event_holder holder;
+ spinlock_t lock; /* protection for the associated event_holder and private_list */
+ /* to_tell may ONLY be dereferenced during handle_event(). */
+ struct inode *to_tell; /* either the inode the event happened to or its parent */
+ /*
+ * depending on the event type we should have either a path or inode
+ * We hold a reference on path, but NOT on inode. Since we have the ref on
+ * the path, it may be dereferenced at any point during this object's
+ * lifetime. That reference is dropped when this object's refcnt hits
+ * 0. If this event contains an inode instead of a path, the inode may
+ * ONLY be used during handle_event().
+ */
+ union {
+ struct path path;
+ struct inode *inode;
+ };
+/* when calling fsnotify tell it if the data is a path or inode */
+#define FSNOTIFY_EVENT_NONE 0
+#define FSNOTIFY_EVENT_PATH 1
+#define FSNOTIFY_EVENT_INODE 2
+#define FSNOTIFY_EVENT_FILE 3
+ int data_type; /* which of the above union we have */
+ atomic_t refcnt; /* how many groups still are using/need to send this event */
+ __u32 mask; /* the type of access, bitwise OR for FS_* event types */
+
+ u32 sync_cookie; /* used to correlate events, namely inotify mv events */
+ char *file_name;
+ size_t name_len;
+
+ struct list_head private_data_list; /* groups can store private data here */
+};
+
+/*
+ * a mark is simply an entry attached to an in core inode which allows an
+ * fsnotify listener to indicate they are either no longer interested in events
+ * of a type matching mask or only interested in those events.
+ *
+ * these are flushed when an inode is evicted from core and may be flushed
+ * when the inode is modified (as seen by fsnotify_access). Some fsnotify users
+ * (such as dnotify) will flush these when the open fd is closed and not at
+ * inode eviction or modification.
+ */
+struct fsnotify_mark_entry {
+ __u32 mask; /* mask this mark entry is for */
+ /* we hold ref for each i_list and g_list. also one ref for each 'thing'
+ * in kernel that found and may be using this mark. */
+ atomic_t refcnt; /* active things looking at this mark */
+ struct inode *inode; /* inode this entry is associated with */
+ struct fsnotify_group *group; /* group this mark entry is for */
+ struct hlist_node i_list; /* list of mark_entries by inode->i_fsnotify_mark_entries */
+ struct list_head g_list; /* list of mark_entries by group->i_fsnotify_mark_entries */
+ spinlock_t lock; /* protect group, inode, and killme */
+ struct list_head free_i_list; /* tmp list used when freeing this mark */
+ struct list_head free_g_list; /* tmp list used when freeing this mark */
+ void (*free_mark)(struct fsnotify_mark_entry *entry); /* called on final put+free */
+};
+
+#ifdef CONFIG_FSNOTIFY
+
+/* called from the vfs helpers */
+
+/* main fsnotify call to send events */
+extern void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const char *name, u32 cookie);
+extern void __fsnotify_parent(struct dentry *dentry, __u32 mask);
+extern void __fsnotify_inode_delete(struct inode *inode);
+extern u32 fsnotify_get_cookie(void);
+
+static inline int fsnotify_inode_watches_children(struct inode *inode)
+{
+ /* FS_EVENT_ON_CHILD is set if the inode may care */
+ if (!(inode->i_fsnotify_mask & FS_EVENT_ON_CHILD))
+ return 0;
+ /* this inode might care about child events, does it care about the
+ * specific set of events that can happen on a child? */
+ return inode->i_fsnotify_mask & FS_EVENTS_POSS_ON_CHILD;
+}
+
+/*
+ * Update the dentry with a flag indicating whether its parent is interested in
+ * receiving filesystem events that happen to this dentry->d_inode.
+ */
+static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
+{
+ struct dentry *parent;
+
+ assert_spin_locked(&dcache_lock);
+ assert_spin_locked(&dentry->d_lock);
+
+ parent = dentry->d_parent;
+ if (fsnotify_inode_watches_children(parent->d_inode))
+ dentry->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;
+ else
+ dentry->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;
+}
+
+/*
+ * __fsnotify_d_instantiate - instantiate a dentry for inode
+ * Called with dcache_lock held.
+ */
+static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+{
+ if (!inode)
+ return;
+
+ assert_spin_locked(&dcache_lock);
+
+ spin_lock(&dentry->d_lock);
+ __fsnotify_update_dcache_flags(dentry);
+ spin_unlock(&dentry->d_lock);
+}
+
+/* called from fsnotify listeners, such as fanotify or dnotify */
+
+/* must call when a group changes its ->mask */
+extern void fsnotify_recalc_global_mask(void);
+/* get a reference to an existing or create a new group */
+extern struct fsnotify_group *fsnotify_obtain_group(unsigned int group_num,
+ __u32 mask,
+ const struct fsnotify_ops *ops);
+/* run all marks associated with this group and update group->mask */
+extern void fsnotify_recalc_group_mask(struct fsnotify_group *group);
+/* drop reference on a group from fsnotify_obtain_group */
+extern void fsnotify_put_group(struct fsnotify_group *group);
+
+/* take a reference to an event */
+extern void fsnotify_get_event(struct fsnotify_event *event);
+extern void fsnotify_put_event(struct fsnotify_event *event);
+/* find private data previously attached to an event and unlink it */
+extern struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group,
+ struct fsnotify_event *event);
+
+/* attach the event to the group notification queue */
+extern int fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event,
+ struct fsnotify_event_private_data *priv);
+/* true if the group notification queue is empty */
+extern bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group);
+/* return, but do not dequeue the first event on the notification queue */
+extern struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group);
+/* return AND dequeue the first event on the notification queue */
+extern struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group);
+
+/* functions used to manipulate the marks attached to inodes */
+
+/* run all marks associated with an inode and update inode->i_fsnotify_mask */
+extern void fsnotify_recalc_inode_mask(struct inode *inode);
+extern void fsnotify_init_mark(struct fsnotify_mark_entry *entry, void (*free_mark)(struct fsnotify_mark_entry *entry));
+/* find (and take a reference) to a mark associated with group and inode */
+extern struct fsnotify_mark_entry *fsnotify_find_mark_entry(struct fsnotify_group *group, struct inode *inode);
+/* attach the mark to both the group and the inode */
+extern int fsnotify_add_mark(struct fsnotify_mark_entry *entry, struct fsnotify_group *group, struct inode *inode);
+/* given a mark, flag it to be freed when all references are dropped */
+extern void fsnotify_destroy_mark_by_entry(struct fsnotify_mark_entry *entry);
+/* run all the marks in a group, and flag them to be freed */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group);
+extern void fsnotify_get_mark(struct fsnotify_mark_entry *entry);
+extern void fsnotify_put_mark(struct fsnotify_mark_entry *entry);
+extern void fsnotify_unmount_inodes(struct list_head *list);
+
+/* put here because inotify does some weird stuff when destroying watches */
+extern struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask,
+ void *data, int data_is, const char *name,
+ u32 cookie);
+
+#else
+
+static inline void fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
+ const char *name, u32 cookie)
+{}
+
+static inline void __fsnotify_parent(struct dentry *dentry, __u32 mask)
+{}
+
+static inline void __fsnotify_inode_delete(struct inode *inode)
+{}
+
+static inline void __fsnotify_update_dcache_flags(struct dentry *dentry)
+{}
+
+static inline void __fsnotify_d_instantiate(struct dentry *dentry, struct inode *inode)
+{}
+
+static inline u32 fsnotify_get_cookie(void)
+{
+ return 0;
+}
+
+static inline void fsnotify_unmount_inodes(struct list_head *list)
+{}
+
+#endif /* CONFIG_FSNOTIFY */
+
+#endif /* __KERNEL__ */
+
+#endif /* __LINUX_FSNOTIFY_BACKEND_H */
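A hedged sketch of how an in-kernel listener might drive the group API declared above, assuming a tree with this series applied; the group number, mask, and callback bodies are hypothetical, and the error handling assumes fsnotify_obtain_group() reports failure via the usual ERR_PTR() convention.

#include <linux/module.h>
#include <linux/err.h>
#include <linux/fsnotify_backend.h>

/* hypothetical group number, kept clear of the hard-coded dnotify/inotify ones */
#define EXAMPLE_GROUP_NUM	(INOTIFY_GROUP_NUM - 1)

static bool example_should_send_event(struct fsnotify_group *group,
				      struct inode *inode, __u32 mask)
{
	/* deliver only the event types this group asked for */
	return (mask & group->mask) != 0;
}

static int example_handle_event(struct fsnotify_group *group,
				struct fsnotify_event *event)
{
	/* to_tell may only be dereferenced here, so just log the mask */
	pr_info("example: event mask 0x%x\n", event->mask);
	return 0;
}

static const struct fsnotify_ops example_ops = {
	.should_send_event	= example_should_send_event,
	.handle_event		= example_handle_event,
	/* no private state: free_group_priv/freeing_mark/free_event_priv stay NULL */
};

static struct fsnotify_group *example_group;

static int __init example_init(void)
{
	example_group = fsnotify_obtain_group(EXAMPLE_GROUP_NUM,
					      FS_CREATE | FS_DELETE,
					      &example_ops);
	if (IS_ERR(example_group))
		return PTR_ERR(example_group);
	return 0;
}

static void __exit example_exit(void)
{
	fsnotify_put_group(example_group);	/* drop the obtain_group reference */
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");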
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8a0c2f2..39b95c5 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -233,8 +233,6 @@ extern int ftrace_arch_read_dyn_info(char *buf, int size);
extern int skip_trace(unsigned long ip);
-extern void ftrace_release(void *start, unsigned long size);
-
extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else
@@ -325,13 +323,8 @@ static inline void __ftrace_enabled_restore(int enabled)
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
-extern void ftrace_init_module(struct module *mod,
- unsigned long *start, unsigned long *end);
#else
static inline void ftrace_init(void) { }
-static inline void
-ftrace_init_module(struct module *mod,
- unsigned long *start, unsigned long *end) { }
#endif
/*
@@ -368,6 +361,7 @@ struct ftrace_ret_stack {
unsigned long ret;
unsigned long func;
unsigned long long calltime;
+ unsigned long long subtime;
};
/*
@@ -379,8 +373,6 @@ extern void return_to_handler(void);
extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
-extern void
-ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
/*
* Sometimes we don't want to trace a function with the function
@@ -496,8 +488,15 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
extern int ftrace_dump_on_oops;
+#ifdef CONFIG_PREEMPT
+#define INIT_TRACE_RECURSION .trace_recursion = 0,
+#endif
+
#endif /* CONFIG_TRACING */
+#ifndef INIT_TRACE_RECURSION
+#define INIT_TRACE_RECURSION
+#endif
#ifdef CONFIG_HW_BRANCH_TRACER
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
new file mode 100644
index 0000000..5c093ff
--- /dev/null
+++ b/include/linux/ftrace_event.h
@@ -0,0 +1,172 @@
+#ifndef _LINUX_FTRACE_EVENT_H
+#define _LINUX_FTRACE_EVENT_H
+
+#include <linux/trace_seq.h>
+#include <linux/ring_buffer.h>
+#include <linux/percpu.h>
+
+struct trace_array;
+struct tracer;
+struct dentry;
+
+DECLARE_PER_CPU(struct trace_seq, ftrace_event_seq);
+
+struct trace_print_flags {
+ unsigned long mask;
+ const char *name;
+};
+
+const char *ftrace_print_flags_seq(struct trace_seq *p, const char *delim,
+ unsigned long flags,
+ const struct trace_print_flags *flag_array);
+
+const char *ftrace_print_symbols_seq(struct trace_seq *p, unsigned long val,
+ const struct trace_print_flags *symbol_array);
+
+/*
+ * The trace entry - the most basic unit of tracing. This is what
+ * is printed in the end as a single line in the trace output, such as:
+ *
+ * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
+ */
+struct trace_entry {
+ unsigned short type;
+ unsigned char flags;
+ unsigned char preempt_count;
+ int pid;
+ int tgid;
+};
+
+#define FTRACE_MAX_EVENT \
+ ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
+
+/*
+ * Trace iterator - used by printout routines who present trace
+ * results to users and which routines might sleep, etc:
+ */
+struct trace_iterator {
+ struct trace_array *tr;
+ struct tracer *trace;
+ void *private;
+ int cpu_file;
+ struct mutex mutex;
+ struct ring_buffer_iter *buffer_iter[NR_CPUS];
+ unsigned long iter_flags;
+
+ /* The below is zeroed out in pipe_read */
+ struct trace_seq seq;
+ struct trace_entry *ent;
+ int cpu;
+ u64 ts;
+
+ loff_t pos;
+ long idx;
+
+ cpumask_var_t started;
+};
+
+
+typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
+ int flags);
+struct trace_event {
+ struct hlist_node node;
+ struct list_head list;
+ int type;
+ trace_print_func trace;
+ trace_print_func raw;
+ trace_print_func hex;
+ trace_print_func binary;
+};
+
+extern int register_ftrace_event(struct trace_event *event);
+extern int unregister_ftrace_event(struct trace_event *event);
+
+/* Return values for print_line callback */
+enum print_line_t {
+ TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
+ TRACE_TYPE_HANDLED = 1,
+ TRACE_TYPE_UNHANDLED = 2, /* Relay to other output functions */
+ TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
+};
+
+
+struct ring_buffer_event *
+trace_current_buffer_lock_reserve(int type, unsigned long len,
+ unsigned long flags, int pc);
+void trace_current_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_nowake_buffer_unlock_commit(struct ring_buffer_event *event,
+ unsigned long flags, int pc);
+void trace_current_buffer_discard_commit(struct ring_buffer_event *event);
+
+void tracing_record_cmdline(struct task_struct *tsk);
+
+struct ftrace_event_call {
+ struct list_head list;
+ char *name;
+ char *system;
+ struct dentry *dir;
+ struct trace_event *event;
+ int enabled;
+ int (*regfunc)(void);
+ void (*unregfunc)(void);
+ int id;
+ int (*raw_init)(void);
+ int (*show_format)(struct trace_seq *s);
+ int (*define_fields)(void);
+ struct list_head fields;
+ int filter_active;
+ void *filter;
+ void *mod;
+
+#ifdef CONFIG_EVENT_PROFILE
+ atomic_t profile_count;
+ int (*profile_enable)(struct ftrace_event_call *);
+ void (*profile_disable)(struct ftrace_event_call *);
+#endif
+};
+
+#define MAX_FILTER_PRED 32
+#define MAX_FILTER_STR_VAL 128
+
+extern int init_preds(struct ftrace_event_call *call);
+extern void destroy_preds(struct ftrace_event_call *call);
+extern int filter_match_preds(struct ftrace_event_call *call, void *rec);
+extern int filter_current_check_discard(struct ftrace_event_call *call,
+ void *rec,
+ struct ring_buffer_event *event);
+
+extern int trace_define_field(struct ftrace_event_call *call, char *type,
+ char *name, int offset, int size, int is_signed);
+
+#define is_signed_type(type) (((type)(-1)) < 0)
+
+int trace_set_clr_event(const char *system, const char *event, int set);
+
+/*
+ * The double __builtin_constant_p is because gcc will give us an error
+ * if we try to initialize the static variable with fmt when it is not a
+ * constant, even with the outer if statement optimized out.
+ */
+#define event_trace_printk(ip, fmt, args...) \
+do { \
+ __trace_printk_check_format(fmt, ##args); \
+ tracing_record_cmdline(current); \
+ if (__builtin_constant_p(fmt)) { \
+ static const char *trace_printk_fmt \
+ __attribute__((section("__trace_printk_fmt"))) = \
+ __builtin_constant_p(fmt) ? fmt : NULL; \
+ \
+ __trace_bprintk(ip, trace_printk_fmt, ##args); \
+ } else \
+ __trace_printk(ip, fmt, ##args); \
+} while (0)
+
+#define __common_field(type, item, is_signed) \
+ ret = trace_define_field(event_call, #type, "common_" #item, \
+ offsetof(typeof(field.ent), item), \
+ sizeof(field.ent.item), is_signed); \
+ if (ret) \
+ return ret;
+
+#endif /* _LINUX_FTRACE_EVENT_H */
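A minimal user-space sketch of two compile-time tricks used above: is_signed_type() relies on (type)(-1) comparing below zero for signed types, and FTRACE_MAX_EVENT derives the largest event id from the width of trace_entry.type; the struct here is a local stand-in that only reproduces the field type.

#include <stdio.h>

/* copied from ftrace_event.h in this series */
#define is_signed_type(type)	(((type)(-1)) < 0)

/* stand-in: only the type of .type matters for the size computation */
struct trace_entry {
	unsigned short	type;
};

#define FTRACE_MAX_EVENT \
	((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)

int main(void)
{
	printf("is_signed_type(int)          = %d\n", is_signed_type(int));
	printf("is_signed_type(unsigned int) = %d\n", is_signed_type(unsigned int));
	printf("FTRACE_MAX_EVENT             = %d\n", FTRACE_MAX_EVENT);	/* 65535 */
	return 0;
}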
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 162e5de..d41ed59 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -121,6 +121,13 @@ struct fuse_file_lock {
#define FUSE_BIG_WRITES (1 << 5)
/**
+ * CUSE INIT request/reply flags
+ *
+ * CUSE_UNRESTRICTED_IOCTL: use unrestricted ioctl
+ */
+#define CUSE_UNRESTRICTED_IOCTL (1 << 0)
+
+/**
* Release flags
*/
#define FUSE_RELEASE_FLUSH (1 << 0)
@@ -210,6 +217,9 @@ enum fuse_opcode {
FUSE_DESTROY = 38,
FUSE_IOCTL = 39,
FUSE_POLL = 40,
+
+ /* CUSE specific operations */
+ CUSE_INIT = 4096,
};
enum fuse_notify_code {
@@ -401,6 +411,27 @@ struct fuse_init_out {
__u32 max_write;
};
+#define CUSE_INIT_INFO_MAX 4096
+
+struct cuse_init_in {
+ __u32 major;
+ __u32 minor;
+ __u32 unused;
+ __u32 flags;
+};
+
+struct cuse_init_out {
+ __u32 major;
+ __u32 minor;
+ __u32 unused;
+ __u32 flags;
+ __u32 max_read;
+ __u32 max_write;
+ __u32 dev_major; /* chardev major */
+ __u32 dev_minor; /* chardev minor */
+ __u32 spare[10];
+};
+
struct fuse_interrupt_in {
__u64 unique;
};
diff --git a/include/linux/futex.h b/include/linux/futex.h
index 3bf5bb5..34956c8 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -23,6 +23,8 @@ union ktime;
#define FUTEX_TRYLOCK_PI 8
#define FUTEX_WAIT_BITSET 9
#define FUTEX_WAKE_BITSET 10
+#define FUTEX_WAIT_REQUEUE_PI 11
+#define FUTEX_CMP_REQUEUE_PI 12
#define FUTEX_PRIVATE_FLAG 128
#define FUTEX_CLOCK_REALTIME 256
@@ -38,6 +40,10 @@ union ktime;
#define FUTEX_TRYLOCK_PI_PRIVATE (FUTEX_TRYLOCK_PI | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAIT_BITSET_PRIVATE (FUTEX_WAIT_BITS | FUTEX_PRIVATE_FLAG)
#define FUTEX_WAKE_BITSET_PRIVATE (FUTEX_WAKE_BITS | FUTEX_PRIVATE_FLAG)
+#define FUTEX_WAIT_REQUEUE_PI_PRIVATE (FUTEX_WAIT_REQUEUE_PI | \
+ FUTEX_PRIVATE_FLAG)
+#define FUTEX_CMP_REQUEUE_PI_PRIVATE (FUTEX_CMP_REQUEUE_PI | \
+ FUTEX_PRIVATE_FLAG)
/*
* Support for robust futexes: the kernel cleans up held futexes at
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index a1a28ca..7cbd38d 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -90,6 +90,7 @@ struct disk_stats {
struct hd_struct {
sector_t start_sect;
sector_t nr_sects;
+ sector_t alignment_offset;
struct device __dev;
struct kobject *holder_dir;
int policy, partno;
@@ -113,6 +114,7 @@ struct hd_struct {
#define GENHD_FL_UP 16
#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
+#define GENHD_FL_NATIVE_CAPACITY 128
#define BLK_SCSI_MAX_CMDS (256)
#define BLK_SCSI_CMD_PER_LONG (BLK_SCSI_MAX_CMDS / (sizeof(long) * 8))
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0bbc15f..3760e7c 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -85,6 +85,9 @@ struct vm_area_struct;
__GFP_NOWARN|__GFP_REPEAT|__GFP_NOFAIL|\
__GFP_NORETRY|__GFP_NOMEMALLOC)
+/* Control slab gfp mask during early boot */
+#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+
/* Control allocation constraints */
#define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
diff --git a/include/linux/hid.h b/include/linux/hid.h
index a72876e..53489fd 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -238,6 +238,42 @@ struct hid_item {
#define HID_GD_RIGHT 0x00010092
#define HID_GD_LEFT 0x00010093
+#define HID_DG_DIGITIZER 0x000d0001
+#define HID_DG_PEN 0x000d0002
+#define HID_DG_LIGHTPEN 0x000d0003
+#define HID_DG_TOUCHSCREEN 0x000d0004
+#define HID_DG_TOUCHPAD 0x000d0005
+#define HID_DG_STYLUS 0x000d0020
+#define HID_DG_PUCK 0x000d0021
+#define HID_DG_FINGER 0x000d0022
+#define HID_DG_TIPPRESSURE 0x000d0030
+#define HID_DG_BARRELPRESSURE 0x000d0031
+#define HID_DG_INRANGE 0x000d0032
+#define HID_DG_TOUCH 0x000d0033
+#define HID_DG_UNTOUCH 0x000d0034
+#define HID_DG_TAP 0x000d0035
+#define HID_DG_TABLETFUNCTIONKEY 0x000d0039
+#define HID_DG_PROGRAMCHANGEKEY 0x000d003a
+#define HID_DG_INVERT 0x000d003c
+#define HID_DG_TIPSWITCH 0x000d0042
+#define HID_DG_TIPSWITCH2 0x000d0043
+#define HID_DG_BARRELSWITCH 0x000d0044
+#define HID_DG_ERASER 0x000d0045
+#define HID_DG_TABLETPICK 0x000d0046
+/*
+ * as of May 20, 2009 the usages below are not yet in the official USB spec
+ * but are being pushed by Microsoft as described in their paper "Digitizer
+ * Drivers for Windows Touch and Pen-Based Computers"
+ */
+#define HID_DG_CONFIDENCE 0x000d0047
+#define HID_DG_WIDTH 0x000d0048
+#define HID_DG_HEIGHT 0x000d0049
+#define HID_DG_CONTACTID 0x000d0051
+#define HID_DG_INPUTMODE 0x000d0052
+#define HID_DG_DEVICEINDEX 0x000d0053
+#define HID_DG_CONTACTCOUNT 0x000d0054
+#define HID_DG_CONTACTMAX 0x000d0055
+
/*
* HID report types --- Ouch! HID spec says 1 2 3!
*/
diff --git a/include/linux/i2c-ocores.h b/include/linux/i2c-ocores.h
index 8ed591b..4d5e57f 100644
--- a/include/linux/i2c-ocores.h
+++ b/include/linux/i2c-ocores.h
@@ -14,6 +14,8 @@
struct ocores_i2c_platform_data {
u32 regstep; /* distance between registers */
u32 clock_khz; /* input clock in kHz */
+ u8 num_devices; /* number of devices in the devices list */
+ struct i2c_board_info const *devices; /* devices connected to the bus */
};
#endif /* _LINUX_I2C_OCORES_H */
diff --git a/include/linux/i7300_idle.h b/include/linux/i7300_idle.h
index 05a80c4..1587b7d 100644
--- a/include/linux/i7300_idle.h
+++ b/include/linux/i7300_idle.h
@@ -16,35 +16,33 @@
struct fbd_ioat {
unsigned int vendor;
unsigned int ioat_dev;
+ unsigned int enabled;
};
/*
* The i5000 chip-set has the same hooks as the i7300
- * but support is disabled by default because this driver
- * has not been validated on that platform.
+ * but it is not enabled by default and must be manually
+ * enabled with "forceload=1" because it is
+ * only lightly validated.
*/
-#define SUPPORT_I5000 0
static const struct fbd_ioat fbd_ioat_list[] = {
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB},
-#if SUPPORT_I5000
- {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT},
-#endif
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB, 1},
+ {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT, 0},
{0, 0}
};
/* table of devices that work with this driver */
static const struct pci_device_id pci_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_FBD_CNB) },
-#if SUPPORT_I5000
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5000_ERR) },
-#endif
{ } /* Terminating entry */
};
/* Check for known platforms with I/O-AT */
static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
- struct pci_dev **ioat_dev)
+ struct pci_dev **ioat_dev,
+ int enable_all)
{
int i;
struct pci_dev *memdev, *dmadev;
@@ -69,6 +67,8 @@ static inline int i7300_idle_platform_probe(struct pci_dev **fbd_dev,
for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
if (dmadev->vendor == fbd_ioat_list[i].vendor &&
dmadev->device == fbd_ioat_list[i].ioat_dev) {
+ if (!(fbd_ioat_list[i].enabled || enable_all))
+ continue;
if (fbd_dev)
*fbd_dev = memdev;
if (ioat_dev)
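A minimal user-space sketch of the probe filtering introduced in the i7300_idle hunk above: each table entry now carries an enabled flag, and the lightly validated i5000 entry is skipped unless the caller passes enable_all (the driver's "forceload=1"); vendor and device IDs here are dummies.

#include <stdio.h>

struct fbd_ioat {
	unsigned int vendor;
	unsigned int ioat_dev;
	unsigned int enabled;
};

/* dummy IDs standing in for the validated (CNB) and lightly validated (i5000) parts */
static const struct fbd_ioat fbd_ioat_list[] = {
	{ 0x8086, 0x0001, 1 },
	{ 0x8086, 0x0002, 0 },
	{ 0, 0, 0 }
};

static int ioat_matches(unsigned int vendor, unsigned int device, int enable_all)
{
	int i;

	for (i = 0; fbd_ioat_list[i].vendor != 0; i++) {
		if (vendor == fbd_ioat_list[i].vendor &&
		    device == fbd_ioat_list[i].ioat_dev) {
			/* same test as the patched loop */
			if (!(fbd_ioat_list[i].enabled || enable_all))
				continue;
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	printf("i5000 part, forceload=0: %d\n", ioat_matches(0x8086, 0x0002, 0));
	printf("i5000 part, forceload=1: %d\n", ioat_matches(0x8086, 0x0002, 1));
	return 0;
}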
diff --git a/include/linux/ide.h b/include/linux/ide.h
index ff65fff..a6c6a2f 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -26,6 +26,9 @@
#include <asm/io.h>
#include <asm/mutex.h>
+/* for request_sense */
+#include <linux/cdrom.h>
+
#if defined(CONFIG_CRIS) || defined(CONFIG_FRV) || defined(CONFIG_MN10300)
# define SUPPORT_VLB_SYNC 0
#else
@@ -175,7 +178,7 @@ typedef u8 hwif_chipset_t;
/*
* Structure to hold all information about the location of this port
*/
-typedef struct hw_regs_s {
+struct ide_hw {
union {
struct ide_io_ports io_ports;
unsigned long io_ports_array[IDE_NR_PORTS];
@@ -183,12 +186,11 @@ typedef struct hw_regs_s {
int irq; /* our irq number */
ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
- hwif_chipset_t chipset;
struct device *dev, *parent;
unsigned long config;
-} hw_regs_t;
+};
-static inline void ide_std_init_ports(hw_regs_t *hw,
+static inline void ide_std_init_ports(struct ide_hw *hw,
unsigned long io_addr,
unsigned long ctl_addr)
{
@@ -215,21 +217,12 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
/*
* Special Driver Flags
- *
- * set_geometry : respecify drive geometry
- * recalibrate : seek to cyl 0
- * set_multmode : set multmode count
- * reserved : unused
*/
-typedef union {
- unsigned all : 8;
- struct {
- unsigned set_geometry : 1;
- unsigned recalibrate : 1;
- unsigned set_multmode : 1;
- unsigned reserved : 5;
- } b;
-} special_t;
+enum {
+ IDE_SFLAG_SET_GEOMETRY = (1 << 0),
+ IDE_SFLAG_RECALIBRATE = (1 << 1),
+ IDE_SFLAG_SET_MULTMODE = (1 << 2),
+};
/*
* Status returned from various ide_ functions
@@ -324,7 +317,6 @@ struct ide_cmd {
unsigned int cursg_ofs;
struct request *rq; /* copy of request */
- void *special; /* valid_t generally */
};
/* ATAPI packet command flags */
@@ -360,11 +352,7 @@ struct ide_atapi_pc {
/* data buffer */
u8 *buf;
- /* current buffer position */
- u8 *cur_pos;
int buf_size;
- /* missing/available data on the current buffer */
- int b_count;
/* the corresponding request */
struct request *rq;
@@ -377,10 +365,6 @@ struct ide_atapi_pc {
*/
u8 pc_buf[IDE_PC_BUFFER_SIZE];
- /* idetape only */
- struct idetape_bh *bh;
- char *b_data;
-
unsigned long timeout;
};
@@ -397,6 +381,7 @@ struct ide_drive_s;
struct ide_disk_ops {
int (*check)(struct ide_drive_s *, const char *);
int (*get_capacity)(struct ide_drive_s *);
+ u64 (*set_capacity)(struct ide_drive_s *, u64);
void (*setup)(struct ide_drive_s *);
void (*flush)(struct ide_drive_s *);
int (*init_media)(struct ide_drive_s *, struct gendisk *);
@@ -474,6 +459,8 @@ enum {
IDE_DFLAG_NICE1 = (1 << 5),
/* device is physically present */
IDE_DFLAG_PRESENT = (1 << 6),
+ /* disable Host Protected Area */
+ IDE_DFLAG_NOHPA = (1 << 7),
/* id read from device (synthetic if not set) */
IDE_DFLAG_ID_READ = (1 << 8),
IDE_DFLAG_NOPROBE = (1 << 9),
@@ -512,6 +499,7 @@ enum {
/* write protect */
IDE_DFLAG_WP = (1 << 29),
IDE_DFLAG_FORMAT_IN_PROGRESS = (1 << 30),
+ IDE_DFLAG_NIEN_QUIRK = (1 << 31),
};
struct ide_drive_s {
@@ -536,14 +524,13 @@ struct ide_drive_s {
unsigned long sleep; /* sleep until this time */
unsigned long timeout; /* max time to wait for irq */
- special_t special; /* special action flags */
+ u8 special_flags; /* special action flags */
u8 select; /* basic drive/head select reg value */
u8 retry_pio; /* retrying dma capable host in pio */
u8 waiting_for_dma; /* dma currently in progress */
u8 dma; /* atapi dma flag */
- u8 quirk_list; /* considered quirky, set for a specific host */
u8 init_speed; /* transfer rate set at boot */
u8 current_speed; /* current transfer rate set */
u8 desired_speed; /* desired transfer rate set */
@@ -568,8 +555,7 @@ struct ide_drive_s {
unsigned int drive_data; /* used by set_pio_mode/dev_select() */
unsigned int failures; /* current failure count */
unsigned int max_failures; /* maximum allowed failure count */
- u64 probed_capacity;/* initial reported media capacity (ide-cd only currently) */
-
+ u64 probed_capacity;/* initial/native media capacity */
u64 capacity64; /* total number of sectors */
int lun; /* logical unit */
@@ -593,16 +579,16 @@ struct ide_drive_s {
/* callback for packet commands */
int (*pc_callback)(struct ide_drive_s *, int);
- void (*pc_update_buffers)(struct ide_drive_s *, struct ide_atapi_pc *);
- int (*pc_io_buffers)(struct ide_drive_s *, struct ide_atapi_pc *,
- unsigned int, int);
-
ide_startstop_t (*irq_handler)(struct ide_drive_s *);
unsigned long atapi_flags;
struct ide_atapi_pc request_sense_pc;
- struct request request_sense_rq;
+
+ /* current sense rq and buffer */
+ bool sense_rq_armed;
+ struct request sense_rq;
+ struct request_sense sense_data;
};
typedef struct ide_drive_s ide_drive_t;
@@ -1109,7 +1095,7 @@ void ide_fix_driveid(u16 *);
extern void ide_fixstring(u8 *, const int, const int);
-int ide_busy_sleep(ide_hwif_t *, unsigned long, int);
+int ide_busy_sleep(ide_drive_t *, unsigned long, int);
int ide_wait_stat(ide_startstop_t *, ide_drive_t *, u8, u8, unsigned long);
@@ -1174,7 +1160,10 @@ int ide_do_test_unit_ready(ide_drive_t *, struct gendisk *);
int ide_do_start_stop(ide_drive_t *, struct gendisk *, int);
int ide_set_media_lock(ide_drive_t *, struct gendisk *, int);
void ide_create_request_sense_cmd(ide_drive_t *, struct ide_atapi_pc *);
-void ide_retry_pc(ide_drive_t *, struct gendisk *);
+void ide_retry_pc(ide_drive_t *drive);
+
+void ide_prep_sense(ide_drive_t *drive, struct request *rq);
+int ide_queue_sense_rq(ide_drive_t *drive, void *special);
int ide_cd_expiry(ide_drive_t *);
@@ -1225,7 +1214,7 @@ static inline int ide_pci_is_in_compatibility_mode(struct pci_dev *dev)
}
void ide_pci_setup_ports(struct pci_dev *, const struct ide_port_info *,
- hw_regs_t *, hw_regs_t **);
+ struct ide_hw *, struct ide_hw **);
void ide_setup_pci_noise(struct pci_dev *, const struct ide_port_info *);
#ifdef CONFIG_BLK_DEV_IDEDMA_PCI
@@ -1464,16 +1453,18 @@ static inline void ide_acpi_set_state(ide_hwif_t *hwif, int on) {}
void ide_register_region(struct gendisk *);
void ide_unregister_region(struct gendisk *);
+void ide_check_nien_quirk_list(ide_drive_t *);
void ide_undecoded_slave(ide_drive_t *);
void ide_port_apply_params(ide_hwif_t *);
int ide_sysfs_register_port(ide_hwif_t *);
-struct ide_host *ide_host_alloc(const struct ide_port_info *, hw_regs_t **);
+struct ide_host *ide_host_alloc(const struct ide_port_info *, struct ide_hw **,
+ unsigned int);
void ide_host_free(struct ide_host *);
int ide_host_register(struct ide_host *, const struct ide_port_info *,
- hw_regs_t **);
-int ide_host_add(const struct ide_port_info *, hw_regs_t **,
+ struct ide_hw **);
+int ide_host_add(const struct ide_port_info *, struct ide_hw **, unsigned int,
struct ide_host **);
void ide_host_remove(struct ide_host *);
int ide_legacy_device_add(const struct ide_port_info *, unsigned long);
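A minimal stand-alone sketch of the special_t conversion above: the old 8-bit bitfield union becomes plain flag bits in drive->special_flags, so callers set and test them with ordinary bit operations; the enum values are the ones introduced in <linux/ide.h> here, and the surrounding code is illustrative only.

#include <stdio.h>

/* flag bits as introduced in <linux/ide.h> by this series */
enum {
	IDE_SFLAG_SET_GEOMETRY	= (1 << 0),
	IDE_SFLAG_RECALIBRATE	= (1 << 1),
	IDE_SFLAG_SET_MULTMODE	= (1 << 2),
};

int main(void)
{
	unsigned char special_flags = 0;	/* stands in for drive->special_flags (u8) */

	/* old: drive->special.b.recalibrate = 1; */
	special_flags |= IDE_SFLAG_RECALIBRATE;

	/* old: if (drive->special.b.recalibrate) ... */
	if (special_flags & IDE_SFLAG_RECALIBRATE)
		printf("recalibrate requested, flags=0x%02x\n", special_flags);

	return 0;
}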
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 11a60e4..ae3a187 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -79,6 +79,7 @@
#define ETH_P_AOE 0x88A2 /* ATA over Ethernet */
#define ETH_P_TIPC 0x88CA /* TIPC */
#define ETH_P_FCOE 0x8906 /* Fibre Channel over Ethernet */
+#define ETH_P_FIP 0x8914 /* FCoE Initialization Protocol */
#define ETH_P_EDSA 0xDADA /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
/*
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e2aa45..b1b827d 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -13,14 +13,17 @@
#include <linux/fs.h>
struct linux_binprm;
+#define IMA_COUNT_UPDATE 1
+#define IMA_COUNT_LEAVE 0
+
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
extern int ima_inode_alloc(struct inode *inode);
extern void ima_inode_free(struct inode *inode);
-extern int ima_path_check(struct path *path, int mask);
+extern int ima_path_check(struct path *path, int mask, int update_counts);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
-extern void ima_shm_check(struct file *file);
+extern void ima_counts_get(struct file *file);
#else
static inline int ima_bprm_check(struct linux_binprm *bprm)
@@ -38,7 +41,7 @@ static inline void ima_inode_free(struct inode *inode)
return;
}
-static inline int ima_path_check(struct path *path, int mask)
+static inline int ima_path_check(struct path *path, int mask, int update_counts)
{
return 0;
}
@@ -53,7 +56,7 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
-static inline void ima_shm_check(struct file *file)
+static inline void ima_counts_get(struct file *file)
{
return;
}
diff --git a/include/linux/init.h b/include/linux/init.h
index 0e06c17..b218980 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -2,8 +2,6 @@
#define _LINUX_INIT_H
#include <linux/compiler.h>
-#include <linux/section-names.h>
-#include <linux/stringify.h>
/* These macros are used to mark some functions or
* initialized data (doesn't apply to uninitialized data)
@@ -101,7 +99,7 @@
#define __memexitconst __section(.memexit.rodata)
/* For assembly routines */
-#define __HEAD .section __stringify(HEAD_TEXT_SECTION),"ax"
+#define __HEAD .section ".head.text","ax"
#define __INIT .section ".init.text","ax"
#define __FINIT .previous
@@ -225,7 +223,8 @@ struct obs_kernel_param {
* obs_kernel_param "array" too far apart in .init.setup.
*/
#define __setup_param(str, unique_id, fn, early) \
- static char __setup_str_##unique_id[] __initdata __aligned(1) = str; \
+ static const char __setup_str_##unique_id[] __initconst \
+ __aligned(1) = str; \
static struct obs_kernel_param __setup_##unique_id \
__used __section(.init.setup) \
__attribute__((aligned((sizeof(long))))) \
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index d87247d..28b1f30 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -108,6 +108,15 @@ extern struct group_info init_groups;
extern struct cred init_cred;
+#ifdef CONFIG_PERF_COUNTERS
+# define INIT_PERF_COUNTERS(tsk) \
+ .perf_counter_mutex = \
+ __MUTEX_INITIALIZER(tsk.perf_counter_mutex), \
+ .perf_counter_list = LIST_HEAD_INIT(tsk.perf_counter_list),
+#else
+# define INIT_PERF_COUNTERS(tsk)
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -145,8 +154,8 @@ extern struct cred init_cred;
.group_leader = &tsk, \
.real_cred = &init_cred, \
.cred = &init_cred, \
- .cred_exec_mutex = \
- __MUTEX_INITIALIZER(tsk.cred_exec_mutex), \
+ .cred_guard_mutex = \
+ __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
.comm = "swapper", \
.thread = INIT_THREAD, \
.fs = &init_fs, \
@@ -171,9 +180,11 @@ extern struct cred init_cred;
}, \
.dirties = INIT_PROP_LOCAL_SINGLE(dirties), \
INIT_IDS \
+ INIT_PERF_COUNTERS(tsk) \
INIT_TRACE_IRQFLAGS \
INIT_LOCKDEP \
INIT_FTRACE_GRAPH \
+ INIT_TRACE_RECURSION \
}
diff --git a/include/linux/input.h b/include/linux/input.h
index 0e6ff5d..6fed4f6 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -656,6 +656,7 @@ struct input_absinfo {
#define ABS_MT_POSITION_Y 0x36 /* Center Y ellipse position */
#define ABS_MT_TOOL_TYPE 0x37 /* Type of touching device */
#define ABS_MT_BLOB_ID 0x38 /* Group a set of packets as a blob */
+#define ABS_MT_TRACKING_ID 0x39 /* Unique ID of initiated contact */
#define ABS_MAX 0x3f
#define ABS_CNT (ABS_MAX+1)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 91bb76f..c41e812 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -183,6 +183,7 @@ extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
/* The following three functions are for the core kernel use only. */
+#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
@@ -190,6 +191,11 @@ extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
+#else
+static inline void suspend_device_irqs(void) { };
+static inline void resume_device_irqs(void) { };
+static inline int check_wakeup_irqs(void) { return 0; }
+#endif
#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)
@@ -566,6 +572,6 @@ struct irq_desc;
extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
-extern int arch_init_chip_data(struct irq_desc *desc, int cpu);
+extern int arch_init_chip_data(struct irq_desc *desc, int node);
#endif
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 08b987b..dd05434 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -64,7 +64,7 @@ struct cfq_io_context {
* and kmalloc'ed. These could be shared between processes.
*/
struct io_context {
- atomic_t refcount;
+ atomic_long_t refcount;
atomic_t nr_tasks;
/* all the fields below are protected by this lock */
@@ -91,8 +91,8 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
* if ref count is zero, don't allow sharing (ioc is going away, it's
* a race).
*/
- if (ioc && atomic_inc_not_zero(&ioc->refcount)) {
- atomic_inc(&ioc->nr_tasks);
+ if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
+ atomic_long_inc(&ioc->refcount);
return ioc;
}
diff --git a/include/linux/irq.h b/include/linux/irq.h
index b7cbeed..1e50c34 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -117,7 +117,7 @@ struct irq_chip {
void (*eoi)(unsigned int irq);
void (*end)(unsigned int irq);
- void (*set_affinity)(unsigned int irq,
+ int (*set_affinity)(unsigned int irq,
const struct cpumask *dest);
int (*retrigger)(unsigned int irq);
int (*set_type)(unsigned int irq, unsigned int flow_type);
@@ -187,7 +187,7 @@ struct irq_desc {
spinlock_t lock;
#ifdef CONFIG_SMP
cpumask_var_t affinity;
- unsigned int cpu;
+ unsigned int node;
#ifdef CONFIG_GENERIC_PENDING_IRQ
cpumask_var_t pending_mask;
#endif
@@ -201,26 +201,23 @@ struct irq_desc {
} ____cacheline_internodealigned_in_smp;
extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
- struct irq_desc *desc, int cpu);
+ struct irq_desc *desc, int node);
extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
#ifndef CONFIG_SPARSE_IRQ
extern struct irq_desc irq_desc[NR_IRQS];
-#else /* CONFIG_SPARSE_IRQ */
-extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
-#endif /* CONFIG_SPARSE_IRQ */
-
-extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
+#endif
-static inline struct irq_desc *
-irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
-{
-#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
- return irq_to_desc(irq);
+#ifdef CONFIG_NUMA_IRQ_DESC
+extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
#else
+static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
+{
return desc;
-#endif
}
+#endif
+
+extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
/*
* Migration helpers for obsolete names, they will go away:
@@ -386,7 +383,7 @@ extern void set_irq_noprobe(unsigned int irq);
extern void set_irq_probe(unsigned int irq);
/* Handle dynamic irq creation and destruction */
-extern unsigned int create_irq_nr(unsigned int irq_want);
+extern unsigned int create_irq_nr(unsigned int irq_want, int node);
extern int create_irq(void);
extern void destroy_irq(unsigned int irq);
@@ -424,47 +421,44 @@ extern int set_irq_msi(unsigned int irq, struct msi_desc *entry);
#ifdef CONFIG_SMP
/**
- * init_alloc_desc_masks - allocate cpumasks for irq_desc
+ * alloc_desc_masks - allocate cpumasks for irq_desc
* @desc: pointer to irq_desc struct
* @node: node on which to allocate the cpumasks
* @boot: true if need bootmem
*
* Allocates affinity and pending_mask cpumask if required.
* Returns true if successful (or not required).
- * Side effect: affinity has all bits set, pending_mask has all bits clear.
*/
-static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
- bool boot)
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
+ bool boot)
{
- int node;
-
- if (boot) {
- alloc_bootmem_cpumask_var(&desc->affinity);
- cpumask_setall(desc->affinity);
-
-#ifdef CONFIG_GENERIC_PENDING_IRQ
- alloc_bootmem_cpumask_var(&desc->pending_mask);
- cpumask_clear(desc->pending_mask);
-#endif
- return true;
- }
+ gfp_t gfp = GFP_ATOMIC;
- node = cpu_to_node(cpu);
+ if (boot)
+ gfp = GFP_NOWAIT;
- if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
+#ifdef CONFIG_CPUMASK_OFFSTACK
+ if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
return false;
- cpumask_setall(desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
- if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
+ if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
free_cpumask_var(desc->affinity);
return false;
}
- cpumask_clear(desc->pending_mask);
+#endif
#endif
return true;
}
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+ cpumask_setall(desc->affinity);
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+ cpumask_clear(desc->pending_mask);
+#endif
+}
+
/**
* init_copy_desc_masks - copy cpumasks for irq_desc
* @old_desc: pointer to old irq_desc struct
@@ -478,7 +472,7 @@ static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
struct irq_desc *new_desc)
{
-#ifdef CONFIG_CPUMASKS_OFFSTACK
+#ifdef CONFIG_CPUMASK_OFFSTACK
cpumask_copy(new_desc->affinity, old_desc->affinity);
#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -499,12 +493,16 @@ static inline void free_desc_masks(struct irq_desc *old_desc,
#else /* !CONFIG_SMP */
-static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
+static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
bool boot)
{
return true;
}
+static inline void init_desc_masks(struct irq_desc *desc)
+{
+}
+
static inline void init_copy_desc_masks(struct irq_desc *old_desc,
struct irq_desc *new_desc)
{
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 0c8b89f..a77c600 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -81,7 +81,12 @@ static inline unsigned int kstat_irqs(unsigned int irq)
return sum;
}
+
+/*
+ * Lock/unlock the current runqueue - to extract task statistics:
+ */
extern unsigned long long task_delta_exec(struct task_struct *);
+
extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
extern void account_steal_time(cputime_t);
diff --git a/include/linux/keyboard.h b/include/linux/keyboard.h
index a3c984d..33a63f6 100644
--- a/include/linux/keyboard.h
+++ b/include/linux/keyboard.h
@@ -56,6 +56,7 @@ extern int unregister_keyboard_notifier(struct notifier_block *nb);
#define KT_ASCII 9
#define KT_LOCK 10
#define KT_SLOCK 12
+#define KT_DEAD2 13
#define KT_BRL 14
#define K(t,v) (((t)<<8)|(v))
diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h
new file mode 100644
index 0000000..7796aed
--- /dev/null
+++ b/include/linux/kmemleak.h
@@ -0,0 +1,96 @@
+/*
+ * include/linux/kmemleak.h
+ *
+ * Copyright (C) 2008 ARM Limited
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __KMEMLEAK_H
+#define __KMEMLEAK_H
+
+#ifdef CONFIG_DEBUG_KMEMLEAK
+
+extern void kmemleak_init(void);
+extern void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp);
+extern void kmemleak_free(const void *ptr);
+extern void kmemleak_padding(const void *ptr, unsigned long offset,
+ size_t size);
+extern void kmemleak_not_leak(const void *ptr);
+extern void kmemleak_ignore(const void *ptr);
+extern void kmemleak_scan_area(const void *ptr, unsigned long offset,
+ size_t length, gfp_t gfp);
+extern void kmemleak_no_scan(const void *ptr);
+
+static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
+ int min_count, unsigned long flags,
+ gfp_t gfp)
+{
+ if (!(flags & SLAB_NOLEAKTRACE))
+ kmemleak_alloc(ptr, size, min_count, gfp);
+}
+
+static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+{
+ if (!(flags & SLAB_NOLEAKTRACE))
+ kmemleak_free(ptr);
+}
+
+static inline void kmemleak_erase(void **ptr)
+{
+ *ptr = NULL;
+}
+
+#else
+
+static inline void kmemleak_init(void)
+{
+}
+static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count,
+ gfp_t gfp)
+{
+}
+static inline void kmemleak_alloc_recursive(const void *ptr, size_t size,
+ int min_count, unsigned long flags,
+ gfp_t gfp)
+{
+}
+static inline void kmemleak_free(const void *ptr)
+{
+}
+static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags)
+{
+}
+static inline void kmemleak_not_leak(const void *ptr)
+{
+}
+static inline void kmemleak_ignore(const void *ptr)
+{
+}
+static inline void kmemleak_scan_area(const void *ptr, unsigned long offset,
+ size_t length, gfp_t gfp)
+{
+}
+static inline void kmemleak_erase(void **ptr)
+{
+}
+static inline void kmemleak_no_scan(const void *ptr)
+{
+}
+
+#endif /* CONFIG_DEBUG_KMEMLEAK */
+
+#endif /* __KMEMLEAK_H */
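For context, a short sketch of how a custom allocator might feed kmemleak with the hooks declared above. This is not taken from the patch; my_alloc()/my_free() and the page-based backend are assumptions used only for illustration.

#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kmemleak.h>

static void *my_alloc(size_t size, gfp_t gfp)
{
	void *ptr = (void *)__get_free_pages(gfp, get_order(size));

	if (ptr)
		/* min_count = 1: report the object if no references to it remain */
		kmemleak_alloc(ptr, size, 1, gfp);
	return ptr;
}

static void my_free(void *ptr, size_t size)
{
	kmemleak_free(ptr);		/* tell kmemleak to forget the object first */
	free_pages((unsigned long)ptr, get_order(size));
}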
diff --git a/include/linux/kmemtrace.h b/include/linux/kmemtrace.h
new file mode 100644
index 0000000..b616d39
--- /dev/null
+++ b/include/linux/kmemtrace.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2008 Eduard - Gabriel Munteanu
+ *
+ * This file is released under GPL version 2.
+ */
+
+#ifndef _LINUX_KMEMTRACE_H
+#define _LINUX_KMEMTRACE_H
+
+#ifdef __KERNEL__
+
+#include <trace/events/kmem.h>
+
+#ifdef CONFIG_KMEMTRACE
+extern void kmemtrace_init(void);
+#else
+static inline void kmemtrace_init(void)
+{
+}
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* _LINUX_KMEMTRACE_H */
+
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 8cc1379..3db5d8d 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -119,7 +119,7 @@ struct kvm_run {
__u32 error_code;
} ex;
/* KVM_EXIT_IO */
- struct kvm_io {
+ struct {
#define KVM_EXIT_IO_IN 0
#define KVM_EXIT_IO_OUT 1
__u8 direction;
@@ -224,10 +224,10 @@ struct kvm_interrupt {
/* for KVM_GET_DIRTY_LOG */
struct kvm_dirty_log {
__u32 slot;
- __u32 padding;
+ __u32 padding1;
union {
void __user *dirty_bitmap; /* one bit per page */
- __u64 padding;
+ __u64 padding2;
};
};
@@ -409,6 +409,10 @@ struct kvm_trace_rec {
#ifdef __KVM_HAVE_DEVICE_ASSIGNMENT
#define KVM_CAP_DEVICE_DEASSIGNMENT 27
#endif
+#ifdef __KVM_HAVE_MSIX
+#define KVM_CAP_DEVICE_MSIX 28
+#endif
+#define KVM_CAP_ASSIGN_DEV_IRQ 29
/* Another bug in KVM_SET_USER_MEMORY_REGION fixed: */
#define KVM_CAP_JOIN_MEMORY_REGIONS_WORKS 30
@@ -482,11 +486,18 @@ struct kvm_irq_routing {
#define KVM_ASSIGN_PCI_DEVICE _IOR(KVMIO, 0x69, \
struct kvm_assigned_pci_dev)
#define KVM_SET_GSI_ROUTING _IOW(KVMIO, 0x6a, struct kvm_irq_routing)
+/* deprecated, replaced by KVM_ASSIGN_DEV_IRQ */
#define KVM_ASSIGN_IRQ _IOR(KVMIO, 0x70, \
struct kvm_assigned_irq)
+#define KVM_ASSIGN_DEV_IRQ _IOW(KVMIO, 0x70, struct kvm_assigned_irq)
#define KVM_REINJECT_CONTROL _IO(KVMIO, 0x71)
#define KVM_DEASSIGN_PCI_DEVICE _IOW(KVMIO, 0x72, \
struct kvm_assigned_pci_dev)
+#define KVM_ASSIGN_SET_MSIX_NR \
+ _IOW(KVMIO, 0x73, struct kvm_assigned_msix_nr)
+#define KVM_ASSIGN_SET_MSIX_ENTRY \
+ _IOW(KVMIO, 0x74, struct kvm_assigned_msix_entry)
+#define KVM_DEASSIGN_DEV_IRQ _IOW(KVMIO, 0x75, struct kvm_assigned_irq)
/*
* ioctls for vcpu fds
@@ -577,6 +588,8 @@ struct kvm_debug_guest {
#define KVM_TRC_STLB_INVAL (KVM_TRC_HANDLER + 0x18)
#define KVM_TRC_PPC_INSTR (KVM_TRC_HANDLER + 0x19)
+#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
+
struct kvm_assigned_pci_dev {
__u32 assigned_dev_id;
__u32 busnr;
@@ -587,6 +600,17 @@ struct kvm_assigned_pci_dev {
};
};
+#define KVM_DEV_IRQ_HOST_INTX (1 << 0)
+#define KVM_DEV_IRQ_HOST_MSI (1 << 1)
+#define KVM_DEV_IRQ_HOST_MSIX (1 << 2)
+
+#define KVM_DEV_IRQ_GUEST_INTX (1 << 8)
+#define KVM_DEV_IRQ_GUEST_MSI (1 << 9)
+#define KVM_DEV_IRQ_GUEST_MSIX (1 << 10)
+
+#define KVM_DEV_IRQ_HOST_MASK 0x00ff
+#define KVM_DEV_IRQ_GUEST_MASK 0xff00
+
struct kvm_assigned_irq {
__u32 assigned_dev_id;
__u32 host_irq;
@@ -602,9 +626,19 @@ struct kvm_assigned_irq {
};
};
-#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
-#define KVM_DEV_IRQ_ASSIGN_MSI_ACTION KVM_DEV_IRQ_ASSIGN_ENABLE_MSI
-#define KVM_DEV_IRQ_ASSIGN_ENABLE_MSI (1 << 0)
+struct kvm_assigned_msix_nr {
+ __u32 assigned_dev_id;
+ __u16 entry_nr;
+ __u16 padding;
+};
+
+#define KVM_MAX_MSIX_PER_DEV 512
+struct kvm_assigned_msix_entry {
+ __u32 assigned_dev_id;
+ __u32 gsi;
+ __u16 entry; /* The index of entry in the MSI-X table */
+ __u16 padding[3];
+};
#endif
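A userspace sketch of the new MSI-X assignment ioctls and structures added above. It is illustrative only, not code from the patch; vm_fd is assumed to be the VM file descriptor from KVM_CREATE_VM, and dev_id/gsi_base to come from earlier KVM_ASSIGN_PCI_DEVICE setup.

#include <sys/ioctl.h>
#include <linux/kvm.h>

static int assign_msix(int vm_fd, __u32 dev_id, __u16 nvec, __u32 gsi_base)
{
	struct kvm_assigned_msix_nr nr = {
		.assigned_dev_id = dev_id,
		.entry_nr        = nvec,
	};
	struct kvm_assigned_msix_entry ent;
	__u16 i;

	/* declare how many MSI-X entries the device will use */
	if (ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_NR, &nr) < 0)
		return -1;

	/* map each MSI-X table entry to a guest GSI */
	for (i = 0; i < nvec; i++) {
		ent = (struct kvm_assigned_msix_entry) {
			.assigned_dev_id = dev_id,
			.gsi             = gsi_base + i,
			.entry           = i,	/* index into the MSI-X table */
		};
		if (ioctl(vm_fd, KVM_ASSIGN_SET_MSIX_ENTRY, &ent) < 0)
			return -1;
	}
	return 0;
}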
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 894a56e..aacc544 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -38,6 +38,7 @@
#define KVM_REQ_UNHALT 6
#define KVM_REQ_MMU_SYNC 7
#define KVM_REQ_KVMCLOCK_UPDATE 8
+#define KVM_REQ_KICK 9
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
@@ -72,7 +73,6 @@ struct kvm_vcpu {
struct mutex mutex;
int cpu;
struct kvm_run *run;
- int guest_mode;
unsigned long requests;
unsigned long guest_debug;
int fpu_active;
@@ -298,6 +298,7 @@ int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu);
void kvm_free_physmem(struct kvm *kvm);
@@ -319,6 +320,13 @@ struct kvm_irq_ack_notifier {
void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};
+#define KVM_ASSIGNED_MSIX_PENDING 0x1
+struct kvm_guest_msix_entry {
+ u32 vector;
+ u16 entry;
+ u16 flags;
+};
+
struct kvm_assigned_dev_kernel {
struct kvm_irq_ack_notifier ack_notifier;
struct work_struct interrupt_work;
@@ -326,18 +334,18 @@ struct kvm_assigned_dev_kernel {
int assigned_dev_id;
int host_busnr;
int host_devfn;
+ unsigned int entries_nr;
int host_irq;
bool host_irq_disabled;
+ struct msix_entry *host_msix_entries;
int guest_irq;
-#define KVM_ASSIGNED_DEV_GUEST_INTX (1 << 0)
-#define KVM_ASSIGNED_DEV_GUEST_MSI (1 << 1)
-#define KVM_ASSIGNED_DEV_HOST_INTX (1 << 8)
-#define KVM_ASSIGNED_DEV_HOST_MSI (1 << 9)
+ struct kvm_guest_msix_entry *guest_msix_entries;
unsigned long irq_requested_type;
int irq_source_id;
int flags;
struct pci_dev *dev;
struct kvm *kvm;
+ spinlock_t assigned_dev_lock;
};
struct kvm_irq_mask_notifier {
@@ -360,6 +368,9 @@ void kvm_unregister_irq_ack_notifier(struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
+/* For vcpu->arch.iommu_flags */
+#define KVM_IOMMU_CACHE_COHERENCY 0x1
+
#ifdef CONFIG_IOMMU_API
int kvm_iommu_map_pages(struct kvm *kvm, gfn_t base_gfn,
unsigned long npages);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index 2b8318c..fb46efb 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -40,4 +40,31 @@ typedef unsigned long hfn_t;
typedef hfn_t pfn_t;
+union kvm_ioapic_redirect_entry {
+ u64 bits;
+ struct {
+ u8 vector;
+ u8 delivery_mode:3;
+ u8 dest_mode:1;
+ u8 delivery_status:1;
+ u8 polarity:1;
+ u8 remote_irr:1;
+ u8 trig_mode:1;
+ u8 mask:1;
+ u8 reserve:7;
+ u8 reserved[4];
+ u8 dest_id;
+ } fields;
+};
+
+struct kvm_lapic_irq {
+ u32 vector;
+ u32 delivery_mode;
+ u32 dest_mode;
+ u32 level;
+ u32 trig_mode;
+ u32 shorthand;
+ u32 dest_id;
+};
+
#endif /* __KVM_TYPES_H__ */
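A small illustration (not from the patch) of how the union above lets one raw 64-bit IOAPIC redirection value be inspected through named bitfields; decode_redirect_entry() is a hypothetical debugging helper.

#include <linux/kernel.h>
#include <linux/kvm_types.h>

static void decode_redirect_entry(u64 raw)
{
	union kvm_ioapic_redirect_entry e = { .bits = raw };

	pr_debug("vector %d dest %d mask %d trig %s\n",
		 e.fields.vector, e.fields.dest_id, e.fields.mask,
		 e.fields.trig_mode ? "level" : "edge");
}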
diff --git a/include/linux/lguest.h b/include/linux/lguest.h
index 175e63f..7bc1440 100644
--- a/include/linux/lguest.h
+++ b/include/linux/lguest.h
@@ -30,6 +30,10 @@ struct lguest_data
/* Wallclock time set by the Host. */
struct timespec time;
+ /* Interrupt pending set by the Host. The Guest should do a hypercall
+ * if it re-enables interrupts and sees this set (to X86_EFLAGS_IF). */
+ int irq_pending;
+
/* Async hypercall ring. Instead of directly making hypercalls, we can
* place them in here for processing the next time the Host wants.
* This batching can be quite efficient. */
diff --git a/include/linux/lguest_launcher.h b/include/linux/lguest_launcher.h
index a53407a..bfefbdf 100644
--- a/include/linux/lguest_launcher.h
+++ b/include/linux/lguest_launcher.h
@@ -57,7 +57,8 @@ enum lguest_req
LHREQ_INITIALIZE, /* + base, pfnlimit, start */
LHREQ_GETDMA, /* No longer used */
LHREQ_IRQ, /* + irq */
- LHREQ_BREAK, /* + on/off flag (on blocks until someone does off) */
+ LHREQ_BREAK, /* No longer used */
+ LHREQ_EVENTFD, /* + address, fd. */
};
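A hedged launcher-side sketch of the new request, following the pattern hinted at by the enum comments ("+ address, fd."): the launcher is expected to pass a request code plus its arguments as an array of unsigned longs written to the /dev/lguest file descriptor. Not part of the patch; lguest_fd is assumed to be already open.

#include <unistd.h>
#include <linux/lguest_launcher.h>

static int attach_eventfd(int lguest_fd, unsigned long addr, int event_fd)
{
	/* first word is the LHREQ_* code, the rest are its arguments */
	unsigned long args[] = { LHREQ_EVENTFD, addr, (unsigned long)event_fd };

	return write(lguest_fd, args, sizeof(args)) == sizeof(args) ? 0 : -1;
}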
/* The alignment to use between consumer and producer parts of vring.
diff --git a/include/linux/loop.h b/include/linux/loop.h
index 4072544..66c194e 100644
--- a/include/linux/loop.h
+++ b/include/linux/loop.h
@@ -56,8 +56,7 @@ struct loop_device {
gfp_t old_gfp_mask;
spinlock_t lo_lock;
- struct bio *lo_bio;
- struct bio *lo_biotail;
+ struct bio_list lo_bio_list;
int lo_state;
struct mutex lo_ctl_mutex;
struct task_struct *lo_thread;
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
new file mode 100644
index 0000000..e461b2c
--- /dev/null
+++ b/include/linux/lsm_audit.h
@@ -0,0 +1,111 @@
+/*
+ * Common LSM logging functions
+ * Heavily borrowed from selinux/avc.h
+ *
+ * Author : Etienne BASSET <etienne.basset@ensta.org>
+ *
+ * All credits to : Stephen Smalley, <sds@epoch.ncsc.mil>
+ * All BUGS to : Etienne BASSET <etienne.basset@ensta.org>
+ */
+#ifndef _LSM_COMMON_LOGGING_
+#define _LSM_COMMON_LOGGING_
+
+#include <linux/stddef.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/kdev_t.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/audit.h>
+#include <linux/in6.h>
+#include <linux/path.h>
+#include <linux/key.h>
+#include <linux/skbuff.h>
+#include <asm/system.h>
+
+
+/* Auxiliary data to use in generating the audit record. */
+struct common_audit_data {
+ char type;
+#define LSM_AUDIT_DATA_FS 1
+#define LSM_AUDIT_DATA_NET 2
+#define LSM_AUDIT_DATA_CAP 3
+#define LSM_AUDIT_DATA_IPC 4
+#define LSM_AUDIT_DATA_TASK 5
+#define LSM_AUDIT_DATA_KEY 6
+ struct task_struct *tsk;
+ union {
+ struct {
+ struct path path;
+ struct inode *inode;
+ } fs;
+ struct {
+ int netif;
+ struct sock *sk;
+ u16 family;
+ __be16 dport;
+ __be16 sport;
+ union {
+ struct {
+ __be32 daddr;
+ __be32 saddr;
+ } v4;
+ struct {
+ struct in6_addr daddr;
+ struct in6_addr saddr;
+ } v6;
+ } fam;
+ } net;
+ int cap;
+ int ipc_id;
+ struct task_struct *tsk;
+#ifdef CONFIG_KEYS
+ struct {
+ key_serial_t key;
+ char *key_desc;
+ } key_struct;
+#endif
+ } u;
+ const char *function;
+ /* this union contains LSM specific data */
+ union {
+ /* SMACK data */
+ struct smack_audit_data {
+ char *subject;
+ char *object;
+ char *request;
+ int result;
+ } smack_audit_data;
+ /* SELinux data */
+ struct {
+ u32 ssid;
+ u32 tsid;
+ u16 tclass;
+ u32 requested;
+ u32 audited;
+ struct av_decision *avd;
+ int result;
+ } selinux_audit_data;
+ } lsm_priv;
+ /* these callbacks will be implemented by a specific LSM */
+ void (*lsm_pre_audit)(struct audit_buffer *, void *);
+ void (*lsm_post_audit)(struct audit_buffer *, void *);
+};
+
+#define v4info fam.v4
+#define v6info fam.v6
+
+int ipv4_skb_to_auditdata(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto);
+
+int ipv6_skb_to_auditdata(struct sk_buff *skb,
+ struct common_audit_data *ad, u8 *proto);
+
+/* Initialize an LSM audit data structure. */
+#define COMMON_AUDIT_DATA_INIT(_d, _t) \
+ { memset((_d), 0, sizeof(struct common_audit_data)); \
+ (_d)->type = LSM_AUDIT_DATA_##_t; (_d)->function = __func__; }
+
+void common_lsm_audit(struct common_audit_data *a);
+
+#endif
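As a usage sketch (not part of the patch), an LSM hook could record a denied capability roughly as below. The audit_cap_denial() helper is hypothetical, and the lsm_pre_audit/lsm_post_audit callbacks that a real LSM would also set are only noted in a comment.

#include <linux/lsm_audit.h>

static void audit_cap_denial(struct task_struct *tsk, int cap)
{
	struct common_audit_data ad;

	COMMON_AUDIT_DATA_INIT(&ad, CAP);	/* type = LSM_AUDIT_DATA_CAP, function = __func__ */
	ad.tsk = tsk;
	ad.u.cap = cap;
	/* a real LSM would also set ad.lsm_pre_audit / ad.lsm_post_audit here */
	common_lsm_audit(&ad);
}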
diff --git a/include/linux/magic.h b/include/linux/magic.h
index 5b4e28b..1923327 100644
--- a/include/linux/magic.h
+++ b/include/linux/magic.h
@@ -6,9 +6,12 @@
#define AFS_SUPER_MAGIC 0x5346414F
#define AUTOFS_SUPER_MAGIC 0x0187
#define CODA_SUPER_MAGIC 0x73757245
+#define CRAMFS_MAGIC 0x28cd3d45 /* some random number */
+#define CRAMFS_MAGIC_WEND 0x453dcd28 /* magic number with the wrong endianness */
#define DEBUGFS_MAGIC 0x64626720
#define SYSFS_MAGIC 0x62656572
#define SECURITYFS_MAGIC 0x73636673
+#define SELINUX_MAGIC 0xf97cff8c
#define TMPFS_MAGIC 0x01021994
#define SQUASHFS_MAGIC 0x73717368
#define EFS_SUPER_MAGIC 0x414A53
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 516d955..c377118 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -19,6 +19,13 @@
} while (0)
/*
+ * data for the MMC controller
+ */
+struct tmio_mmc_data {
+ unsigned int hclk;
+};
+
+/*
* data for the NAND controller
*/
struct tmio_nand_data {
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index 1f76b1e..0000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,206 +0,0 @@
-/*
- * include/linux/mg_disk.c
- *
- * Support for the mGine m[g]flash IO mode.
- * Based on legacy hd.c
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-#include <linux/blkdev.h>
-#include <linux/ata.h>
-
-/* name for block device */
-#define MG_DISK_NAME "mgd"
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-#define MG_DISK_MAJ 0
-#define MG_DISK_MAX_PART 16
-#define MG_SECTOR_SIZE 512
-#define MG_MAX_SECTS 256
-
-/* Register offsets */
-#define MG_BUFF_OFFSET 0x8000
-#define MG_STORAGE_BUFFER_SIZE 0x200
-#define MG_REG_OFFSET 0xC000
-#define MG_REG_FEATURE (MG_REG_OFFSET + 2) /* write case */
-#define MG_REG_ERROR (MG_REG_OFFSET + 2) /* read case */
-#define MG_REG_SECT_CNT (MG_REG_OFFSET + 4)
-#define MG_REG_SECT_NUM (MG_REG_OFFSET + 6)
-#define MG_REG_CYL_LOW (MG_REG_OFFSET + 8)
-#define MG_REG_CYL_HIGH (MG_REG_OFFSET + 0xA)
-#define MG_REG_DRV_HEAD (MG_REG_OFFSET + 0xC)
-#define MG_REG_COMMAND (MG_REG_OFFSET + 0xE) /* write case */
-#define MG_REG_STATUS (MG_REG_OFFSET + 0xE) /* read case */
-#define MG_REG_DRV_CTRL (MG_REG_OFFSET + 0x10)
-#define MG_REG_BURST_CTRL (MG_REG_OFFSET + 0x12)
-
-/* "Drive Select/Head Register" bit values */
-#define MG_REG_HEAD_MUST_BE_ON 0xA0 /* These 2 bits are always on */
-#define MG_REG_HEAD_DRIVE_MASTER (0x00 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_DRIVE_SLAVE (0x10 | MG_REG_HEAD_MUST_BE_ON)
-#define MG_REG_HEAD_LBA_MODE (0x40 | MG_REG_HEAD_MUST_BE_ON)
-
-
-/* "Device Control Register" bit values */
-#define MG_REG_CTRL_INTR_ENABLE 0x0
-#define MG_REG_CTRL_INTR_DISABLE (0x1<<1)
-#define MG_REG_CTRL_RESET (0x1<<2)
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_HIGH 0x0
-#define MG_REG_CTRL_INTR_POLA_ACTIVE_LOW (0x1<<4)
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_LOW 0x0
-#define MG_REG_CTRL_DPD_POLA_ACTIVE_HIGH (0x1<<5)
-#define MG_REG_CTRL_DPD_DISABLE 0x0
-#define MG_REG_CTRL_DPD_ENABLE (0x1<<6)
-
-/* Status register bit */
-/* error bit in status register */
-#define MG_REG_STATUS_BIT_ERROR 0x01
-/* corrected error in status register */
-#define MG_REG_STATUS_BIT_CORRECTED_ERROR 0x04
-/* data request bit in status register */
-#define MG_REG_STATUS_BIT_DATA_REQ 0x08
-/* DSC - Drive Seek Complete */
-#define MG_REG_STATUS_BIT_SEEK_DONE 0x10
-/* DWF - Drive Write Fault */
-#define MG_REG_STATUS_BIT_WRITE_FAULT 0x20
-#define MG_REG_STATUS_BIT_READY 0x40
-#define MG_REG_STATUS_BIT_BUSY 0x80
-
-/* handy status */
-#define MG_STAT_READY (MG_REG_STATUS_BIT_READY | MG_REG_STATUS_BIT_SEEK_DONE)
-#define MG_READY_OK(s) (((s) & (MG_STAT_READY | \
- (MG_REG_STATUS_BIT_BUSY | \
- MG_REG_STATUS_BIT_WRITE_FAULT | \
- MG_REG_STATUS_BIT_ERROR))) == MG_STAT_READY)
-
-/* Error register */
-#define MG_REG_ERR_AMNF 0x01
-#define MG_REG_ERR_ABRT 0x04
-#define MG_REG_ERR_IDNF 0x10
-#define MG_REG_ERR_UNC 0x40
-#define MG_REG_ERR_BBK 0x80
-
-/* error code for others */
-#define MG_ERR_NONE 0
-#define MG_ERR_TIMEOUT 0x100
-#define MG_ERR_INIT_STAT 0x101
-#define MG_ERR_TRANSLATION 0x102
-#define MG_ERR_CTRL_RST 0x103
-#define MG_ERR_INV_STAT 0x104
-#define MG_ERR_RSTOUT 0x105
-
-#define MG_MAX_ERRORS 6 /* Max read/write errors */
-
-/* command */
-#define MG_CMD_RD 0x20
-#define MG_CMD_WR 0x30
-#define MG_CMD_SLEEP 0x99
-#define MG_CMD_WAKEUP 0xC3
-#define MG_CMD_ID 0xEC
-#define MG_CMD_WR_CONF 0x3C
-#define MG_CMD_RD_CONF 0x40
-
-/* operation mode */
-#define MG_OP_CASCADE (1 << 0)
-#define MG_OP_CASCADE_SYNC_RD (1 << 1)
-#define MG_OP_CASCADE_SYNC_WR (1 << 2)
-#define MG_OP_INTERLEAVE (1 << 3)
-
-/* synchronous */
-#define MG_BURST_LAT_4 (3 << 4)
-#define MG_BURST_LAT_5 (4 << 4)
-#define MG_BURST_LAT_6 (5 << 4)
-#define MG_BURST_LAT_7 (6 << 4)
-#define MG_BURST_LAT_8 (7 << 4)
-#define MG_BURST_LEN_4 (1 << 1)
-#define MG_BURST_LEN_8 (2 << 1)
-#define MG_BURST_LEN_16 (3 << 1)
-#define MG_BURST_LEN_32 (4 << 1)
-#define MG_BURST_LEN_CONT (0 << 1)
-
-/* timeout value (unit: ms) */
-#define MG_TMAX_CONF_TO_CMD 1
-#define MG_TMAX_WAIT_RD_DRQ 10
-#define MG_TMAX_WAIT_WR_DRQ 500
-#define MG_TMAX_RST_TO_BUSY 10
-#define MG_TMAX_HDRST_TO_RDY 500
-#define MG_TMAX_SWRST_TO_RDY 500
-#define MG_TMAX_RSTOUT 3000
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV (1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV (1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
-
-#define MG_DEV_MASK (MG_BOOT_DEV | MG_STORAGE_DEV | MG_STORAGE_DEV_SKIP_RST)
-
-/* names of GPIO resource */
-#define MG_RST_PIN "mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN "mg_rstout"
-
-/* private driver data */
-struct mg_drv_data {
- /* disk resource */
- u32 use_polling;
-
- /* device attribution */
- u32 dev_attr;
-
- /* internally used */
- struct mg_host *host;
-};
-
-/* main structure for mflash driver */
-struct mg_host {
- struct device *dev;
-
- struct request_queue *breq;
- spinlock_t lock;
- struct gendisk *gd;
-
- struct timer_list timer;
- void (*mg_do_intr) (struct mg_host *);
-
- u16 id[ATA_ID_WORDS];
-
- u16 cyls;
- u16 heads;
- u16 sectors;
- u32 n_sectors;
- u32 nres_sectors;
-
- void __iomem *dev_base;
- unsigned int irq;
- unsigned int rst;
- unsigned int rstout;
-
- u32 major;
- u32 error;
-};
-
-/*
- * Debugging macro and defines
- */
-#undef DO_MG_DEBUG
-#ifdef DO_MG_DEBUG
-# define MG_DBG(fmt, args...) \
- printk(KERN_DEBUG "%s:%d "fmt, __func__, __LINE__, ##args)
-#else /* CONFIG_MG_DEBUG */
-# define MG_DBG(fmt, args...) do { } while (0)
-#endif /* CONFIG_MG_DEBUG */
-
-#endif
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 3aff8a6..ce7cc6c 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -210,6 +210,7 @@ struct mlx4_caps {
int num_comp_vectors;
int num_mpts;
int num_mtt_segs;
+ int mtts_per_seg;
int fmr_reserved_mtts;
int reserved_mtts;
int reserved_mrws;
diff --git a/include/linux/mlx4/qp.h b/include/linux/mlx4/qp.h
index bf8f119..9f29d86 100644
--- a/include/linux/mlx4/qp.h
+++ b/include/linux/mlx4/qp.h
@@ -165,6 +165,7 @@ enum {
MLX4_WQE_CTRL_IP_CSUM = 1 << 4,
MLX4_WQE_CTRL_TCP_UDP_CSUM = 1 << 5,
MLX4_WQE_CTRL_INS_VLAN = 1 << 6,
+ MLX4_WQE_CTRL_STRONG_ORDER = 1 << 7,
};
struct mlx4_wqe_ctrl_seg {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bff1f0d..ad613ed 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -19,6 +19,7 @@ struct anon_vma;
struct file_ra_state;
struct user_struct;
struct writeback_control;
+struct rlimit;
#ifndef CONFIG_DISCONTIGMEM /* Don't use mapnrs, do it properly */
extern unsigned long max_mapnr;
@@ -580,12 +581,10 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
*/
static inline unsigned long round_hint_to_min(unsigned long hint)
{
-#ifdef CONFIG_SECURITY
hint &= PAGE_MASK;
if (((void *)hint != NULL) &&
(hint < mmap_min_addr))
return PAGE_ALIGN(mmap_min_addr);
-#endif
return hint;
}
@@ -1031,8 +1030,6 @@ extern void add_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
extern void remove_active_range(unsigned int nid, unsigned long start_pfn,
unsigned long end_pfn);
-extern void push_node_boundaries(unsigned int nid, unsigned long start_pfn,
- unsigned long end_pfn);
extern void remove_all_active_ranges(void);
extern unsigned long absent_pages_in_range(unsigned long start_pfn,
unsigned long end_pfn);
@@ -1319,8 +1316,8 @@ int vmemmap_populate_basepages(struct page *start_page,
int vmemmap_populate(struct page *start_page, unsigned long pages, int node);
void vmemmap_populate_print_last(void);
-extern void *alloc_locked_buffer(size_t size);
-extern void free_locked_buffer(void *buffer, size_t size);
-extern void release_locked_buffer(void *buffer, size_t size);
+extern int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
+ size_t size);
+extern void refund_locked_memory(struct mm_struct *mm, size_t size);
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
index 3d1b7bd..97491f7 100644
--- a/include/linux/mmiotrace.h
+++ b/include/linux/mmiotrace.h
@@ -30,6 +30,8 @@ extern unsigned int kmmio_count;
extern int register_kmmio_probe(struct kmmio_probe *p);
extern void unregister_kmmio_probe(struct kmmio_probe *p);
+extern int kmmio_init(void);
+extern void kmmio_cleanup(void);
#ifdef CONFIG_MMIOTRACE
/* kmmio is active by some kmmio_probes? */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 186ec6a..a47c879 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -1097,6 +1097,32 @@ unsigned long __init node_memmap_size_bytes(int, unsigned long, unsigned long);
#define pfn_valid_within(pfn) (1)
#endif
+#ifdef CONFIG_ARCH_HAS_HOLES_MEMORYMODEL
+/*
+ * pfn_valid() is meant to be able to tell if a given PFN has valid memmap
+ * associated with it or not. In FLATMEM, it is expected that holes always
+ * have valid memmap as long as there is valid PFNs either side of the hole.
+ * In SPARSEMEM, it is assumed that a valid section has a memmap for the
+ * entire section.
+ *
+ * However, ARM, and maybe other embedded architectures in the future,
+ * free memmap backing holes to save memory on the assumption the memmap is
+ * never used. The page_zone linkages are then broken even though pfn_valid()
+ * returns true. A walker of the full memmap must then do this additional
+ * check to ensure the memmap it is looking at is sane by making sure
+ * the zone and PFN linkages are still valid. This is expensive, but walkers
+ * of the full memmap are extremely rare.
+ */
+int memmap_valid_within(unsigned long pfn,
+ struct page *page, struct zone *zone);
+#else
+static inline int memmap_valid_within(unsigned long pfn,
+ struct page *page, struct zone *zone)
+{
+ return 1;
+}
+#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
+
#endif /* !__GENERATING_BOUNDS.H */
#endif /* !__ASSEMBLY__ */
#endif /* _LINUX_MMZONE_H */
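A sketch (not from the patch) of the full-memmap walker pattern the comment above describes, guarding each page with memmap_valid_within(); inspect_page() stands in for the hypothetical per-page work.

#include <linux/mm.h>
#include <linux/mmzone.h>

extern void inspect_page(struct page *page);	/* hypothetical per-page work */

static void walk_zone_memmap(struct zone *zone)
{
	unsigned long pfn = zone->zone_start_pfn;
	unsigned long end = pfn + zone->spanned_pages;

	for (; pfn < end; pfn++) {
		struct page *page;

		if (!pfn_valid(pfn))
			continue;
		page = pfn_to_page(pfn);
		/* skip memmap that was freed under a hole, e.g. on ARM */
		if (!memmap_valid_within(pfn, page, zone))
			continue;
		inspect_page(page);
	}
}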
diff --git a/include/linux/module.h b/include/linux/module.h
index 627ac08..a7bc6e7 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -77,6 +77,7 @@ search_extable(const struct exception_table_entry *first,
void sort_extable(struct exception_table_entry *start,
struct exception_table_entry *finish);
void sort_main_extable(void);
+void trim_init_extable(struct module *m);
#ifdef MODULE
#define MODULE_GENERIC_TABLE(gtype,name) \
@@ -337,6 +338,14 @@ struct module
const char **trace_bprintk_fmt_start;
unsigned int num_trace_bprintk_fmt;
#endif
+#ifdef CONFIG_EVENT_TRACING
+ struct ftrace_event_call *trace_events;
+ unsigned int num_trace_events;
+#endif
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ unsigned long *ftrace_callsites;
+ unsigned int num_ftrace_callsites;
+#endif
#ifdef CONFIG_MODULE_UNLOAD
/* What modules depend on me? */
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index a4f0b93..6547c3c 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -36,9 +36,14 @@ typedef int (*param_set_fn)(const char *val, struct kernel_param *kp);
/* Returns length written or -errno. Buffer is 4k (ie. be short!) */
typedef int (*param_get_fn)(char *buffer, struct kernel_param *kp);
+/* Flag bits for kernel_param.flags */
+#define KPARAM_KMALLOCED 1
+#define KPARAM_ISBOOL 2
+
struct kernel_param {
const char *name;
- unsigned int perm;
+ u16 perm;
+ u16 flags;
param_set_fn set;
param_get_fn get;
union {
@@ -79,7 +84,7 @@ struct kparam_array
parameters. perm sets the visibility in sysfs: 000 means it's
not there, read bits mean it's readable, write bits mean it's
writable. */
-#define __module_param_call(prefix, name, set, get, arg, perm) \
+#define __module_param_call(prefix, name, set, get, arg, isbool, perm) \
/* Default value instead of permissions? */ \
static int __param_perm_check_##name __attribute__((unused)) = \
BUILD_BUG_ON_ZERO((perm) < 0 || (perm) > 0777 || ((perm) & 2)) \
@@ -88,10 +93,13 @@ struct kparam_array
static struct kernel_param __moduleparam_const __param_##name \
__used \
__attribute__ ((unused,__section__ ("__param"),aligned(sizeof(void *)))) \
- = { __param_str_##name, perm, set, get, { arg } }
+ = { __param_str_##name, perm, isbool ? KPARAM_ISBOOL : 0, \
+ set, get, { arg } }
#define module_param_call(name, set, get, arg, perm) \
- __module_param_call(MODULE_PARAM_PREFIX, name, set, get, arg, perm)
+ __module_param_call(MODULE_PARAM_PREFIX, \
+ name, set, get, arg, \
+ __same_type(*(arg), bool), perm)
/* Helper functions: type is byte, short, ushort, int, uint, long,
ulong, charp, bool or invbool, or XXX if you define param_get_XXX,
@@ -120,15 +128,16 @@ struct kparam_array
#define core_param(name, var, type, perm) \
param_check_##type(name, &(var)); \
__module_param_call("", name, param_set_##type, param_get_##type, \
- &var, perm)
+ &var, __same_type(var, bool), perm)
#endif /* !MODULE */
/* Actually copy string: maxlen param is usually sizeof(string). */
#define module_param_string(name, string, len, perm) \
static const struct kparam_string __param_string_##name \
= { len, string }; \
- module_param_call(name, param_set_copystring, param_get_string, \
- .str = &__param_string_##name, perm); \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ param_set_copystring, param_get_string, \
+ .str = &__param_string_##name, 0, perm); \
__MODULE_PARM_TYPE(name, "string")
/* Called on module insert or kernel boot */
@@ -186,21 +195,30 @@ extern int param_set_charp(const char *val, struct kernel_param *kp);
extern int param_get_charp(char *buffer, struct kernel_param *kp);
#define param_check_charp(name, p) __param_check(name, p, char *)
+/* For historical reasons "bool" parameters can be (unsigned) "int". */
extern int param_set_bool(const char *val, struct kernel_param *kp);
extern int param_get_bool(char *buffer, struct kernel_param *kp);
-#define param_check_bool(name, p) __param_check(name, p, int)
+#define param_check_bool(name, p) \
+ static inline void __check_##name(void) \
+ { \
+ BUILD_BUG_ON(!__same_type(*(p), bool) && \
+ !__same_type(*(p), unsigned int) && \
+ !__same_type(*(p), int)); \
+ }
extern int param_set_invbool(const char *val, struct kernel_param *kp);
extern int param_get_invbool(char *buffer, struct kernel_param *kp);
-#define param_check_invbool(name, p) __param_check(name, p, int)
+#define param_check_invbool(name, p) __param_check(name, p, bool)
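With the stricter check above, a "bool" module parameter must really be a bool (or, for historical reasons, an int). A minimal sketch, assuming a hypothetical "debug" parameter; not part of the patch.

#include <linux/module.h>

static bool debug;
module_param(debug, bool, 0644);	/* param_check_bool() now rejects other types */
MODULE_PARM_DESC(debug, "Enable verbose debugging");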
/* Comma-separated array: *nump is set to number they actually specified. */
#define module_param_array_named(name, array, type, nump, perm) \
static const struct kparam_array __param_arr_##name \
= { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
sizeof(array[0]), array }; \
- module_param_call(name, param_array_set, param_array_get, \
- .arr = &__param_arr_##name, perm); \
+ __module_param_call(MODULE_PARAM_PREFIX, name, \
+ param_array_set, param_array_get, \
+ .arr = &__param_arr_##name, \
+ __same_type(array[0], bool), perm); \
__MODULE_PARM_TYPE(name, "array of " #type)
#define module_param_array(name, type, nump, perm) \
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 51f55f9..5d52753 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -30,7 +30,7 @@ struct mnt_namespace;
#define MNT_STRICTATIME 0x80
#define MNT_SHRINKABLE 0x100
-#define MNT_IMBALANCED_WRITE_COUNT 0x200 /* just for debugging */
+#define MNT_WRITE_HOLD 0x200
#define MNT_SHARED 0x1000 /* if the vfsmount is a shared mount */
#define MNT_UNBINDABLE 0x2000 /* if the vfsmount is a unbindable mount */
@@ -65,13 +65,22 @@ struct vfsmount {
int mnt_expiry_mark; /* true if marked for expiry */
int mnt_pinned;
int mnt_ghosts;
- /*
- * This value is not stable unless all of the mnt_writers[] spinlocks
- * are held, and all mnt_writer[]s on this mount have 0 as their ->count
- */
- atomic_t __mnt_writers;
+#ifdef CONFIG_SMP
+ int *mnt_writers;
+#else
+ int mnt_writers;
+#endif
};
+static inline int *get_mnt_writers_ptr(struct vfsmount *mnt)
+{
+#ifdef CONFIG_SMP
+ return mnt->mnt_writers;
+#else
+ return &mnt->mnt_writers;
+#endif
+}
+
static inline struct vfsmount *mntget(struct vfsmount *mnt)
{
if (mnt)
@@ -79,7 +88,11 @@ static inline struct vfsmount *mntget(struct vfsmount *mnt)
return mnt;
}
+struct file; /* forward dec */
+
extern int mnt_want_write(struct vfsmount *mnt);
+extern int mnt_want_write_file(struct file *file);
+extern int mnt_clone_write(struct vfsmount *mnt);
extern void mnt_drop_write(struct vfsmount *mnt);
extern void mntput_no_expire(struct vfsmount *mnt);
extern void mnt_pin(struct vfsmount *mnt);
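For reference, the usual pairing of mnt_want_write()/mnt_drop_write() around a modification on a mount; a sketch only, with touch_object() standing in for the hypothetical change being made.

#include <linux/fs.h>
#include <linux/mount.h>

extern int touch_object(struct dentry *dentry);	/* hypothetical modification */

static int write_to_mount(struct vfsmount *mnt, struct dentry *dentry)
{
	int err = mnt_want_write(mnt);	/* may fail, e.g. on a read-only mount */

	if (err)
		return err;
	err = touch_object(dentry);
	mnt_drop_write(mnt);		/* always paired with a successful mnt_want_write() */
	return err;
}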
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 3069ec7..878cab4 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -150,5 +150,6 @@ extern int __must_check mutex_lock_killable(struct mutex *lock);
*/
extern int mutex_trylock(struct mutex *lock);
extern void mutex_unlock(struct mutex *lock);
+extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#endif
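A sketch (not from the patch) of the classic use of the new atomic_dec_and_mutex_lock(): tear an object down only when the last reference is dropped, with the mutex held across the teardown. struct my_obj and its free path are hypothetical.

#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/atomic.h>

struct my_obj {
	atomic_t refcount;
	struct mutex lock;
	/* ... payload ... */
};

static void my_obj_put(struct my_obj *obj)
{
	/* returns nonzero, with obj->lock held, only when the count reaches zero */
	if (!atomic_dec_and_mutex_lock(&obj->refcount, &obj->lock))
		return;

	/* last reference: tear down under the mutex, then free */
	mutex_unlock(&obj->lock);
	kfree(obj);
}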
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 518098f..d870ae2 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -18,6 +18,7 @@ enum { MAX_NESTED_LINKS = 8 };
struct nameidata {
struct path path;
struct qstr last;
+ struct path root;
unsigned int flags;
int last_type;
unsigned depth;
@@ -77,8 +78,8 @@ extern void release_open_intent(struct nameidata *);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_noperm(const char *, struct dentry *);
-extern int follow_down(struct vfsmount **, struct dentry **);
-extern int follow_up(struct vfsmount **, struct dentry **);
+extern int follow_down(struct path *);
+extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
diff --git a/include/linux/net_dropmon.h b/include/linux/net_dropmon.h
index e8a8b5c..3ceb0cc 100644
--- a/include/linux/net_dropmon.h
+++ b/include/linux/net_dropmon.h
@@ -1,6 +1,7 @@
#ifndef __NET_DROPMON_H
#define __NET_DROPMON_H
+#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/types.h>
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index bcd0201..a6d9ef2 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -125,11 +125,9 @@ void nfsd_export_flush(void);
void exp_readlock(void);
void exp_readunlock(void);
struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
- struct vfsmount *,
- struct dentry *);
+ struct path *);
struct svc_export * rqst_exp_parent(struct svc_rqst *,
- struct vfsmount *mnt,
- struct dentry *dentry);
+ struct path *);
int exp_rootfh(struct auth_domain *,
char *path, struct knfsd_fh *, int maxsize);
__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
diff --git a/include/linux/page_cgroup.h b/include/linux/page_cgroup.h
index 7339c7b..13f126c 100644
--- a/include/linux/page_cgroup.h
+++ b/include/linux/page_cgroup.h
@@ -18,7 +18,19 @@ struct page_cgroup {
};
void __meminit pgdat_page_cgroup_init(struct pglist_data *pgdat);
-void __init page_cgroup_init(void);
+
+#ifdef CONFIG_SPARSEMEM
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+extern void __init page_cgroup_init(void);
+#else
+void __init page_cgroup_init_flatmem(void);
+static inline void __init page_cgroup_init(void)
+{
+}
+#endif
+
struct page_cgroup *lookup_page_cgroup(struct page *page);
enum {
@@ -87,6 +99,10 @@ static inline void page_cgroup_init(void)
{
}
+static inline void __init page_cgroup_init_flatmem(void)
+{
+}
+
#endif
#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
diff --git a/include/linux/parport.h b/include/linux/parport.h
index e1f83c5..38a423e 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -324,6 +324,10 @@ struct parport {
int spintime;
atomic_t ref_count;
+ unsigned long devflags;
+#define PARPORT_DEVPROC_REGISTERED 0
+ struct pardevice *proc_device; /* Currently registered proc device */
+
struct list_head full_list;
struct parport *slaves[3];
};
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index b87c51a..3435c1f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1005,6 +1005,7 @@
#define PCI_DEVICE_ID_PLX_PCI200SYN 0x3196
#define PCI_DEVICE_ID_PLX_9030 0x9030
#define PCI_DEVICE_ID_PLX_9050 0x9050
+#define PCI_DEVICE_ID_PLX_9056 0x9056
#define PCI_DEVICE_ID_PLX_9080 0x9080
#define PCI_DEVICE_ID_PLX_GTEK_SERIAL2 0xa001
@@ -1276,6 +1277,13 @@
#define PCI_VENDOR_ID_CREATIVE 0x1102 /* duplicate: ECTIVA */
#define PCI_DEVICE_ID_CREATIVE_EMU10K1 0x0002
+#define PCI_DEVICE_ID_CREATIVE_20K1 0x0005
+#define PCI_DEVICE_ID_CREATIVE_20K2 0x000b
+#define PCI_SUBDEVICE_ID_CREATIVE_SB0760 0x0024
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08801 0x0041
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08802 0x0042
+#define PCI_SUBDEVICE_ID_CREATIVE_SB08803 0x0043
+#define PCI_SUBDEVICE_ID_CREATIVE_HENDRIX 0x6000
#define PCI_VENDOR_ID_ECTIVA 0x1102 /* duplicate: CREATIVE */
#define PCI_DEVICE_ID_ECTIVA_EV1938 0x8938
@@ -1368,7 +1376,7 @@
#define PCI_DEVICE_ID_VIA_82C598_1 0x8598
#define PCI_DEVICE_ID_VIA_838X_1 0xB188
#define PCI_DEVICE_ID_VIA_83_87XX_1 0xB198
-#define PCI_DEVICE_ID_VIA_C409_IDE 0XC409
+#define PCI_DEVICE_ID_VIA_VX855_IDE 0xC409
#define PCI_DEVICE_ID_VIA_ANON 0xFFFF
#define PCI_VENDOR_ID_SIEMENS 0x110A
@@ -1809,6 +1817,10 @@
#define PCI_SUBDEVICE_ID_HYPERCOPE_METRO 0x0107
#define PCI_SUBDEVICE_ID_HYPERCOPE_CHAMP2 0x0108
+#define PCI_VENDOR_ID_DIGIGRAM 0x1369
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM 0xc001
+#define PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM 0xc002
+
#define PCI_VENDOR_ID_KAWASAKI 0x136b
#define PCI_DEVICE_ID_MCHIP_KL5A72002 0xff01
@@ -1960,10 +1972,12 @@
#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_U 0xC118
#define PCI_DEVICE_ID_OXSEMI_PCIe952_1_GU 0xC11C
#define PCI_DEVICE_ID_OXSEMI_16PCI954 0x9501
+#define PCI_DEVICE_ID_OXSEMI_C950 0x950B
#define PCI_DEVICE_ID_OXSEMI_16PCI95N 0x9511
#define PCI_DEVICE_ID_OXSEMI_16PCI954PP 0x9513
#define PCI_DEVICE_ID_OXSEMI_16PCI952 0x9521
#define PCI_DEVICE_ID_OXSEMI_16PCI952PP 0x9523
+#define PCI_SUBDEVICE_ID_OXSEMI_C950 0x0001
#define PCI_VENDOR_ID_CHELSIO 0x1425
@@ -2077,6 +2091,7 @@
#define PCI_VENDOR_ID_MAINPINE 0x1522
#define PCI_DEVICE_ID_MAINPINE_PBRIDGE 0x0100
#define PCI_VENDOR_ID_ENE 0x1524
+#define PCI_DEVICE_ID_ENE_CB710_FLASH 0x0510
#define PCI_DEVICE_ID_ENE_CB712_SD 0x0550
#define PCI_DEVICE_ID_ENE_CB712_SD_2 0x0551
#define PCI_DEVICE_ID_ENE_CB714_SD 0x0750
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 1581ff2..26fd9d1 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -86,7 +86,12 @@ struct percpu_data {
void *ptrs[1];
};
+/* pointer disguising messes up kmemleak's object tracking */
+#ifndef CONFIG_DEBUG_KMEMLEAK
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
+#else
+#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
+#endif
#define per_cpu_ptr(ptr, cpu) \
({ \
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
new file mode 100644
index 0000000..1b3118a
--- /dev/null
+++ b/include/linux/perf_counter.h
@@ -0,0 +1,709 @@
+/*
+ * Performance counters:
+ *
+ * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
+ *
+ * Data type definitions, declarations, prototypes.
+ *
+ * Started by: Thomas Gleixner and Ingo Molnar
+ *
+ * For licencing details see kernel-base/COPYING
+ */
+#ifndef _LINUX_PERF_COUNTER_H
+#define _LINUX_PERF_COUNTER_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+#include <asm/byteorder.h>
+
+/*
+ * User-space ABI bits:
+ */
+
+/*
+ * attr.type
+ */
+enum perf_type_id {
+ PERF_TYPE_HARDWARE = 0,
+ PERF_TYPE_SOFTWARE = 1,
+ PERF_TYPE_TRACEPOINT = 2,
+ PERF_TYPE_HW_CACHE = 3,
+ PERF_TYPE_RAW = 4,
+
+ PERF_TYPE_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized performance counter event types, used by the
+ * attr.event_id parameter of the sys_perf_counter_open()
+ * syscall:
+ */
+enum perf_hw_id {
+ /*
+ * Common hardware events, generalized by the kernel:
+ */
+ PERF_COUNT_HW_CPU_CYCLES = 0,
+ PERF_COUNT_HW_INSTRUCTIONS = 1,
+ PERF_COUNT_HW_CACHE_REFERENCES = 2,
+ PERF_COUNT_HW_CACHE_MISSES = 3,
+ PERF_COUNT_HW_BRANCH_INSTRUCTIONS = 4,
+ PERF_COUNT_HW_BRANCH_MISSES = 5,
+ PERF_COUNT_HW_BUS_CYCLES = 6,
+
+ PERF_COUNT_HW_MAX, /* non-ABI */
+};
+
+/*
+ * Generalized hardware cache counters:
+ *
+ * { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
+ * { read, write, prefetch } x
+ * { accesses, misses }
+ */
+enum perf_hw_cache_id {
+ PERF_COUNT_HW_CACHE_L1D = 0,
+ PERF_COUNT_HW_CACHE_L1I = 1,
+ PERF_COUNT_HW_CACHE_LL = 2,
+ PERF_COUNT_HW_CACHE_DTLB = 3,
+ PERF_COUNT_HW_CACHE_ITLB = 4,
+ PERF_COUNT_HW_CACHE_BPU = 5,
+
+ PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_id {
+ PERF_COUNT_HW_CACHE_OP_READ = 0,
+ PERF_COUNT_HW_CACHE_OP_WRITE = 1,
+ PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,
+
+ PERF_COUNT_HW_CACHE_OP_MAX, /* non-ABI */
+};
+
+enum perf_hw_cache_op_result_id {
+ PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
+ PERF_COUNT_HW_CACHE_RESULT_MISS = 1,
+
+ PERF_COUNT_HW_CACHE_RESULT_MAX, /* non-ABI */
+};
+
+/*
+ * Special "software" counters provided by the kernel, even if the hardware
+ * does not support performance counters. These counters measure various
+ * physical and sw events of the kernel (and allow the profiling of them as
+ * well):
+ */
+enum perf_sw_ids {
+ PERF_COUNT_SW_CPU_CLOCK = 0,
+ PERF_COUNT_SW_TASK_CLOCK = 1,
+ PERF_COUNT_SW_PAGE_FAULTS = 2,
+ PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
+ PERF_COUNT_SW_CPU_MIGRATIONS = 4,
+ PERF_COUNT_SW_PAGE_FAULTS_MIN = 5,
+ PERF_COUNT_SW_PAGE_FAULTS_MAJ = 6,
+
+ PERF_COUNT_SW_MAX, /* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.sample_type to request information
+ * in the overflow packets.
+ */
+enum perf_counter_sample_format {
+ PERF_SAMPLE_IP = 1U << 0,
+ PERF_SAMPLE_TID = 1U << 1,
+ PERF_SAMPLE_TIME = 1U << 2,
+ PERF_SAMPLE_ADDR = 1U << 3,
+ PERF_SAMPLE_GROUP = 1U << 4,
+ PERF_SAMPLE_CALLCHAIN = 1U << 5,
+ PERF_SAMPLE_ID = 1U << 6,
+ PERF_SAMPLE_CPU = 1U << 7,
+ PERF_SAMPLE_PERIOD = 1U << 8,
+
+ PERF_SAMPLE_MAX = 1U << 9, /* non-ABI */
+};
+
+/*
+ * Bits that can be set in attr.read_format to request that
+ * reads on the counter should return the indicated quantities,
+ * in increasing order of bit value, after the counter value.
+ */
+enum perf_counter_read_format {
+ PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
+ PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
+ PERF_FORMAT_ID = 1U << 2,
+
+ PERF_FORMAT_MAX = 1U << 3, /* non-ABI */
+};
+
+#define PERF_ATTR_SIZE_VER0 64 /* sizeof first published struct */
+
+/*
+ * Hardware event to monitor via a performance monitoring counter:
+ */
+struct perf_counter_attr {
+
+ /*
+ * Major type: hardware/software/tracepoint/etc.
+ */
+ __u32 type;
+
+ /*
+ * Size of the attr structure, for fwd/bwd compat.
+ */
+ __u32 size;
+
+ /*
+ * Type specific configuration information.
+ */
+ __u64 config;
+
+ union {
+ __u64 sample_period;
+ __u64 sample_freq;
+ };
+
+ __u64 sample_type;
+ __u64 read_format;
+
+ __u64 disabled : 1, /* off by default */
+ inherit : 1, /* children inherit it */
+ pinned : 1, /* must always be on PMU */
+ exclusive : 1, /* only group on PMU */
+ exclude_user : 1, /* don't count user */
+ exclude_kernel : 1, /* ditto kernel */
+ exclude_hv : 1, /* ditto hypervisor */
+ exclude_idle : 1, /* don't count when idle */
+ mmap : 1, /* include mmap data */
+ comm : 1, /* include comm data */
+ freq : 1, /* use freq, not period */
+
+ __reserved_1 : 53;
+
+ __u32 wakeup_events; /* wakeup every n events */
+ __u32 __reserved_2;
+
+ __u64 __reserved_3;
+};
+
+/*
+ * Ioctls that can be done on a perf counter fd:
+ */
+#define PERF_COUNTER_IOC_ENABLE _IO ('$', 0)
+#define PERF_COUNTER_IOC_DISABLE _IO ('$', 1)
+#define PERF_COUNTER_IOC_REFRESH _IO ('$', 2)
+#define PERF_COUNTER_IOC_RESET _IO ('$', 3)
+#define PERF_COUNTER_IOC_PERIOD _IOW('$', 4, u64)
+
+enum perf_counter_ioc_flags {
+ PERF_IOC_FLAG_GROUP = 1U << 0,
+};
+
+/*
+ * Structure of the page that can be mapped via mmap
+ */
+struct perf_counter_mmap_page {
+ __u32 version; /* version number of this structure */
+ __u32 compat_version; /* lowest version this is compat with */
+
+ /*
+ * Bits needed to read the hw counters in user-space.
+ *
+ * u32 seq;
+ * s64 count;
+ *
+ * do {
+ * seq = pc->lock;
+ *
+ * barrier()
+ * if (pc->index) {
+ * count = pmc_read(pc->index - 1);
+ * count += pc->offset;
+ * } else
+ * goto regular_read;
+ *
+ * barrier();
+ * } while (pc->lock != seq);
+ *
+ * NOTE: for obvious reasons this only works on self-monitoring
+ * processes.
+ */
+ __u32 lock; /* seqlock for synchronization */
+ __u32 index; /* hardware counter identifier */
+ __s64 offset; /* add to hardware counter value */
+
+ /*
+ * Control data for the mmap() data buffer.
+ *
+ * User-space should issue an rmb(), on SMP capable platforms, after
+ * reading this value -- see perf_counter_wakeup().
+ */
+ __u64 data_head; /* head in the data section */
+};
+
+#define PERF_EVENT_MISC_CPUMODE_MASK (3 << 0)
+#define PERF_EVENT_MISC_CPUMODE_UNKNOWN (0 << 0)
+#define PERF_EVENT_MISC_KERNEL (1 << 0)
+#define PERF_EVENT_MISC_USER (2 << 0)
+#define PERF_EVENT_MISC_HYPERVISOR (3 << 0)
+#define PERF_EVENT_MISC_OVERFLOW (1 << 2)
+
+struct perf_event_header {
+ __u32 type;
+ __u16 misc;
+ __u16 size;
+};
+
+enum perf_event_type {
+
+ /*
+ * The MMAP events record the PROT_EXEC mappings so that we can
+ * correlate userspace IPs to code. They have the following structure:
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * u64 addr;
+ * u64 len;
+ * u64 pgoff;
+ * char filename[];
+ * };
+ */
+ PERF_EVENT_MMAP = 1,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ *
+ * u32 pid, tid;
+ * char comm[];
+ * };
+ */
+ PERF_EVENT_COMM = 3,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 id;
+ * u64 sample_period;
+ * };
+ */
+ PERF_EVENT_PERIOD = 4,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u64 time;
+ * u64 id;
+ * };
+ */
+ PERF_EVENT_THROTTLE = 5,
+ PERF_EVENT_UNTHROTTLE = 6,
+
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid, ppid;
+ * };
+ */
+ PERF_EVENT_FORK = 7,
+
+ /*
+ * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
+ * will be PERF_RECORD_*
+ *
+ * struct {
+ * struct perf_event_header header;
+ *
+ * { u64 ip; } && PERF_RECORD_IP
+ * { u32 pid, tid; } && PERF_RECORD_TID
+ * { u64 time; } && PERF_RECORD_TIME
+ * { u64 addr; } && PERF_RECORD_ADDR
+ * { u64 config; } && PERF_RECORD_CONFIG
+ * { u32 cpu, res; } && PERF_RECORD_CPU
+ *
+ * { u64 nr;
+ * { u64 id, val; } cnt[nr]; } && PERF_RECORD_GROUP
+ *
+ * { u16 nr,
+ * hv,
+ * kernel,
+ * user;
+ * u64 ips[nr]; } && PERF_RECORD_CALLCHAIN
+ * };
+ */
+};
+
+#ifdef __KERNEL__
+/*
+ * Kernel-internal data types and definitions:
+ */
+
+#ifdef CONFIG_PERF_COUNTERS
+# include <asm/perf_counter.h>
+#endif
+
+#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
+#include <linux/spinlock.h>
+#include <linux/hrtimer.h>
+#include <linux/fs.h>
+#include <linux/pid_namespace.h>
+#include <asm/atomic.h>
+
+struct task_struct;
+
+/**
+ * struct hw_perf_counter - performance counter hardware details:
+ */
+struct hw_perf_counter {
+#ifdef CONFIG_PERF_COUNTERS
+ union {
+ struct { /* hardware */
+ u64 config;
+ unsigned long config_base;
+ unsigned long counter_base;
+ int idx;
+ };
+ union { /* software */
+ atomic64_t count;
+ struct hrtimer hrtimer;
+ };
+ };
+ atomic64_t prev_count;
+ u64 sample_period;
+ u64 last_period;
+ atomic64_t period_left;
+ u64 interrupts;
+
+ u64 freq_count;
+ u64 freq_interrupts;
+ u64 freq_stamp;
+#endif
+};
+
+struct perf_counter;
+
+/**
+ * struct pmu - generic performance monitoring unit
+ */
+struct pmu {
+ int (*enable) (struct perf_counter *counter);
+ void (*disable) (struct perf_counter *counter);
+ void (*read) (struct perf_counter *counter);
+ void (*unthrottle) (struct perf_counter *counter);
+};
+
+/**
+ * enum perf_counter_active_state - the states of a counter
+ */
+enum perf_counter_active_state {
+ PERF_COUNTER_STATE_ERROR = -2,
+ PERF_COUNTER_STATE_OFF = -1,
+ PERF_COUNTER_STATE_INACTIVE = 0,
+ PERF_COUNTER_STATE_ACTIVE = 1,
+};
+
+struct file;
+
+struct perf_mmap_data {
+ struct rcu_head rcu_head;
+ int nr_pages; /* nr of data pages */
+ int nr_locked; /* nr pages mlocked */
+
+ atomic_t poll; /* POLL_ for wakeups */
+ atomic_t events; /* event limit */
+
+ atomic_long_t head; /* write position */
+ atomic_long_t done_head; /* completed head */
+
+ atomic_t lock; /* concurrent writes */
+
+ atomic_t wakeup; /* needs a wakeup */
+
+ struct perf_counter_mmap_page *user_page;
+ void *data_pages[0];
+};
+
+struct perf_pending_entry {
+ struct perf_pending_entry *next;
+ void (*func)(struct perf_pending_entry *);
+};
+
+/**
+ * struct perf_counter - performance counter kernel representation:
+ */
+struct perf_counter {
+#ifdef CONFIG_PERF_COUNTERS
+ struct list_head list_entry;
+ struct list_head event_entry;
+ struct list_head sibling_list;
+ int nr_siblings;
+ struct perf_counter *group_leader;
+ const struct pmu *pmu;
+
+ enum perf_counter_active_state state;
+ atomic64_t count;
+
+ /*
+ * These are the total time in nanoseconds that the counter
+ * has been enabled (i.e. eligible to run, and the task has
+ * been scheduled in, if this is a per-task counter)
+ * and running (scheduled onto the CPU), respectively.
+ *
+ * They are computed from tstamp_enabled, tstamp_running and
+ * tstamp_stopped when the counter is in INACTIVE or ACTIVE state.
+ */
+ u64 total_time_enabled;
+ u64 total_time_running;
+
+ /*
+ * These are timestamps used for computing total_time_enabled
+ * and total_time_running when the counter is in INACTIVE or
+ * ACTIVE state, measured in nanoseconds from an arbitrary point
+ * in time.
+ * tstamp_enabled: the notional time when the counter was enabled
+ * tstamp_running: the notional time when the counter was scheduled on
+ * tstamp_stopped: in INACTIVE state, the notional time when the
+ * counter was scheduled off.
+ */
+ u64 tstamp_enabled;
+ u64 tstamp_running;
+ u64 tstamp_stopped;
+
+ struct perf_counter_attr attr;
+ struct hw_perf_counter hw;
+
+ struct perf_counter_context *ctx;
+ struct file *filp;
+
+ /*
+ * These accumulate total time (in nanoseconds) that children
+ * counters have been enabled and running, respectively.
+ */
+ atomic64_t child_total_time_enabled;
+ atomic64_t child_total_time_running;
+
+ /*
+ * Protect attach/detach and child_list:
+ */
+ struct mutex child_mutex;
+ struct list_head child_list;
+ struct perf_counter *parent;
+
+ int oncpu;
+ int cpu;
+
+ struct list_head owner_entry;
+ struct task_struct *owner;
+
+ /* mmap bits */
+ struct mutex mmap_mutex;
+ atomic_t mmap_count;
+ struct perf_mmap_data *data;
+
+ /* poll related */
+ wait_queue_head_t waitq;
+ struct fasync_struct *fasync;
+
+ /* delayed work for NMIs and such */
+ int pending_wakeup;
+ int pending_kill;
+ int pending_disable;
+ struct perf_pending_entry pending;
+
+ atomic_t event_limit;
+
+ void (*destroy)(struct perf_counter *);
+ struct rcu_head rcu_head;
+
+ struct pid_namespace *ns;
+ u64 id;
+#endif
+};
+
+/**
+ * struct perf_counter_context - counter context structure
+ *
+ * Used as a container for task counters and CPU counters as well:
+ */
+struct perf_counter_context {
+ /*
+ * Protect the states of the counters in the list,
+ * nr_active, and the list:
+ */
+ spinlock_t lock;
+ /*
+ * Protect the list of counters. Locking either mutex or lock
+ * is sufficient to ensure the list doesn't change; to change
+ * the list you need to lock both the mutex and the spinlock.
+ */
+ struct mutex mutex;
+
+ struct list_head counter_list;
+ struct list_head event_list;
+ int nr_counters;
+ int nr_active;
+ int is_active;
+ atomic_t refcount;
+ struct task_struct *task;
+
+ /*
+ * Context clock, runs when context enabled.
+ */
+ u64 time;
+ u64 timestamp;
+
+ /*
+ * These fields let us detect when two contexts have both
+ * been cloned (inherited) from a common ancestor.
+ */
+ struct perf_counter_context *parent_ctx;
+ u64 parent_gen;
+ u64 generation;
+ int pin_count;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct perf_counter_cpu_context - per cpu counter context structure
+ */
+struct perf_cpu_context {
+ struct perf_counter_context ctx;
+ struct perf_counter_context *task_ctx;
+ int active_oncpu;
+ int max_pertask;
+ int exclusive;
+
+ /*
+ * Recursion avoidance:
+ *
+ * task, softirq, irq, nmi context
+ */
+ int recursion[4];
+};
+
+#ifdef CONFIG_PERF_COUNTERS
+
+/*
+ * Set by architecture code:
+ */
+extern int perf_max_counters;
+
+extern const struct pmu *hw_perf_counter_init(struct perf_counter *counter);
+
+extern void perf_counter_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_counter_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu);
+extern void perf_counter_task_tick(struct task_struct *task, int cpu);
+extern int perf_counter_init_task(struct task_struct *child);
+extern void perf_counter_exit_task(struct task_struct *child);
+extern void perf_counter_free_task(struct task_struct *task);
+extern void perf_counter_do_pending(void);
+extern void perf_counter_print_debug(void);
+extern void __perf_disable(void);
+extern bool __perf_enable(void);
+extern void perf_disable(void);
+extern void perf_enable(void);
+extern int perf_counter_task_disable(void);
+extern int perf_counter_task_enable(void);
+extern int hw_perf_group_sched_in(struct perf_counter *group_leader,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx, int cpu);
+extern void perf_counter_update_userpage(struct perf_counter *counter);
+
+struct perf_sample_data {
+ struct pt_regs *regs;
+ u64 addr;
+ u64 period;
+};
+
+extern int perf_counter_overflow(struct perf_counter *counter, int nmi,
+ struct perf_sample_data *data);
+
+/*
+ * Return 1 for a software counter, 0 for a hardware counter
+ */
+static inline int is_software_counter(struct perf_counter *counter)
+{
+ return (counter->attr.type != PERF_TYPE_RAW) &&
+ (counter->attr.type != PERF_TYPE_HARDWARE) &&
+ (counter->attr.type != PERF_TYPE_HW_CACHE);
+}
+
+extern void perf_swcounter_event(u32, u64, int, struct pt_regs *, u64);
+
+extern void __perf_counter_mmap(struct vm_area_struct *vma);
+
+static inline void perf_counter_mmap(struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_EXEC)
+ __perf_counter_mmap(vma);
+}
+
+extern void perf_counter_comm(struct task_struct *tsk);
+extern void perf_counter_fork(struct task_struct *tsk);
+
+extern void perf_counter_task_migration(struct task_struct *task, int cpu);
+
+#define MAX_STACK_DEPTH 255
+
+struct perf_callchain_entry {
+ u16 nr;
+ u16 hv;
+ u16 kernel;
+ u16 user;
+ u64 ip[MAX_STACK_DEPTH];
+};
+
+extern struct perf_callchain_entry *perf_callchain(struct pt_regs *regs);
+
+extern int sysctl_perf_counter_paranoid;
+extern int sysctl_perf_counter_mlock;
+extern int sysctl_perf_counter_sample_rate;
+
+extern void perf_counter_init(void);
+
+#ifndef perf_misc_flags
+#define perf_misc_flags(regs) (user_mode(regs) ? PERF_EVENT_MISC_USER : \
+ PERF_EVENT_MISC_KERNEL)
+#define perf_instruction_pointer(regs) instruction_pointer(regs)
+#endif
+
+#else
+static inline void
+perf_counter_task_sched_in(struct task_struct *task, int cpu) { }
+static inline void
+perf_counter_task_sched_out(struct task_struct *task,
+ struct task_struct *next, int cpu) { }
+static inline void
+perf_counter_task_tick(struct task_struct *task, int cpu) { }
+static inline int perf_counter_init_task(struct task_struct *child) { return 0; }
+static inline void perf_counter_exit_task(struct task_struct *child) { }
+static inline void perf_counter_free_task(struct task_struct *task) { }
+static inline void perf_counter_do_pending(void) { }
+static inline void perf_counter_print_debug(void) { }
+static inline void perf_disable(void) { }
+static inline void perf_enable(void) { }
+static inline int perf_counter_task_disable(void) { return -EINVAL; }
+static inline int perf_counter_task_enable(void) { return -EINVAL; }
+
+static inline void
+perf_swcounter_event(u32 event, u64 nr, int nmi,
+ struct pt_regs *regs, u64 addr) { }
+
+static inline void perf_counter_mmap(struct vm_area_struct *vma) { }
+static inline void perf_counter_comm(struct task_struct *tsk) { }
+static inline void perf_counter_fork(struct task_struct *tsk) { }
+static inline void perf_counter_init(void) { }
+static inline void perf_counter_task_migration(struct task_struct *task,
+ int cpu) { }
+#endif
+
+#endif /* __KERNEL__ */
+#endif /* _LINUX_PERF_COUNTER_H */
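A user-space sketch of the self-monitoring read sequence described in the perf_counter_mmap_page comment above; not part of the patch. pmc_read() and barrier() stand in for the architecture-specific counter read and a compiler barrier, and pc is assumed to point at the mmap()ed control page.

#include <linux/perf_counter.h>

#define barrier() __asm__ __volatile__("" ::: "memory")

extern __u64 pmc_read(__u32 idx);	/* arch-specific hw counter read, assumed */

static __s64 read_self_counter(volatile struct perf_counter_mmap_page *pc)
{
	__u32 seq;
	__s64 count;

	do {
		seq = pc->lock;
		barrier();
		if (!pc->index)
			return -1;	/* not on a hw counter: fall back to read() on the fd */
		count = pmc_read(pc->index - 1) + pc->offset;
		barrier();
	} while (pc->lock != seq);	/* retry if the kernel updated the page meanwhile */

	return count;
}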
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index c8f0385..b43a9e0 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -152,5 +152,6 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
+void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
#endif
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 1d4e2d2..b3f7476 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -382,14 +382,13 @@ struct dev_pm_info {
#ifdef CONFIG_PM_SLEEP
extern void device_pm_lock(void);
extern int sysdev_resume(void);
-extern void device_power_up(pm_message_t state);
-extern void device_resume(pm_message_t state);
+extern void dpm_resume_noirq(pm_message_t state);
+extern void dpm_resume_end(pm_message_t state);
extern void device_pm_unlock(void);
extern int sysdev_suspend(pm_message_t state);
-extern int device_power_down(pm_message_t state);
-extern int device_suspend(pm_message_t state);
-extern int device_prepare_suspend(pm_message_t state);
+extern int dpm_suspend_noirq(pm_message_t state);
+extern int dpm_suspend_start(pm_message_t state);
extern void __suspend_report_result(const char *function, void *fn, int ret);
@@ -403,7 +402,7 @@ extern void __suspend_report_result(const char *function, void *fn, int ret);
#define device_pm_lock() do {} while (0)
#define device_pm_unlock() do {} while (0)
-static inline int device_suspend(pm_message_t state)
+static inline int dpm_suspend_start(pm_message_t state)
{
return 0;
}
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index ca3c887..b063c73 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -446,6 +446,7 @@ int pnp_start_dev(struct pnp_dev *dev);
int pnp_stop_dev(struct pnp_dev *dev);
int pnp_activate_dev(struct pnp_dev *dev);
int pnp_disable_dev(struct pnp_dev *dev);
+int pnp_range_reserved(resource_size_t start, resource_size_t end);
/* protocol helpers */
int pnp_is_active(struct pnp_dev *dev);
@@ -476,6 +477,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
+static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;}
/* protocol helpers */
static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
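
A hedged illustration of the new pnp_range_reserved() helper; the surrounding driver code is hypothetical, and a non-zero return is read here as "a PNP device already claims part of this range".

#include <linux/pnp.h>
#include <linux/types.h>

/* Hypothetical check before poking a fixed legacy I/O window. */
static bool legacy_window_is_free(void)
{
	return pnp_range_reserved(0x3f8, 0x3ff) == 0;
}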
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 48d887e..b00df4c 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -85,4 +85,7 @@
#define PR_SET_TIMERSLACK 29
#define PR_GET_TIMERSLACK 30
+#define PR_TASK_PERF_COUNTERS_DISABLE 31
+#define PR_TASK_PERF_COUNTERS_ENABLE 32
+
#endif /* _LINUX_PRCTL_H */
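
A hedged userspace sketch of the two new prctl commands; the fallback defines cover C libraries whose headers predate this kernel, and the program itself is illustrative only.

#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_TASK_PERF_COUNTERS_DISABLE
#define PR_TASK_PERF_COUNTERS_DISABLE	31
#define PR_TASK_PERF_COUNTERS_ENABLE	32
#endif

int main(void)
{
	/* Pause all perf counters attached to this task... */
	if (prctl(PR_TASK_PERF_COUNTERS_DISABLE, 0, 0, 0, 0))
		perror("PR_TASK_PERF_COUNTERS_DISABLE");

	/* ... run code that should not be counted ... */

	/* ... and resume counting afterwards. */
	if (prctl(PR_TASK_PERF_COUNTERS_ENABLE, 0, 0, 0, 0))
		perror("PR_TASK_PERF_COUNTERS_ENABLE");
	return 0;
}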
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index fbfa3d4..e6e77d3 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -93,20 +93,9 @@ struct vmcore {
#ifdef CONFIG_PROC_FS
-extern spinlock_t proc_subdir_lock;
-
extern void proc_root_init(void);
void proc_flush_task(struct task_struct *task);
-struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, struct nameidata *);
-int proc_pid_readdir(struct file * filp, void * dirent, filldir_t filldir);
-unsigned long task_vsize(struct mm_struct *);
-int task_statm(struct mm_struct *, int *, int *, int *, int *);
-void task_mem(struct seq_file *, struct mm_struct *);
-void clear_refs_smap(struct mm_struct *mm);
-
-struct proc_dir_entry *de_get(struct proc_dir_entry *de);
-void de_put(struct proc_dir_entry *de);
extern struct proc_dir_entry *create_proc_entry(const char *name, mode_t mode,
struct proc_dir_entry *parent);
@@ -116,20 +105,7 @@ struct proc_dir_entry *proc_create_data(const char *name, mode_t mode,
void *data);
extern void remove_proc_entry(const char *name, struct proc_dir_entry *parent);
-extern struct vfsmount *proc_mnt;
struct pid_namespace;
-extern int proc_fill_super(struct super_block *);
-extern struct inode *proc_get_inode(struct super_block *, unsigned int, struct proc_dir_entry *);
-
-/*
- * These are generic /proc routines that use the internal
- * "struct proc_dir_entry" tree to traverse the filesystem.
- *
- * The /proc root directory has extended versions to take care
- * of the /proc/<pid> subdirectories.
- */
-extern int proc_readdir(struct file *, void *, filldir_t);
-extern struct dentry *proc_lookup(struct inode *, struct dentry *, struct nameidata *);
extern int pid_ns_prepare_proc(struct pid_namespace *ns);
extern void pid_ns_release_proc(struct pid_namespace *ns);
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 67c1565..59e133d 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -95,7 +95,6 @@ extern void __ptrace_link(struct task_struct *child,
struct task_struct *new_parent);
extern void __ptrace_unlink(struct task_struct *child);
extern void exit_ptrace(struct task_struct *tracer);
-extern void ptrace_fork(struct task_struct *task, unsigned long clone_flags);
#define PTRACE_MODE_READ 1
#define PTRACE_MODE_ATTACH 2
/* Returns 0 on success, -errno on denial. */
@@ -327,15 +326,6 @@ static inline void user_enable_block_step(struct task_struct *task)
#define arch_ptrace_untrace(task) do { } while (0)
#endif
-#ifndef arch_ptrace_fork
-/*
- * Do machine-specific work to initialize a new task.
- *
- * This is called from copy_process().
- */
-#define arch_ptrace_fork(child, clone_flags) do { } while (0)
-#endif
-
extern int task_current_syscall(struct task_struct *target, long *callno,
unsigned long args[6], unsigned int maxargs,
unsigned long *sp, unsigned long *pc);
diff --git a/include/linux/qnx4_fs.h b/include/linux/qnx4_fs.h
index 787d19e..8b9aee1 100644
--- a/include/linux/qnx4_fs.h
+++ b/include/linux/qnx4_fs.h
@@ -85,65 +85,4 @@ struct qnx4_super_block {
struct qnx4_inode_entry AltBoot;
};
-#ifdef __KERNEL__
-
-#define QNX4_DEBUG 0
-
-#if QNX4_DEBUG
-#define QNX4DEBUG(X) printk X
-#else
-#define QNX4DEBUG(X) (void) 0
-#endif
-
-struct qnx4_sb_info {
- struct buffer_head *sb_buf; /* superblock buffer */
- struct qnx4_super_block *sb; /* our superblock */
- unsigned int Version; /* may be useful */
- struct qnx4_inode_entry *BitMap; /* useful */
-};
-
-struct qnx4_inode_info {
- struct qnx4_inode_entry raw;
- loff_t mmu_private;
- struct inode vfs_inode;
-};
-
-extern struct inode *qnx4_iget(struct super_block *, unsigned long);
-extern struct dentry *qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd);
-extern unsigned long qnx4_count_free_blocks(struct super_block *sb);
-extern unsigned long qnx4_block_map(struct inode *inode, long iblock);
-
-extern struct buffer_head *qnx4_bread(struct inode *, int, int);
-
-extern const struct inode_operations qnx4_file_inode_operations;
-extern const struct inode_operations qnx4_dir_inode_operations;
-extern const struct file_operations qnx4_file_operations;
-extern const struct file_operations qnx4_dir_operations;
-extern int qnx4_is_free(struct super_block *sb, long block);
-extern int qnx4_set_bitmap(struct super_block *sb, long block, int busy);
-extern int qnx4_create(struct inode *inode, struct dentry *dentry, int mode, struct nameidata *nd);
-extern void qnx4_truncate(struct inode *inode);
-extern void qnx4_free_inode(struct inode *inode);
-extern int qnx4_unlink(struct inode *dir, struct dentry *dentry);
-extern int qnx4_rmdir(struct inode *dir, struct dentry *dentry);
-extern int qnx4_sync_file(struct file *file, struct dentry *dentry, int);
-extern int qnx4_sync_inode(struct inode *inode);
-
-static inline struct qnx4_sb_info *qnx4_sb(struct super_block *sb)
-{
- return sb->s_fs_info;
-}
-
-static inline struct qnx4_inode_info *qnx4_i(struct inode *inode)
-{
- return container_of(inode, struct qnx4_inode_info, vfs_inode);
-}
-
-static inline struct qnx4_inode_entry *qnx4_raw_inode(struct inode *inode)
-{
- return &qnx4_i(inode)->raw;
-}
-
-#endif /* __KERNEL__ */
-
#endif
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 36353d9..7bc4575 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -20,7 +20,12 @@ static inline struct quota_info *sb_dqopt(struct super_block *sb)
/*
* declaration of quota_function calls in kernel.
*/
-void sync_dquots(struct super_block *sb, int type);
+void sync_quota_sb(struct super_block *sb, int type);
+static inline void writeout_quota_sb(struct super_block *sb, int type)
+{
+ if (sb->s_qcop->quota_sync)
+ sb->s_qcop->quota_sync(sb, type);
+}
int dquot_initialize(struct inode *inode, int type);
int dquot_drop(struct inode *inode);
@@ -253,12 +258,7 @@ static inline void vfs_dq_free_inode(struct inode *inode)
inode->i_sb->dq_op->free_inode(inode, 1);
}
-/* The following two functions cannot be called inside a transaction */
-static inline void vfs_dq_sync(struct super_block *sb)
-{
- sync_dquots(sb, -1);
-}
-
+/* Cannot be called inside a transaction */
static inline int vfs_dq_off(struct super_block *sb, int remount)
{
int ret = -ENOSYS;
@@ -334,7 +334,11 @@ static inline void vfs_dq_free_inode(struct inode *inode)
{
}
-static inline void vfs_dq_sync(struct super_block *sb)
+static inline void sync_quota_sb(struct super_block *sb, int type)
+{
+}
+
+static inline void writeout_quota_sb(struct super_block *sb, int type)
{
}
diff --git a/include/linux/rational.h b/include/linux/rational.h
new file mode 100644
index 0000000..4f532fc
--- /dev/null
+++ b/include/linux/rational.h
@@ -0,0 +1,19 @@
+/*
+ * rational fractions
+ *
+ * Copyright (C) 2009 emlix GmbH, Oskar Schirmer <os@emlix.com>
+ *
+ * helper functions for coping with rational numbers,
+ * e.g. when calculating optimum numerator/denominator pairs for
+ * PLL configuration, taking restricted register sizes into account
+ */
+
+#ifndef _LINUX_RATIONAL_H
+#define _LINUX_RATIONAL_H
+
+void rational_best_approximation(
+ unsigned long given_numerator, unsigned long given_denominator,
+ unsigned long max_numerator, unsigned long max_denominator,
+ unsigned long *best_numerator, unsigned long *best_denominator);
+
+#endif /* _LINUX_RATIONAL_H */
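
A minimal sketch of the helper declared above, assuming a caller that must squeeze a clock ratio into 8-bit numerator/denominator register fields; pick_audio_dividers() is hypothetical.

#include <linux/rational.h>

static void pick_audio_dividers(void)
{
	unsigned long num, den;

	/* Best N/D with N,D <= 255 approximating 44100/48000; 147/160 here. */
	rational_best_approximation(44100, 48000, 255, 255, &num, &den);
}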
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index e649bd3..5710f43 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -198,6 +198,32 @@ static inline void list_splice_init_rcu(struct list_head *list,
at->prev = last;
}
+/**
+ * list_entry_rcu - get the struct for this entry
+ * @ptr: the &struct list_head pointer.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_entry_rcu(ptr, type, member) \
+ container_of(rcu_dereference(ptr), type, member)
+
+/**
+ * list_first_entry_rcu - get the first element from a list
+ * @ptr: the list head to take the element from.
+ * @type: the type of the struct this is embedded in.
+ * @member: the name of the list_struct within the struct.
+ *
+ * Note that the list is expected to be non-empty.
+ *
+ * This primitive may safely run concurrently with the _rcu list-mutation
+ * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
+ */
+#define list_first_entry_rcu(ptr, type, member) \
+ list_entry_rcu((ptr)->next, type, member)
+
#define __list_for_each_rcu(pos, head) \
for (pos = rcu_dereference((head)->next); \
pos != (head); \
@@ -214,9 +240,9 @@ static inline void list_splice_init_rcu(struct list_head *list,
* as long as the traversal is guarded by rcu_read_lock().
*/
#define list_for_each_entry_rcu(pos, head, member) \
- for (pos = list_entry(rcu_dereference((head)->next), typeof(*pos), member); \
+ for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
- pos = list_entry(rcu_dereference(pos->member.next), typeof(*pos), member))
+ pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
/**
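
A short usage sketch of the new rculist accessors under rcu_read_lock(); struct item and sum_items() are hypothetical, and the list is assumed to be updated elsewhere with list_add_rcu()/list_del_rcu().

#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
	int value;
	struct list_head node;
};

static LIST_HEAD(item_list);	/* mutated elsewhere via list_add_rcu()/list_del_rcu() */

static int sum_items(void)
{
	struct item *it;
	int sum = 0;

	rcu_read_lock();
	/* The iterator now expands to list_entry_rcu() internally. */
	list_for_each_entry_rcu(it, &item_list, node)
		sum += it->value;
	rcu_read_unlock();

	return sum;
}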
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 58b2aa5..5a51538 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -161,8 +161,15 @@ struct rcu_data {
unsigned long offline_fqs; /* Kicked due to being offline. */
unsigned long resched_ipi; /* Sent a resched IPI. */
- /* 5) For future __rcu_pending statistics. */
+ /* 5) __rcu_pending() statistics. */
long n_rcu_pending; /* rcu_pending() calls since boot. */
+ long n_rp_qs_pending;
+ long n_rp_cb_ready;
+ long n_rp_cpu_needs_gp;
+ long n_rp_gp_completed;
+ long n_rp_gp_started;
+ long n_rp_need_fqs;
+ long n_rp_need_nothing;
int cpu;
};
diff --git a/include/linux/reiserfs_fs_sb.h b/include/linux/reiserfs_fs_sb.h
index 6473650..dab68bb 100644
--- a/include/linux/reiserfs_fs_sb.h
+++ b/include/linux/reiserfs_fs_sb.h
@@ -453,6 +453,7 @@ enum reiserfs_mount_options {
REISERFS_ATTRS,
REISERFS_XATTRS_USER,
REISERFS_POSIXACL,
+ REISERFS_EXPOSE_PRIVROOT,
REISERFS_BARRIER_NONE,
REISERFS_BARRIER_FLUSH,
@@ -490,6 +491,7 @@ enum reiserfs_mount_options {
#define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK))
#define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER))
#define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL))
+#define reiserfs_expose_privroot(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_EXPOSE_PRIVROOT))
#define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s))
#define reiserfs_barrier_none(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_NONE))
#define reiserfs_barrier_flush(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_BARRIER_FLUSH))
diff --git a/include/linux/reiserfs_xattr.h b/include/linux/reiserfs_xattr.h
index cdedc01..99928dc 100644
--- a/include/linux/reiserfs_xattr.h
+++ b/include/linux/reiserfs_xattr.h
@@ -41,6 +41,7 @@ int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
int reiserfs_lookup_privroot(struct super_block *sb);
int reiserfs_delete_xattrs(struct inode *inode);
int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs);
+int reiserfs_permission(struct inode *inode, int mask);
#ifdef CONFIG_REISERFS_FS_XATTR
#define has_xattr_dir(inode) (REISERFS_I(inode)->i_flags & i_has_xattr_dir)
@@ -50,7 +51,6 @@ int reiserfs_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
ssize_t reiserfs_listxattr(struct dentry *dentry, char *buffer, size_t size);
int reiserfs_removexattr(struct dentry *dentry, const char *name);
-int reiserfs_permission(struct inode *inode, int mask);
int reiserfs_xattr_get(struct inode *, const char *, void *, size_t);
int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int);
@@ -117,8 +117,6 @@ static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
#define reiserfs_listxattr NULL
#define reiserfs_removexattr NULL
-#define reiserfs_permission NULL
-
static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
{
}
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index e1b7b21..8670f15 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -11,7 +11,7 @@ struct ring_buffer_iter;
* Don't refer to this struct directly, use functions below.
*/
struct ring_buffer_event {
- u32 type:2, len:3, time_delta:27;
+ u32 type_len:5, time_delta:27;
u32 array[];
};
@@ -24,7 +24,8 @@ struct ring_buffer_event {
* size is variable depending on how much
* padding is needed
* If time_delta is non zero:
- * everything else same as RINGBUF_TYPE_DATA
+ * array[0] holds the actual length
+ * size = 4 + length (bytes)
*
* @RINGBUF_TYPE_TIME_EXTEND: Extend the time delta
* array[0] = time delta (28 .. 59)
@@ -35,22 +36,23 @@ struct ring_buffer_event {
* array[1..2] = tv_sec
* size = 16 bytes
*
- * @RINGBUF_TYPE_DATA: Data record
- * If len is zero:
+ * <= @RINGBUF_TYPE_DATA_TYPE_LEN_MAX:
+ * Data record
+ * If type_len is zero:
* array[0] holds the actual length
* array[1..(length+3)/4] holds data
- * size = 4 + 4 + length (bytes)
+ * size = 4 + length (bytes)
* else
- * length = len << 2
+ * length = type_len << 2
* array[0..(length+3)/4-1] holds data
* size = 4 + length (bytes)
*/
enum ring_buffer_type {
+ RINGBUF_TYPE_DATA_TYPE_LEN_MAX = 28,
RINGBUF_TYPE_PADDING,
RINGBUF_TYPE_TIME_EXTEND,
/* FIXME: RINGBUF_TYPE_TIME_STAMP not implemented */
RINGBUF_TYPE_TIME_STAMP,
- RINGBUF_TYPE_DATA,
};
unsigned ring_buffer_event_length(struct ring_buffer_event *event);
@@ -68,13 +70,54 @@ ring_buffer_event_time_delta(struct ring_buffer_event *event)
return event->time_delta;
}
+/*
+ * ring_buffer_event_discard can discard any event in the ring buffer.
+ * It is up to the caller to protect against a reader consuming it
+ * or a writer wrapping around and replacing it.
+ *
+ * No external protection is needed if this is called before
+ * the event is committed. But in that case it would be better to
+ * use ring_buffer_discard_commit.
+ *
+ * Note: if an event that has not been committed is discarded
+ * with ring_buffer_event_discard, it must still be committed.
+ */
void ring_buffer_event_discard(struct ring_buffer_event *event);
/*
+ * ring_buffer_discard_commit will remove an event that has not
+ * been committed yet. If this is used, then ring_buffer_unlock_commit
+ * must not be called on the discarded event. This function
+ * will try to remove the event from the ring buffer completely
+ * if another event has not been written after it.
+ *
+ * Example use:
+ *
+ * if (some_condition)
+ * ring_buffer_discard_commit(buffer, event);
+ * else
+ * ring_buffer_unlock_commit(buffer, event);
+ */
+void ring_buffer_discard_commit(struct ring_buffer *buffer,
+ struct ring_buffer_event *event);
+
+/*
* size is in bytes for each per CPU buffer.
*/
struct ring_buffer *
-ring_buffer_alloc(unsigned long size, unsigned flags);
+__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
+
+/*
+ * Because the ring buffer is generic, if other users of the ring buffer get
+ * traced by ftrace, it can produce lockdep warnings. We need to keep each
+ * ring buffer's lock class separate.
+ */
+#define ring_buffer_alloc(size, flags) \
+({ \
+ static struct lock_class_key __key; \
+ __ring_buffer_alloc((size), (flags), &__key); \
+})
+
void ring_buffer_free(struct ring_buffer *buffer);
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size);
@@ -122,6 +165,8 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer);
unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_nmi_dropped_cpu(struct ring_buffer *buffer, int cpu);
u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
@@ -137,6 +182,11 @@ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
size_t len, int cpu, int full);
+struct trace_seq;
+
+int ring_buffer_print_entry_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_seq *s);
+
enum ring_buffer_flags {
RB_FL_OVERWRITE = 1 << 0,
};
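
A hedged sketch of the allocation side: because ring_buffer_alloc() is now a macro that plants a static lock_class_key at each call site, every user gets its own lockdep class without extra code. my_buffer_init() and my_buffer_exit() are hypothetical.

#include <linux/errno.h>
#include <linux/ring_buffer.h>

static struct ring_buffer *my_buffer;

static int my_buffer_init(void)
{
	/* The macro passes this call site's static lock_class_key to
	 * __ring_buffer_alloc(), keeping lockdep classes separate. */
	my_buffer = ring_buffer_alloc(64 * 1024, RB_FL_OVERWRITE);
	if (!my_buffer)
		return -ENOMEM;
	return 0;
}

static void my_buffer_exit(void)
{
	ring_buffer_free(my_buffer);
}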
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d853f6b..fea9d18 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -77,6 +77,7 @@ struct sched_param {
#include <linux/proportions.h>
#include <linux/seccomp.h>
#include <linux/rcupdate.h>
+#include <linux/rculist.h>
#include <linux/rtmutex.h>
#include <linux/time.h>
@@ -96,8 +97,9 @@ struct exec_domain;
struct futex_pi_state;
struct robust_list_head;
struct bio;
-struct bts_tracer;
struct fs_struct;
+struct bts_context;
+struct perf_counter_context;
/*
* List of flags we want to share for kernel threads,
@@ -116,6 +118,7 @@ struct fs_struct;
* 11 bit fractions.
*/
extern unsigned long avenrun[]; /* Load averages */
+extern void get_avenrun(unsigned long *loads, unsigned long offset, int shift);
#define FSHIFT 11 /* nr of bits of precision */
#define FIXED_1 (1<<FSHIFT) /* 1.0 as fixed-point */
@@ -135,8 +138,9 @@ DECLARE_PER_CPU(unsigned long, process_counts);
extern int nr_processes(void);
extern unsigned long nr_running(void);
extern unsigned long nr_uninterruptible(void);
-extern unsigned long nr_active(void);
extern unsigned long nr_iowait(void);
+extern void calc_global_load(void);
+extern u64 cpu_nr_migrations(int cpu);
extern unsigned long get_parent_ip(unsigned long addr);
@@ -672,6 +676,10 @@ struct user_struct {
struct work_struct work;
#endif
#endif
+
+#ifdef CONFIG_PERF_COUNTERS
+ atomic_long_t locked_vm;
+#endif
};
extern int uids_sysfs_init(void);
@@ -838,7 +846,17 @@ struct sched_group {
*/
u32 reciprocal_cpu_power;
- unsigned long cpumask[];
+ /*
+ * The CPUs this group covers.
+ *
+ * NOTE: this field is variable length. (Allocated dynamically
+ * by attaching extra space to the end of the structure,
+ * depending on how many CPUs the kernel has booted up with)
+ *
+ * It can also be embedded into static data structures at build
+ * time. (See 'struct static_sched_group' in kernel/sched.c)
+ */
+ unsigned long cpumask[0];
};
static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
@@ -924,8 +942,17 @@ struct sched_domain {
char *name;
#endif
- /* span of all CPUs in this domain */
- unsigned long span[];
+ /*
+ * Span of all CPUs in this domain.
+ *
+ * NOTE: this field is variable length. (Allocated dynamically
+ * by attaching extra space to the end of the structure,
+ * depending on how many CPUs the kernel has booted up with)
+ *
+ * It can also be embedded into static data structures at build
+ * time. (See 'struct static_sched_domain' in kernel/sched.c)
+ */
+ unsigned long span[0];
};
static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
@@ -1052,9 +1079,10 @@ struct sched_entity {
u64 last_wakeup;
u64 avg_overlap;
+ u64 nr_migrations;
+
u64 start_runtime;
u64 avg_wakeup;
- u64 nr_migrations;
#ifdef CONFIG_SCHEDSTATS
u64 wait_start;
@@ -1209,18 +1237,11 @@ struct task_struct {
struct list_head ptraced;
struct list_head ptrace_entry;
-#ifdef CONFIG_X86_PTRACE_BTS
/*
* This is the tracer handle for the ptrace BTS extension.
* This field actually belongs to the ptracer task.
*/
- struct bts_tracer *bts;
- /*
- * The buffer to hold the BTS data.
- */
- void *bts_buffer;
- size_t bts_size;
-#endif /* CONFIG_X86_PTRACE_BTS */
+ struct bts_context *bts;
/* PID/PID hash table linkage. */
struct pid_link pids[PIDTYPE_MAX];
@@ -1247,7 +1268,9 @@ struct task_struct {
* credentials (COW) */
const struct cred *cred; /* effective (overridable) subjective task
* credentials (COW) */
- struct mutex cred_exec_mutex; /* execve vs ptrace cred calculation mutex */
+ struct mutex cred_guard_mutex; /* guard against foreign influences on
+ * credential calculations
+ * (notably ptrace) */
char comm[TASK_COMM_LEN]; /* executable name excluding path
- access with [gs]et_task_comm (which lock
@@ -1380,6 +1403,11 @@ struct task_struct {
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
#endif
+#ifdef CONFIG_PERF_COUNTERS
+ struct perf_counter_context *perf_counter_ctxp;
+ struct mutex perf_counter_mutex;
+ struct list_head perf_counter_list;
+#endif
#ifdef CONFIG_NUMA
struct mempolicy *mempolicy;
short il_next;
@@ -1428,7 +1456,9 @@ struct task_struct {
#ifdef CONFIG_TRACING
/* state flags for use by tracers */
unsigned long trace;
-#endif
+ /* bitmask of trace recursion */
+ unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
};
/* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -1885,6 +1915,7 @@ extern void sched_dead(struct task_struct *p);
extern void proc_caches_init(void);
extern void flush_signals(struct task_struct *);
+extern void __flush_signals(struct task_struct *);
extern void ignore_signals(struct task_struct *);
extern void flush_signal_handlers(struct task_struct *, int force_default);
extern int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info);
@@ -2001,8 +2032,10 @@ extern void set_task_comm(struct task_struct *tsk, char *from);
extern char *get_task_comm(char *to, struct task_struct *tsk);
#ifdef CONFIG_SMP
+extern void wait_task_context_switch(struct task_struct *p);
extern unsigned long wait_task_inactive(struct task_struct *, long match_state);
#else
+static inline void wait_task_context_switch(struct task_struct *p) {}
static inline unsigned long wait_task_inactive(struct task_struct *p,
long match_state)
{
@@ -2010,7 +2043,8 @@ static inline unsigned long wait_task_inactive(struct task_struct *p,
}
#endif
-#define next_task(p) list_entry(rcu_dereference((p)->tasks.next), struct task_struct, tasks)
+#define next_task(p) \
+ list_entry_rcu((p)->tasks.next, struct task_struct, tasks)
#define for_each_process(p) \
for (p = &init_task ; (p = next_task(p)) != &init_task ; )
@@ -2049,8 +2083,8 @@ int same_thread_group(struct task_struct *p1, struct task_struct *p2)
static inline struct task_struct *next_thread(const struct task_struct *p)
{
- return list_entry(rcu_dereference(p->thread_group.next),
- struct task_struct, thread_group);
+ return list_entry_rcu(p->thread_group.next,
+ struct task_struct, thread_group);
}
static inline int thread_group_empty(struct task_struct *p)
@@ -2394,6 +2428,13 @@ static inline void inc_syscw(struct task_struct *tsk)
#define TASK_SIZE_OF(tsk) TASK_SIZE
#endif
+/*
+ * Call the function if the target task is executing on a CPU right now:
+ */
+extern void task_oncpu_function_call(struct task_struct *p,
+ void (*func) (void *info), void *info);
+
+
#ifdef CONFIG_MM_OWNER
extern void mm_update_next_owner(struct mm_struct *mm);
extern void mm_init_owner(struct mm_struct *mm, struct task_struct *p);
diff --git a/include/linux/section-names.h b/include/linux/section-names.h
deleted file mode 100644
index c956f4e..0000000
--- a/include/linux/section-names.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __LINUX_SECTION_NAMES_H
-#define __LINUX_SECTION_NAMES_H
-
-#define HEAD_TEXT_SECTION .head.text
-
-#endif /* !__LINUX_SECTION_NAMES_H */
diff --git a/include/linux/security.h b/include/linux/security.h
index d5fd616..5eff459 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -2197,6 +2197,8 @@ static inline int security_file_mmap(struct file *file, unsigned long reqprot,
unsigned long addr,
unsigned long addr_only)
{
+ if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO))
+ return -EACCES;
return 0;
}
diff --git a/include/linux/serial.h b/include/linux/serial.h
index 9136cc5..e5bb75a 100644
--- a/include/linux/serial.h
+++ b/include/linux/serial.h
@@ -96,54 +96,76 @@ struct serial_uart_config {
/*
* Definitions for async_struct (and serial_struct) flags field
+ *
+ * Define ASYNCB_* for convenient use with {test,set,clear}_bit.
*/
-#define ASYNC_HUP_NOTIFY 0x0001 /* Notify getty on hangups and closes
- on the callout port */
-#define ASYNC_FOURPORT 0x0002 /* Set OU1, OUT2 per AST Fourport settings */
-#define ASYNC_SAK 0x0004 /* Secure Attention Key (Orange book) */
-#define ASYNC_SPLIT_TERMIOS 0x0008 /* Separate termios for dialin/callout */
-
-#define ASYNC_SPD_MASK 0x1030
-#define ASYNC_SPD_HI 0x0010 /* Use 56000 instead of 38400 bps */
-
-#define ASYNC_SPD_VHI 0x0020 /* Use 115200 instead of 38400 bps */
-#define ASYNC_SPD_CUST 0x0030 /* Use user-specified divisor */
-
-#define ASYNC_SKIP_TEST 0x0040 /* Skip UART test during autoconfiguration */
-#define ASYNC_AUTO_IRQ 0x0080 /* Do automatic IRQ during autoconfiguration */
-#define ASYNC_SESSION_LOCKOUT 0x0100 /* Lock out cua opens based on session */
-#define ASYNC_PGRP_LOCKOUT 0x0200 /* Lock out cua opens based on pgrp */
-#define ASYNC_CALLOUT_NOHUP 0x0400 /* Don't do hangups for cua device */
-
-#define ASYNC_HARDPPS_CD 0x0800 /* Call hardpps when CD goes high */
-
-#define ASYNC_SPD_SHI 0x1000 /* Use 230400 instead of 38400 bps */
-#define ASYNC_SPD_WARP 0x1010 /* Use 460800 instead of 38400 bps */
-
-#define ASYNC_LOW_LATENCY 0x2000 /* Request low latency behaviour */
-
-#define ASYNC_BUGGY_UART 0x4000 /* This is a buggy UART, skip some safety
- * checks. Note: can be dangerous! */
-
-#define ASYNC_AUTOPROBE 0x8000 /* Port was autoprobed by PCI or PNP code */
-
-#define ASYNC_FLAGS 0x7FFF /* Possible legal async flags */
-#define ASYNC_USR_MASK 0x3430 /* Legal flags that non-privileged
- * users can set or reset */
-
-/* Internal flags used only by kernel/chr_drv/serial.c */
-#define ASYNC_INITIALIZED 0x80000000 /* Serial port was initialized */
-#define ASYNC_NORMAL_ACTIVE 0x20000000 /* Normal device is active */
-#define ASYNC_BOOT_AUTOCONF 0x10000000 /* Autoconfigure port on bootup */
-#define ASYNC_CLOSING 0x08000000 /* Serial port is closing */
-#define ASYNC_CTS_FLOW 0x04000000 /* Do CTS flow control */
-#define ASYNC_CHECK_CD 0x02000000 /* i.e., CLOCAL */
-#define ASYNC_SHARE_IRQ 0x01000000 /* for multifunction cards
- --- no longer used */
-#define ASYNC_CONS_FLOW 0x00800000 /* flow control for console */
-
-#define ASYNC_BOOT_ONLYMCA 0x00400000 /* Probe only if MCA bus */
-#define ASYNC_INTERNAL_FLAGS 0xFFC00000 /* Internal flags */
+#define ASYNCB_HUP_NOTIFY 0 /* Notify getty on hangups and closes
+ * on the callout port */
+#define ASYNCB_FOURPORT 1 /* Set OU1, OUT2 per AST Fourport settings */
+#define ASYNCB_SAK 2 /* Secure Attention Key (Orange book) */
+#define ASYNCB_SPLIT_TERMIOS 3 /* Separate termios for dialin/callout */
+#define ASYNCB_SPD_HI 4 /* Use 56000 instead of 38400 bps */
+#define ASYNCB_SPD_VHI 5 /* Use 115200 instead of 38400 bps */
+#define ASYNCB_SKIP_TEST 6 /* Skip UART test during autoconfiguration */
+#define ASYNCB_AUTO_IRQ 7 /* Do automatic IRQ during
+ * autoconfiguration */
+#define ASYNCB_SESSION_LOCKOUT 8 /* Lock out cua opens based on session */
+#define ASYNCB_PGRP_LOCKOUT 9 /* Lock out cua opens based on pgrp */
+#define ASYNCB_CALLOUT_NOHUP 10 /* Don't do hangups for cua device */
+#define ASYNCB_HARDPPS_CD 11 /* Call hardpps when CD goes high */
+#define ASYNCB_SPD_SHI 12 /* Use 230400 instead of 38400 bps */
+#define ASYNCB_LOW_LATENCY 13 /* Request low latency behaviour */
+#define ASYNCB_BUGGY_UART 14 /* This is a buggy UART, skip some safety
+ * checks. Note: can be dangerous! */
+#define ASYNCB_AUTOPROBE 15 /* Port was autoprobed by PCI or PNP code */
+#define ASYNCB_LAST_USER 15
+
+/* Internal flags used only by kernel */
+#define ASYNCB_INITIALIZED 31 /* Serial port was initialized */
+#define ASYNCB_NORMAL_ACTIVE 29 /* Normal device is active */
+#define ASYNCB_BOOT_AUTOCONF 28 /* Autoconfigure port on bootup */
+#define ASYNCB_CLOSING 27 /* Serial port is closing */
+#define ASYNCB_CTS_FLOW 26 /* Do CTS flow control */
+#define ASYNCB_CHECK_CD 25 /* i.e., CLOCAL */
+#define ASYNCB_SHARE_IRQ 24 /* for multifunction cards, no longer used */
+#define ASYNCB_CONS_FLOW 23 /* flow control for console */
+#define ASYNCB_BOOT_ONLYMCA 22 /* Probe only if MCA bus */
+#define ASYNCB_FIRST_KERNEL 22
+
+#define ASYNC_HUP_NOTIFY (1U << ASYNCB_HUP_NOTIFY)
+#define ASYNC_FOURPORT (1U << ASYNCB_FOURPORT)
+#define ASYNC_SAK (1U << ASYNCB_SAK)
+#define ASYNC_SPLIT_TERMIOS (1U << ASYNCB_SPLIT_TERMIOS)
+#define ASYNC_SPD_HI (1U << ASYNCB_SPD_HI)
+#define ASYNC_SPD_VHI (1U << ASYNCB_SPD_VHI)
+#define ASYNC_SKIP_TEST (1U << ASYNCB_SKIP_TEST)
+#define ASYNC_AUTO_IRQ (1U << ASYNCB_AUTO_IRQ)
+#define ASYNC_SESSION_LOCKOUT (1U << ASYNCB_SESSION_LOCKOUT)
+#define ASYNC_PGRP_LOCKOUT (1U << ASYNCB_PGRP_LOCKOUT)
+#define ASYNC_CALLOUT_NOHUP (1U << ASYNCB_CALLOUT_NOHUP)
+#define ASYNC_HARDPPS_CD (1U << ASYNCB_HARDPPS_CD)
+#define ASYNC_SPD_SHI (1U << ASYNCB_SPD_SHI)
+#define ASYNC_LOW_LATENCY (1U << ASYNCB_LOW_LATENCY)
+#define ASYNC_BUGGY_UART (1U << ASYNCB_BUGGY_UART)
+#define ASYNC_AUTOPROBE (1U << ASYNCB_AUTOPROBE)
+
+#define ASYNC_FLAGS ((1U << ASYNCB_LAST_USER) - 1)
+#define ASYNC_USR_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI| \
+ ASYNC_CALLOUT_NOHUP|ASYNC_SPD_SHI|ASYNC_LOW_LATENCY)
+#define ASYNC_SPD_CUST (ASYNC_SPD_HI|ASYNC_SPD_VHI)
+#define ASYNC_SPD_WARP (ASYNC_SPD_HI|ASYNC_SPD_SHI)
+#define ASYNC_SPD_MASK (ASYNC_SPD_HI|ASYNC_SPD_VHI|ASYNC_SPD_SHI)
+
+#define ASYNC_INITIALIZED (1U << ASYNCB_INITIALIZED)
+#define ASYNC_NORMAL_ACTIVE (1U << ASYNCB_NORMAL_ACTIVE)
+#define ASYNC_BOOT_AUTOCONF (1U << ASYNCB_BOOT_AUTOCONF)
+#define ASYNC_CLOSING (1U << ASYNCB_CLOSING)
+#define ASYNC_CTS_FLOW (1U << ASYNCB_CTS_FLOW)
+#define ASYNC_CHECK_CD (1U << ASYNCB_CHECK_CD)
+#define ASYNC_SHARE_IRQ (1U << ASYNCB_SHARE_IRQ)
+#define ASYNC_CONS_FLOW (1U << ASYNCB_CONS_FLOW)
+#define ASYNC_BOOT_ONLYMCA (1U << ASYNCB_BOOT_ONLYMCA)
+#define ASYNC_INTERNAL_FLAGS (~((1U << ASYNCB_FIRST_KERNEL) - 1))
/*
* Multiport serial configuration structure --- external structure
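
A minimal sketch of what the new ASYNCB_* bit numbers are for, per the comment above; mark_port_ready() and the flags word are hypothetical stand-ins for a driver's port state.

#include <linux/bitops.h>
#include <linux/serial.h>

/* Hypothetical helper operating on a driver's port flags word. */
static void mark_port_ready(unsigned long *flags)
{
	if (test_bit(ASYNCB_CLOSING, flags))
		return;				/* port is being torn down */

	set_bit(ASYNCB_INITIALIZED, flags);	/* atomic, no open-coded masks */
	set_bit(ASYNCB_NORMAL_ACTIVE, flags);
}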
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 57a97e5..6fd80c4 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -41,7 +41,8 @@
#define PORT_XSCALE 15
#define PORT_RM9000 16 /* PMC-Sierra RM9xxx internal UART */
#define PORT_OCTEON 17 /* Cavium OCTEON internal UART */
-#define PORT_MAX_8250 17 /* max port ID */
+#define PORT_AR7 18 /* Texas Instruments AR7 internal UART */
+#define PORT_MAX_8250 18 /* max port ID */
/*
* ARM specific type numbers. These are not currently guaranteed
@@ -167,6 +168,9 @@
/* MAX3100 */
#define PORT_MAX3100 86
+/* Timberdale UART */
+#define PORT_TIMBUART 87
+
#ifdef __KERNEL__
#include <linux/compiler.h>
diff --git a/include/linux/serial_sci.h b/include/linux/serial_sci.h
index 893cc53..1c297dd 100644
--- a/include/linux/serial_sci.h
+++ b/include/linux/serial_sci.h
@@ -25,8 +25,7 @@ struct plat_sci_port {
unsigned int irqs[SCIx_NR_IRQS]; /* ERI, RXI, TXI, BRI */
unsigned int type; /* SCI / SCIF / IRDA */
upf_t flags; /* UPF_* flags */
+ char *clk; /* clock string */
};
-int early_sci_setup(struct uart_port *port);
-
#endif /* __LINUX_SERIAL_SCI_H */
diff --git a/include/linux/sh_cmt.h b/include/linux/sh_cmt.h
deleted file mode 100644
index 68cacde..0000000
--- a/include/linux/sh_cmt.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __SH_CMT_H__
-#define __SH_CMT_H__
-
-struct sh_cmt_config {
- char *name;
- unsigned long channel_offset;
- int timer_bit;
- char *clk;
- unsigned long clockevent_rating;
- unsigned long clocksource_rating;
-};
-
-#endif /* __SH_CMT_H__ */
diff --git a/include/linux/sh_timer.h b/include/linux/sh_timer.h
new file mode 100644
index 0000000..864bd56
--- /dev/null
+++ b/include/linux/sh_timer.h
@@ -0,0 +1,13 @@
+#ifndef __SH_TIMER_H__
+#define __SH_TIMER_H__
+
+struct sh_timer_config {
+ char *name;
+ long channel_offset;
+ int timer_bit;
+ char *clk;
+ unsigned long clockevent_rating;
+ unsigned long clocksource_rating;
+};
+
+#endif /* __SH_TIMER_H__ */
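
A hedged sketch of the platform data this new structure is meant to carry; every value below (offset, bit, clock name, ratings) is invented for illustration and would really come from the SoC manual.

#include <linux/sh_timer.h>

static struct sh_timer_config cmt0_platform_data = {
	.name			= "CMT0",	/* illustrative values only */
	.channel_offset		= 0x60,
	.timer_bit		= 5,
	.clk			= "cmt0",
	.clockevent_rating	= 125,
	.clocksource_rating	= 150,
};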
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 84f997f..c755283 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -235,6 +235,8 @@ static inline int valid_signal(unsigned long sig)
extern int next_signal(struct sigpending *pending, sigset_t *mask);
extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
+extern long do_rt_tgsigqueueinfo(pid_t tgid, pid_t pid, int sig,
+ siginfo_t *info);
extern long do_sigpending(void __user *, unsigned long);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern int show_unhandled_signals;
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 24c5602..219b8fb 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -62,6 +62,8 @@
# define SLAB_DEBUG_OBJECTS 0x00000000UL
#endif
+#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */
+
/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */
#define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */
@@ -317,4 +319,6 @@ static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
return kmalloc_node(size, flags | __GFP_ZERO, node);
}
+void __init kmem_cache_init_late(void);
+
#endif /* _LINUX_SLAB_H */
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index 5ac9b0b..713f841 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -14,7 +14,7 @@
#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
/* Size description struct for general caches. */
struct cache_sizes {
diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
index 0ec00b3..bb5368d 100644
--- a/include/linux/slob_def.h
+++ b/include/linux/slob_def.h
@@ -34,4 +34,9 @@ static __always_inline void *__kmalloc(size_t size, gfp_t flags)
return kmalloc(size, flags);
}
+static inline void kmem_cache_init_late(void)
+{
+ /* Nothing to do */
+}
+
#endif /* __LINUX_SLOB_DEF_H */
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 5046f90..4dcbc2c 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -10,7 +10,7 @@
#include <linux/gfp.h>
#include <linux/workqueue.h>
#include <linux/kobject.h>
-#include <trace/kmemtrace.h>
+#include <linux/kmemtrace.h>
enum stat_item {
ALLOC_FASTPATH, /* Allocation from cpu slab */
@@ -302,4 +302,6 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
}
#endif
+void __init kmem_cache_init_late(void);
+
#endif /* _LINUX_SLUB_DEF_H */
diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h
index 938234c..d4841ed 100644
--- a/include/linux/spinlock_up.h
+++ b/include/linux/spinlock_up.h
@@ -60,6 +60,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
#define __raw_spin_is_locked(lock) ((void)(lock), 0)
/* for sched.c and kernel_lock.c: */
# define __raw_spin_lock(lock) do { (void)(lock); } while (0)
+# define __raw_spin_lock_flags(lock, flags) do { (void)(lock); } while (0)
# define __raw_spin_unlock(lock) do { (void)(lock); } while (0)
# define __raw_spin_trylock(lock) ({ (void)(lock); 1; })
#endif /* DEBUG_SPINLOCK */
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 5f3faa9..18e7c7c 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -11,8 +11,7 @@
#include <linux/pipe_fs_i.h>
/*
- * splice is tied to pipes as a transport (at least for now), so we'll just
- * add the splice flags here.
+ * Flags passed in from splice/tee/vmsplice
*/
#define SPLICE_F_MOVE (0x01) /* move pages instead of copying */
#define SPLICE_F_NONBLOCK (0x02) /* don't block on the pipe splicing (but */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 795032e..cd15df6 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -245,11 +245,6 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
extern int hibernate(void);
-extern int hibernate_nvs_register(unsigned long start, unsigned long size);
-extern int hibernate_nvs_alloc(void);
-extern void hibernate_nvs_free(void);
-extern void hibernate_nvs_save(void);
-extern void hibernate_nvs_restore(void);
extern bool system_entering_hibernation(void);
#else /* CONFIG_HIBERNATION */
static inline int swsusp_page_is_forbidden(struct page *p) { return 0; }
@@ -258,6 +253,16 @@ static inline void swsusp_unset_page_free(struct page *p) {}
static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
static inline int hibernate(void) { return -ENOSYS; }
+static inline bool system_entering_hibernation(void) { return false; }
+#endif /* CONFIG_HIBERNATION */
+
+#ifdef CONFIG_HIBERNATION_NVS
+extern int hibernate_nvs_register(unsigned long start, unsigned long size);
+extern int hibernate_nvs_alloc(void);
+extern void hibernate_nvs_free(void);
+extern void hibernate_nvs_save(void);
+extern void hibernate_nvs_restore(void);
+#else /* CONFIG_HIBERNATION_NVS */
static inline int hibernate_nvs_register(unsigned long a, unsigned long b)
{
return 0;
@@ -266,8 +271,7 @@ static inline int hibernate_nvs_alloc(void) { return 0; }
static inline void hibernate_nvs_free(void) {}
static inline void hibernate_nvs_save(void) {}
static inline void hibernate_nvs_restore(void) {}
-static inline bool system_entering_hibernation(void) { return false; }
-#endif /* CONFIG_HIBERNATION */
+#endif /* CONFIG_HIBERNATION_NVS */
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 62d8143..d476aad 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -437,6 +437,11 @@ static inline int mem_cgroup_cache_charge_swapin(struct page *page,
return 0;
}
+static inline void
+mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
+{
+}
+
#endif /* CONFIG_SWAP */
#endif /* __KERNEL__*/
#endif /* _LINUX_SWAP_H */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index ac9ff54..cb1a663 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -29,7 +29,8 @@ extern void *swiotlb_alloc(unsigned order, unsigned long nslabs);
extern dma_addr_t swiotlb_phys_to_bus(struct device *hwdev,
phys_addr_t address);
-extern phys_addr_t swiotlb_bus_to_phys(dma_addr_t address);
+extern phys_addr_t swiotlb_bus_to_phys(struct device *hwdev,
+ dma_addr_t address);
extern int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 3052084..418d90f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -55,6 +55,7 @@ struct compat_timeval;
struct robust_list_head;
struct getcpu_cache;
struct old_linux_dirent;
+struct perf_counter_attr;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -755,4 +756,8 @@ asmlinkage long sys_pipe(int __user *);
int kernel_execve(const char *filename, char *const argv[], char *const envp[]);
+
+asmlinkage long sys_perf_counter_open(
+ struct perf_counter_attr __user *attr_uptr,
+ pid_t pid, int cpu, int group_fd, unsigned long flags);
#endif
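
A heavily hedged userspace sketch of the new syscall declared above: it assumes <linux/perf_counter.h> is exported to userspace, the __NR_perf_counter_open number is architecture-specific (and may need defining by hand against headers of this vintage), and the numeric type/config values simply select the hardware CPU-cycles event in this kernel's enums.

#include <linux/perf_counter.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_counter_open(struct perf_counter_attr *attr, pid_t pid,
			     int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_counter_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_counter_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = 0;		/* hardware events */
	attr.config = 0;	/* CPU cycles */

	/* Count for the calling task (pid 0) on any CPU, no group leader. */
	fd = perf_counter_open(&attr, 0, -1, -1, 0);
	if (fd >= 0)
		close(fd);	/* read() on fd would return the current count */
	return 0;
}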
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index e6b820f..a8cc4e1 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -21,13 +21,14 @@ struct restart_block {
struct {
unsigned long arg0, arg1, arg2, arg3;
};
- /* For futex_wait */
+ /* For futex_wait and futex_wait_requeue_pi */
struct {
u32 *uaddr;
u32 val;
u32 flags;
u32 bitset;
u64 time;
+ u32 *uaddr2;
} futex;
/* For nanosleep */
struct {
diff --git a/include/linux/time.h b/include/linux/time.h
index 242f624..ea16c1a 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -113,6 +113,21 @@ struct timespec current_kernel_time(void);
#define CURRENT_TIME (current_kernel_time())
#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
+/* Some architectures do not supply their own clocksource.
+ * This is mainly the case in architectures that get their
+ * inter-tick times by reading the counter on their interval
+ * timer. Since these timers wrap every tick, they're not really
+ * useful as clocksources. Wrapping them to act like one is possible
+ * but not very efficient. So we provide a callout that these arches
+ * can implement for use with the jiffies clocksource to provide
+ * finer-than-tick time granularity.
+ */
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+extern u32 arch_gettimeoffset(void);
+#else
+static inline u32 arch_gettimeoffset(void) { return 0; }
+#endif
+
extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday(struct timespec *tv);
extern int do_sys_settimeofday(struct timespec *tv, struct timezone *tz);
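
A hedged sketch of what an architecture selecting CONFIG_ARCH_USES_GETTIMEOFFSET might provide; the counter read and the per-count scaling below are invented, and the returned value is assumed to be nanoseconds layered on top of the jiffies clocksource by the timekeeping core.

#include <linux/time.h>

#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
#define TIMER_NSEC_PER_COUNT	40	/* invented: a 25 MHz interval timer */

static inline u32 read_interval_timer_count(void)
{
	return 0;	/* a real port reads its interval-timer register here */
}

u32 arch_gettimeoffset(void)
{
	/* Time elapsed since the last tick, giving sub-tick resolution. */
	return read_interval_timer_count() * TIMER_NSEC_PER_COUNT;
}
#endif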
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
new file mode 100644
index 0000000..c68bccb
--- /dev/null
+++ b/include/linux/trace_seq.h
@@ -0,0 +1,92 @@
+#ifndef _LINUX_TRACE_SEQ_H
+#define _LINUX_TRACE_SEQ_H
+
+#include <linux/fs.h>
+
+/*
+ * Trace sequences are used to allow a function to call several other functions
+ * to create a string of data to use (up to a max of PAGE_SIZE).
+ */
+
+struct trace_seq {
+ unsigned char buffer[PAGE_SIZE];
+ unsigned int len;
+ unsigned int readpos;
+};
+
+static inline void
+trace_seq_init(struct trace_seq *s)
+{
+ s->len = 0;
+ s->readpos = 0;
+}
+
+/*
+ * Currently only defined when tracing is enabled.
+ */
+#ifdef CONFIG_TRACING
+extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+ __attribute__ ((format (printf, 2, 3)));
+extern int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
+ __attribute__ ((format (printf, 2, 0)));
+extern int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
+extern void trace_print_seq(struct seq_file *m, struct trace_seq *s);
+extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+ size_t cnt);
+extern int trace_seq_puts(struct trace_seq *s, const char *str);
+extern int trace_seq_putc(struct trace_seq *s, unsigned char c);
+extern int trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len);
+extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+ size_t len);
+extern void *trace_seq_reserve(struct trace_seq *s, size_t len);
+extern int trace_seq_path(struct trace_seq *s, struct path *path);
+
+#else /* CONFIG_TRACING */
+static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
+{
+ return 0;
+}
+static inline int
+trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
+{
+ return 0;
+}
+
+static inline void trace_print_seq(struct seq_file *m, struct trace_seq *s)
+{
+}
+static inline ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
+ size_t cnt)
+{
+ return 0;
+}
+static inline int trace_seq_puts(struct trace_seq *s, const char *str)
+{
+ return 0;
+}
+static inline int trace_seq_putc(struct trace_seq *s, unsigned char c)
+{
+ return 0;
+}
+static inline int
+trace_seq_putmem(struct trace_seq *s, const void *mem, size_t len)
+{
+ return 0;
+}
+static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
+ size_t len)
+{
+ return 0;
+}
+static inline void *trace_seq_reserve(struct trace_seq *s, size_t len)
+{
+ return NULL;
+}
+static inline int trace_seq_path(struct trace_seq *s, struct path *path)
+{
+ return 0;
+}
+#endif /* CONFIG_TRACING */
+
+#endif /* _LINUX_TRACE_SEQ_H */
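
A minimal sketch of the intended usage pattern, assuming a hypothetical show_stats() handler: build up formatted text in a trace_seq, then hand it to a seq_file. With !CONFIG_TRACING the calls become no-op stubs, and since struct trace_seq embeds a PAGE_SIZE buffer it should not live on the stack.

#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/trace_seq.h>

static void show_stats(struct seq_file *m, int cpu, u64 hits, u64 misses)
{
	struct trace_seq *s;

	s = kmalloc(sizeof(*s), GFP_KERNEL);	/* too big for the stack */
	if (!s)
		return;

	trace_seq_init(s);
	trace_seq_printf(s, "cpu %d: hits=%llu misses=%llu\n", cpu,
			 (unsigned long long)hits, (unsigned long long)misses);

	trace_print_seq(m, s);	/* copies the accumulated text into the seq_file */
	kfree(s);
}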
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index c7aa154..eb96603 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -259,14 +259,12 @@ static inline void tracehook_finish_clone(struct task_struct *child,
/**
* tracehook_report_clone - in parent, new child is about to start running
- * @trace: return value from tracehook_prepare_clone()
* @regs: parent's user register state
* @clone_flags: flags from parent's system call
* @pid: new child's PID in the parent's namespace
* @child: new child task
*
- * Called after a child is set up, but before it has been started
- * running. @trace is the value returned by tracehook_prepare_clone().
+ * Called after a child is set up, but before it has been started running.
* This is not a good place to block, because the child has not started
* yet. Suspend the child here if desired, and then block in
* tracehook_report_clone_complete(). This must prevent the child from
@@ -276,13 +274,14 @@ static inline void tracehook_finish_clone(struct task_struct *child,
*
* Called with no locks held, but the child cannot run until this returns.
*/
-static inline void tracehook_report_clone(int trace, struct pt_regs *regs,
+static inline void tracehook_report_clone(struct pt_regs *regs,
unsigned long clone_flags,
pid_t pid, struct task_struct *child)
{
- if (unlikely(trace) || unlikely(clone_flags & CLONE_PTRACE)) {
+ if (unlikely(task_ptrace(child))) {
/*
- * The child starts up with an immediate SIGSTOP.
+ * It doesn't matter who attached or is attaching to this
+ * task; the pending SIGSTOP is right in any case.
*/
sigaddset(&child->pending.signal, SIGSTOP);
set_tsk_thread_flag(child, TIF_SIGPENDING);
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index d35a7ee..14df7e6 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -31,6 +31,8 @@ struct tracepoint {
* Keep in sync with vmlinux.lds.h.
*/
+#ifndef DECLARE_TRACE
+
#define TP_PROTO(args...) args
#define TP_ARGS(args...) args
@@ -114,6 +116,7 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
struct tracepoint *end)
{ }
#endif /* CONFIG_TRACEPOINTS */
+#endif /* DECLARE_TRACE */
/*
* Connect a probe to a tracepoint.
@@ -154,10 +157,8 @@ static inline void tracepoint_synchronize_unregister(void)
}
#define PARAMS(args...) args
-#define TRACE_FORMAT(name, proto, args, fmt) \
- DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
-
+#ifndef TRACE_EVENT
/*
* For use with the TRACE_EVENT macro:
*
@@ -262,5 +263,6 @@ static inline void tracepoint_synchronize_unregister(void)
#define TRACE_EVENT(name, proto, args, struct, assign, print) \
DECLARE_TRACE(name, PARAMS(proto), PARAMS(args))
+#endif
#endif
diff --git a/include/linux/tty.h b/include/linux/tty.h
index fc39db9..1488d8c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -185,7 +185,7 @@ struct tty_port;
struct tty_port_operations {
/* Return 1 if the carrier is raised */
int (*carrier_raised)(struct tty_port *port);
- void (*raise_dtr_rts)(struct tty_port *port);
+ void (*dtr_rts)(struct tty_port *port, int raise);
};
struct tty_port {
@@ -201,6 +201,9 @@ struct tty_port {
unsigned char *xmit_buf; /* Optional buffer */
int close_delay; /* Close port delay */
int closing_wait; /* Delay for output */
+ int drain_delay; /* Set to zero if no pure time-based
+ drain is needed, else set to
+ the size of the fifo */
};
/*
@@ -223,8 +226,11 @@ struct tty_struct {
struct tty_driver *driver;
const struct tty_operations *ops;
int index;
- /* The ldisc objects are protected by tty_ldisc_lock at the moment */
- struct tty_ldisc ldisc;
+
+ /* Protects ldisc changes: Lock tty not pty */
+ struct mutex ldisc_mutex;
+ struct tty_ldisc *ldisc;
+
struct mutex termios_mutex;
spinlock_t ctrl_lock;
/* Termios values are protected by the termios mutex */
@@ -311,6 +317,7 @@ struct tty_struct {
#define TTY_CLOSING 7 /* ->close() in progress */
#define TTY_LDISC 9 /* Line discipline attached */
#define TTY_LDISC_CHANGING 10 /* Line discipline changing */
+#define TTY_LDISC_OPEN 11 /* Line discipline is open */
#define TTY_HW_COOK_OUT 14 /* Hardware can do output cooking */
#define TTY_HW_COOK_IN 15 /* Hardware can do input cooking */
#define TTY_PTY_LOCK 16 /* pty private */
@@ -403,6 +410,7 @@ extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
extern struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *);
+extern void tty_ldisc_hangup(struct tty_struct *tty);
extern const struct file_operations tty_ldiscs_proc_fops;
extern void tty_wakeup(struct tty_struct *tty);
@@ -425,6 +433,9 @@ extern struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx,
extern void tty_release_dev(struct file *filp);
extern int tty_init_termios(struct tty_struct *tty);
+extern struct tty_struct *tty_pair_get_tty(struct tty_struct *tty);
+extern struct tty_struct *tty_pair_get_pty(struct tty_struct *tty);
+
extern struct mutex tty_mutex;
extern void tty_write_unlock(struct tty_struct *tty);
@@ -438,6 +449,7 @@ extern struct tty_struct *tty_port_tty_get(struct tty_port *port);
extern void tty_port_tty_set(struct tty_port *port, struct tty_struct *tty);
extern int tty_port_carrier_raised(struct tty_port *port);
extern void tty_port_raise_dtr_rts(struct tty_port *port);
+extern void tty_port_lower_dtr_rts(struct tty_port *port);
extern void tty_port_hangup(struct tty_port *port);
extern int tty_port_block_til_ready(struct tty_port *port,
struct tty_struct *tty, struct file *filp);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index bcba84e..3566129 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -127,7 +127,8 @@
* the line discipline are close to full, and it should somehow
* signal that no more characters should be sent to the tty.
*
- * Optional: Always invoke via tty_throttle();
+ * Optional: Always invoke via tty_throttle(), called under the
+ * termios lock.
*
* void (*unthrottle)(struct tty_struct * tty);
*
@@ -135,7 +136,8 @@
* that characters can now be sent to the tty without fear of
* overrunning the input buffers of the line disciplines.
*
- * Optional: Always invoke via tty_unthrottle();
+ * Optional: Always invoke via tty_unthrottle(), called under the
+ * termios lock.
*
* void (*stop)(struct tty_struct *tty);
*
diff --git a/include/linux/ultrasound.h b/include/linux/ultrasound.h
index 6b7703e..71339dc 100644
--- a/include/linux/ultrasound.h
+++ b/include/linux/ultrasound.h
@@ -34,7 +34,7 @@
* _GUS_VOICEOFF - Stops voice (no parameters)
* _GUS_VOICEFADE - Stops the voice smoothly.
* _GUS_VOICEMODE - Alters the voice mode, don't start or stop voice (P1=voice mode)
- * _GUS_VOICEBALA - Sets voice balence (P1, 0=left, 7=middle and 15=right, default 7)
+ * _GUS_VOICEBALA - Sets voice balance (P1, 0=left, 7=middle and 15=right, default 7)
* _GUS_VOICEFREQ - Sets voice (sample) playback frequency (P1=Hz)
* _GUS_VOICEVOL - Sets voice volume (P1=volume, 0xfff=max, 0xeff=half, 0x000=off)
* _GUS_VOICEVOL2 - Sets voice volume (P1=volume, 0xfff=max, 0xeff=half, 0x000=off)
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 625e9e4..8cdfed7 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -224,8 +224,7 @@ struct usb_serial_driver {
/* Called by console with tty = NULL and by tty */
int (*open)(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
- void (*close)(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+ void (*close)(struct usb_serial_port *port);
int (*write)(struct tty_struct *tty, struct usb_serial_port *port,
const unsigned char *buf, int count);
/* Called only by the tty layer */
@@ -241,6 +240,10 @@ struct usb_serial_driver {
int (*tiocmget)(struct tty_struct *tty, struct file *file);
int (*tiocmset)(struct tty_struct *tty, struct file *file,
unsigned int set, unsigned int clear);
+ /* Called by the tty layer for port-level work. There may or may not
+ be an attached tty at this point. */
+ void (*dtr_rts)(struct usb_serial_port *port, int on);
+ int (*carrier_raised)(struct usb_serial_port *port);
/* USB events */
void (*read_int_callback)(struct urb *urb);
void (*write_int_callback)(struct urb *urb);
@@ -283,8 +286,7 @@ extern int usb_serial_generic_open(struct tty_struct *tty,
struct usb_serial_port *port, struct file *filp);
extern int usb_serial_generic_write(struct tty_struct *tty,
struct usb_serial_port *port, const unsigned char *buf, int count);
-extern void usb_serial_generic_close(struct tty_struct *tty,
- struct usb_serial_port *port, struct file *filp);
+extern void usb_serial_generic_close(struct usb_serial_port *port);
extern int usb_serial_generic_resume(struct usb_serial *serial);
extern int usb_serial_generic_write_room(struct tty_struct *tty);
extern int usb_serial_generic_chars_in_buffer(struct tty_struct *tty);
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 06005fa..4fca4f5 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -10,14 +10,17 @@
/**
* virtqueue - a queue to register buffers for sending or receiving.
+ * @list: the chain of virtqueues for this device
* @callback: the function to call when buffers are consumed (can be NULL).
+ * @name: the name of this virtqueue (mainly for debugging)
* @vdev: the virtio device this queue was created for.
* @vq_ops: the operations for this virtqueue (see below).
* @priv: a pointer for the virtqueue implementation to use.
*/
-struct virtqueue
-{
+struct virtqueue {
+ struct list_head list;
void (*callback)(struct virtqueue *vq);
+ const char *name;
struct virtio_device *vdev;
struct virtqueue_ops *vq_ops;
void *priv;
@@ -76,15 +79,16 @@ struct virtqueue_ops {
* @dev: underlying device.
* @id: the device type identification (used to match it with a driver).
* @config: the configuration ops for this device.
+ * @vqs: the list of virtqueues for this device.
* @features: the features supported by both driver and device.
* @priv: private pointer for the driver's use.
*/
-struct virtio_device
-{
+struct virtio_device {
int index;
struct device dev;
struct virtio_device_id id;
struct virtio_config_ops *config;
+ struct list_head vqs;
/* Note that this is a Linux set_bit-style bitmap. */
unsigned long features[1];
void *priv;
@@ -99,8 +103,7 @@ void unregister_virtio_device(struct virtio_device *dev);
* @id_table: the ids serviced by this driver.
* @feature_table: an array of feature numbers supported by this device.
* @feature_table_size: number of entries in the feature table array.
- * @probe: the function to call when a device is found. Returns a token for
- * remove, or PTR_ERR().
+ * @probe: the function to call when a device is found. Returns 0 or -errno.
* @remove: the function when a device is removed.
* @config_changed: optional function to call when the device configuration
* changes; may be called in interrupt context.
diff --git a/include/linux/virtio_blk.h b/include/linux/virtio_blk.h
index 94c56d2..be7d255 100644
--- a/include/linux/virtio_blk.h
+++ b/include/linux/virtio_blk.h
@@ -15,6 +15,10 @@
#define VIRTIO_BLK_F_GEOMETRY 4 /* Legacy geometry available */
#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
#define VIRTIO_BLK_F_BLK_SIZE 6 /* Block size of disk is available*/
+#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
+#define VIRTIO_BLK_F_IDENTIFY 8 /* ATA IDENTIFY supported */
+
+#define VIRTIO_BLK_ID_BYTES (sizeof(__u16[256])) /* IDENTIFY DATA */
struct virtio_blk_config
{
@@ -32,6 +36,7 @@ struct virtio_blk_config
} geometry;
/* block size of device (if VIRTIO_BLK_F_BLK_SIZE) */
__u32 blk_size;
+ __u8 identify[VIRTIO_BLK_ID_BYTES];
} __attribute__((packed));
/* These two define direction. */
@@ -55,6 +60,13 @@ struct virtio_blk_outhdr
__u64 sector;
};
+struct virtio_scsi_inhdr {
+ __u32 errors;
+ __u32 data_len;
+ __u32 sense_len;
+ __u32 residual;
+};
+
/* And this is the final byte of the write scatter-gather list. */
#define VIRTIO_BLK_S_OK 0
#define VIRTIO_BLK_S_IOERR 1
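A short sketch of how a block driver might test for the new feature bits; virtio_has_feature() comes from virtio_config.h further down in this diff, and the function name here is hypothetical:

#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/virtio_config.h>

static void example_check_blk_features(struct virtio_device *vdev)
{
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_SCSI))
		; /* host accepts SCSI command passthrough requests */

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_IDENTIFY))
		; /* config space carries VIRTIO_BLK_ID_BYTES of IDENTIFY data */
}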
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index bf8ec28..99f5145 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -29,6 +29,7 @@
#define VIRTIO_F_NOTIFY_ON_EMPTY 24
#ifdef __KERNEL__
+#include <linux/err.h>
#include <linux/virtio.h>
/**
@@ -49,15 +50,26 @@
* @set_status: write the status byte
* vdev: the virtio_device
* status: the new status byte
+ * @request_vqs: request the specified number of virtqueues
+ * vdev: the virtio_device
+ * max_vqs: the max number of virtqueues we want
+ * If supplied, must call before any virtqueues are instantiated.
+ * To modify the max number of virtqueues after request_vqs has been
+ * called, call free_vqs and then request_vqs with a new value.
+ * @free_vqs: cleanup resources allocated by request_vqs
+ * vdev: the virtio_device
+ * If supplied, must call after all virtqueues have been deleted.
* @reset: reset the device
* vdev: the virtio device
* After this, status and feature negotiation must be done again
- * @find_vq: find a virtqueue and instantiate it.
+ * @find_vqs: find virtqueues and instantiate them.
* vdev: the virtio_device
- * index: the 0-based virtqueue number in case there's more than one.
- * callback: the virqtueue callback
- * Returns the new virtqueue or ERR_PTR() (eg. -ENOENT).
- * @del_vq: free a virtqueue found by find_vq().
+ * nvqs: the number of virtqueues to find
+ * vqs: on success, includes new virtqueues
+ * callbacks: array of callbacks, for each virtqueue
+ * names: array of virtqueue names (mainly for debugging)
+ * Returns 0 on success or error status
+ * @del_vqs: free virtqueues found by find_vqs().
* @get_features: get the array of feature bits for this device.
* vdev: the virtio_device
* Returns the first 32 feature bits (all we currently need).
@@ -66,6 +78,7 @@
* This gives the final feature bits for the device: it can change
* the dev->feature bits if it wants.
*/
+typedef void vq_callback_t(struct virtqueue *);
struct virtio_config_ops
{
void (*get)(struct virtio_device *vdev, unsigned offset,
@@ -75,10 +88,11 @@ struct virtio_config_ops
u8 (*get_status)(struct virtio_device *vdev);
void (*set_status)(struct virtio_device *vdev, u8 status);
void (*reset)(struct virtio_device *vdev);
- struct virtqueue *(*find_vq)(struct virtio_device *vdev,
- unsigned index,
- void (*callback)(struct virtqueue *));
- void (*del_vq)(struct virtqueue *vq);
+ int (*find_vqs)(struct virtio_device *, unsigned nvqs,
+ struct virtqueue *vqs[],
+ vq_callback_t *callbacks[],
+ const char *names[]);
+ void (*del_vqs)(struct virtio_device *);
u32 (*get_features)(struct virtio_device *vdev);
void (*finalize_features)(struct virtio_device *vdev);
};
@@ -99,7 +113,9 @@ static inline bool virtio_has_feature(const struct virtio_device *vdev,
if (__builtin_constant_p(fbit))
BUILD_BUG_ON(fbit >= 32);
- virtio_check_driver_offered_feature(vdev, fbit);
+ if (fbit < VIRTIO_TRANSPORT_F_START)
+ virtio_check_driver_offered_feature(vdev, fbit);
+
return test_bit(fbit, vdev->features);
}
@@ -126,5 +142,18 @@ static inline int virtio_config_buf(struct virtio_device *vdev,
vdev->config->get(vdev, offset, buf, len);
return 0;
}
+
+static inline
+struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
+ vq_callback_t *c, const char *n)
+{
+ vq_callback_t *callbacks[] = { c };
+ const char *names[] = { n };
+ struct virtqueue *vq;
+ int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names);
+ if (err < 0)
+ return ERR_PTR(err);
+ return vq;
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_VIRTIO_CONFIG_H */
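A sketch of the new multi-queue setup path, assuming a hypothetical driver with one RX and one TX queue; only the calls shown in this header are used, and error handling is kept to a minimum:

#include <linux/err.h>
#include <linux/virtio.h>
#include <linux/virtio_config.h>

static void example_rx_done(struct virtqueue *vq) { /* ... */ }
static void example_tx_done(struct virtqueue *vq) { /* ... */ }

static int example_setup_vqs(struct virtio_device *vdev,
			     struct virtqueue **rx, struct virtqueue **tx)
{
	struct virtqueue *vqs[2];
	vq_callback_t *callbacks[] = { example_rx_done, example_tx_done };
	const char *names[] = { "rx", "tx" };
	int err;

	/* one call finds every queue, replacing repeated find_vq() calls */
	err = vdev->config->find_vqs(vdev, 2, vqs, callbacks, names);
	if (err)
		return err;

	*rx = vqs[0];
	*tx = vqs[1];
	return 0;
}

/* Single-queue devices can use the convenience wrapper added above. */
static struct virtqueue *example_single_vq(struct virtio_device *vdev)
{
	return virtio_find_single_vq(vdev, example_rx_done, "requests");
}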
diff --git a/include/linux/virtio_pci.h b/include/linux/virtio_pci.h
index cd0fd5d..9a3d7c4 100644
--- a/include/linux/virtio_pci.h
+++ b/include/linux/virtio_pci.h
@@ -47,9 +47,17 @@
/* The bit of the ISR which indicates a device configuration change. */
#define VIRTIO_PCI_ISR_CONFIG 0x2
+/* MSI-X registers: only enabled if MSI-X is enabled. */
+/* A 16-bit vector for configuration changes. */
+#define VIRTIO_MSI_CONFIG_VECTOR 20
+/* A 16-bit vector for selected queue notifications. */
+#define VIRTIO_MSI_QUEUE_VECTOR 22
+/* Vector value used to disable MSI for queue */
+#define VIRTIO_MSI_NO_VECTOR 0xffff
+
/* The remaining space is defined by each driver as the per-driver
* configuration space */
-#define VIRTIO_PCI_CONFIG 20
+#define VIRTIO_PCI_CONFIG(dev) ((dev)->msix_enabled ? 24 : 20)
/* Virtio ABI version, this must match exactly */
#define VIRTIO_PCI_ABI_VERSION 0
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index 71e0372..693e0ec 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -14,6 +14,8 @@
#define VRING_DESC_F_NEXT 1
/* This marks a buffer as write-only (otherwise read-only). */
#define VRING_DESC_F_WRITE 2
+/* This means the buffer contains a list of buffer descriptors. */
+#define VRING_DESC_F_INDIRECT 4
/* The Host uses this in used->flags to advise the Guest: don't kick me when
* you add a buffer. It's unreliable, so it's simply an optimization. Guest
@@ -24,6 +26,9 @@
* optimization. */
#define VRING_AVAIL_F_NO_INTERRUPT 1
+/* We support indirect buffer descriptors */
+#define VIRTIO_RING_F_INDIRECT_DESC 28
+
/* Virtio ring descriptors: 16 bytes. These can chain together via "next". */
struct vring_desc
{
@@ -119,7 +124,8 @@ struct virtqueue *vring_new_virtqueue(unsigned int num,
struct virtio_device *vdev,
void *pages,
void (*notify)(struct virtqueue *vq),
- void (*callback)(struct virtqueue *vq));
+ void (*callback)(struct virtqueue *vq),
+ const char *name);
void vring_del_virtqueue(struct virtqueue *vq);
/* Filter out transport-specific feature bits. */
void vring_transport_features(struct virtio_device *vdev);
diff --git a/include/linux/wait.h b/include/linux/wait.h
index bc02463..6788e1a 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -132,8 +132,6 @@ static inline void __remove_wait_queue(wait_queue_head_t *head,
list_del(&old->task_list);
}
-void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
- int nr_exclusive, int sync, void *key);
void __wake_up(wait_queue_head_t *q, unsigned int mode, int nr, void *key);
void __wake_up_locked_key(wait_queue_head_t *q, unsigned int mode, void *key);
void __wake_up_sync_key(wait_queue_head_t *q, unsigned int mode, int nr,
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 9344547..3224820 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -79,7 +79,6 @@ struct writeback_control {
void writeback_inodes(struct writeback_control *wbc);
int inode_wait(void *);
void sync_inodes_sb(struct super_block *, int wait);
-void sync_inodes(int wait);
/* writeback.h requires fs.h; it, too, is not included from here. */
static inline void wait_on_inode(struct inode *inode)
diff --git a/include/scsi/fc/fc_fip.h b/include/scsi/fc/fc_fip.h
index 0627a9a..3d138c1 100644
--- a/include/scsi/fc/fc_fip.h
+++ b/include/scsi/fc/fc_fip.h
@@ -22,13 +22,6 @@
* http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
*/
-/*
- * The FIP ethertype eventually goes in net/if_ether.h.
- */
-#ifndef ETH_P_FIP
-#define ETH_P_FIP 0x8914 /* FIP Ethertype */
-#endif
-
#define FIP_DEF_PRI 128 /* default selection priority */
#define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */
#define FIP_DEF_FKA 8000 /* default FCF keep-alive/advert period (mS) */
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index d0ed522..4426f00 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -22,6 +22,11 @@
#define ISCSI_IF_H
#include <scsi/iscsi_proto.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#define ISCSI_NL_GRP_ISCSID 1
+#define ISCSI_NL_GRP_UIP 2
#define UEVENT_BASE 10
#define KEVENT_BASE 100
@@ -50,7 +55,10 @@ enum iscsi_uevent_e {
ISCSI_UEVENT_TGT_DSCVR = UEVENT_BASE + 15,
ISCSI_UEVENT_SET_HOST_PARAM = UEVENT_BASE + 16,
ISCSI_UEVENT_UNBIND_SESSION = UEVENT_BASE + 17,
- ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+ ISCSI_UEVENT_CREATE_BOUND_SESSION = UEVENT_BASE + 18,
+ ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST = UEVENT_BASE + 19,
+
+ ISCSI_UEVENT_PATH_UPDATE = UEVENT_BASE + 20,
/* up events */
ISCSI_KEVENT_RECV_PDU = KEVENT_BASE + 1,
@@ -59,6 +67,9 @@ enum iscsi_uevent_e {
ISCSI_KEVENT_DESTROY_SESSION = KEVENT_BASE + 4,
ISCSI_KEVENT_UNBIND_SESSION = KEVENT_BASE + 5,
ISCSI_KEVENT_CREATE_SESSION = KEVENT_BASE + 6,
+
+ ISCSI_KEVENT_PATH_REQ = KEVENT_BASE + 7,
+ ISCSI_KEVENT_IF_DOWN = KEVENT_BASE + 8,
};
enum iscsi_tgt_dscvr {
@@ -131,6 +142,10 @@ struct iscsi_uevent {
struct msg_transport_connect {
uint32_t non_blocking;
} ep_connect;
+ struct msg_transport_connect_through_host {
+ uint32_t host_no;
+ uint32_t non_blocking;
+ } ep_connect_through_host;
struct msg_transport_poll {
uint64_t ep_handle;
uint32_t timeout_ms;
@@ -154,6 +169,9 @@ struct iscsi_uevent {
uint32_t param; /* enum iscsi_host_param */
uint32_t len;
} set_host_param;
+ struct msg_set_path {
+ uint32_t host_no;
+ } set_path;
} u;
union {
/* messages k -> u */
@@ -187,10 +205,39 @@ struct iscsi_uevent {
struct msg_transport_connect_ret {
uint64_t handle;
} ep_connect_ret;
+ struct msg_req_path {
+ uint32_t host_no;
+ } req_path;
+ struct msg_notify_if_down {
+ uint32_t host_no;
+ } notify_if_down;
} r;
} __attribute__ ((aligned (sizeof(uint64_t))));
/*
+ * To keep the struct iscsi_uevent size the same for userspace code
+ * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and
+ * ISCSI_KEVENT_PATH_REQ is defined separately and comes after the
+ * struct iscsi_uevent in the NETLINK_ISCSI message.
+ */
+struct iscsi_path {
+ uint64_t handle;
+ uint8_t mac_addr[6];
+ uint8_t mac_addr_old[6];
+ uint32_t ip_addr_len; /* 4 or 16 */
+ union {
+ struct in_addr v4_addr;
+ struct in6_addr v6_addr;
+ } src;
+ union {
+ struct in_addr v4_addr;
+ struct in6_addr v6_addr;
+ } dst;
+ uint16_t vlan_id;
+ uint16_t pmtu;
+} __attribute__ ((aligned (sizeof(uint64_t))));
+
+/*
* Common error codes
*/
enum iscsi_err {
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 45f9cc6..ebdd9f4 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -679,6 +679,7 @@ struct fc_lport {
unsigned int e_d_tov;
unsigned int r_a_tov;
u8 max_retry_count;
+ u8 max_rport_retry_count;
u16 link_speed;
u16 link_supported_speeds;
u16 lro_xid; /* max xid for fcoe lro */
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 0289f57..196525c 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -82,9 +82,12 @@ enum {
enum {
+ ISCSI_TASK_FREE,
ISCSI_TASK_COMPLETED,
ISCSI_TASK_PENDING,
ISCSI_TASK_RUNNING,
+ ISCSI_TASK_ABRT_TMF, /* aborted due to TMF */
+ ISCSI_TASK_ABRT_SESS_RECOV, /* aborted due to session recovery */
};
struct iscsi_r2t_info {
@@ -181,9 +184,7 @@ struct iscsi_conn {
/* xmit */
struct list_head mgmtqueue; /* mgmt (control) xmit queue */
- struct list_head mgmt_run_list; /* list of control tasks */
- struct list_head xmitqueue; /* data-path cmd queue */
- struct list_head run_list; /* list of cmds in progress */
+ struct list_head cmdqueue; /* data-path cmd queue */
struct list_head requeue; /* tasks needing another run */
struct work_struct xmitwork; /* per-conn. xmit workqueue */
unsigned long suspend_tx; /* suspend Tx */
@@ -406,6 +407,7 @@ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
char *, int);
extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
extern void iscsi_requeue_task(struct iscsi_task *task);
extern void iscsi_put_task(struct iscsi_task *task);
extern void __iscsi_get_task(struct iscsi_task *task);
diff --git a/include/scsi/osd_attributes.h b/include/scsi/osd_attributes.h
index f888a6f..56e920a 100644
--- a/include/scsi/osd_attributes.h
+++ b/include/scsi/osd_attributes.h
@@ -29,6 +29,7 @@ enum {
OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1,
OSD_APAGE_PARTITION_QUOTAS = OSD_APAGE_PARTITION_FIRST + 2,
OSD_APAGE_PARTITION_TIMESTAMP = OSD_APAGE_PARTITION_FIRST + 3,
+ OSD_APAGE_PARTITION_ATTR_ACCESS = OSD_APAGE_PARTITION_FIRST + 4,
OSD_APAGE_PARTITION_SECURITY = OSD_APAGE_PARTITION_FIRST + 5,
OSD_APAGE_PARTITION_LAST = 0x5FFFFFFF,
@@ -51,7 +52,9 @@ enum {
OSD_APAGE_RESERVED_TYPE_LAST = 0xEFFFFFFF,
OSD_APAGE_COMMON_FIRST = 0xF0000000,
- OSD_APAGE_COMMON_LAST = 0xFFFFFFFE,
+ OSD_APAGE_COMMON_LAST = 0xFFFFFFFD,
+
+ OSD_APAGE_CURRENT_COMMAND = 0xFFFFFFFE,
OSD_APAGE_REQUEST_ALL = 0xFFFFFFFF,
};
@@ -106,10 +109,30 @@ enum {
OSD_ATTR_RI_PRODUCT_REVISION_LEVEL = 0x7, /* 4 */
OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER = 0x8, /* variable */
OSD_ATTR_RI_OSD_NAME = 0x9, /* variable */
+ OSD_ATTR_RI_MAX_CDB_CONTINUATION_LEN = 0xA, /* 4 */
OSD_ATTR_RI_TOTAL_CAPACITY = 0x80, /* 8 */
OSD_ATTR_RI_USED_CAPACITY = 0x81, /* 8 */
OSD_ATTR_RI_NUMBER_OF_PARTITIONS = 0xC0, /* 8 */
OSD_ATTR_RI_CLOCK = 0x100, /* 6 */
+ OARI_DEFAULT_ISOLATION_METHOD = 0X110, /* 1 */
+ OARI_SUPPORTED_ISOLATION_METHODS = 0X111, /* 32 */
+
+ OARI_DATA_ATOMICITY_GUARANTEE = 0X120, /* 8 */
+ OARI_DATA_ATOMICITY_ALIGNMENT = 0X121, /* 8 */
+ OARI_ATTRIBUTES_ATOMICITY_GUARANTEE = 0X122, /* 8 */
+ OARI_DATA_ATTRIBUTES_ATOMICITY_MULTIPLIER = 0X123, /* 1 */
+
+ OARI_MAXIMUM_SNAPSHOTS_COUNT = 0X1C1, /* 0 or 4 */
+ OARI_MAXIMUM_CLONES_COUNT = 0X1C2, /* 0 or 4 */
+ OARI_MAXIMUM_BRANCH_DEPTH = 0X1CC, /* 0 or 4 */
+ OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_FIRST = 0X200, /* 0 or 4 */
+ OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_LAST = 0X2ff, /* 0 or 4 */
+ OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_FIRST = 0X300, /* 0 or 4 */
+ OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_LAST = 0X30F, /* 0 or 4 */
+ OARI_SUPPORT_FOR_DUPLICATED_OBJECT_FREEZING = 0X310, /* 0 or 4 */
+ OARI_SUPPORT_FOR_SNAPSHOT_REFRESHING = 0X311, /* 0 or 1 */
+ OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_FIRST = 0X7000001,/* 0 or 4 */
+ OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_LAST = 0X700FFFF,/* 0 or 4 */
};
/* Root_Information_attributes_page does not have a get_page structure */
@@ -120,7 +143,15 @@ enum {
OSD_ATTR_PI_PARTITION_ID = 0x1, /* 8 */
OSD_ATTR_PI_USERNAME = 0x9, /* variable */
OSD_ATTR_PI_USED_CAPACITY = 0x81, /* 8 */
+ OSD_ATTR_PI_USED_CAPACITY_INCREMENT = 0x84, /* 0 or 8 */
OSD_ATTR_PI_NUMBER_OF_OBJECTS = 0xC1, /* 8 */
+
+ OSD_ATTR_PI_ACTUAL_DATA_SPACE = 0xD1, /* 0 or 8 */
+ OSD_ATTR_PI_RESERVED_DATA_SPACE = 0xD2, /* 0 or 8 */
+ OSD_ATTR_PI_DEFAULT_SNAPSHOT_DUPLICATION_METHOD = 0x200,/* 0 or 4 */
+ OSD_ATTR_PI_DEFAULT_CLONE_DUPLICATION_METHOD = 0x201,/* 0 or 4 */
+ OSD_ATTR_PI_DEFAULT_SP_TIME_OF_DUPLICATION = 0x300,/* 0 or 4 */
+ OSD_ATTR_PI_DEFAULT_CLONE_TIME_OF_DUPLICATION = 0x301,/* 0 or 4 */
};
/* Partition Information attributes page does not have a get_page structure */
@@ -131,6 +162,7 @@ enum {
OSD_ATTR_CI_PARTITION_ID = 0x1, /* 8 */
OSD_ATTR_CI_COLLECTION_OBJECT_ID = 0x2, /* 8 */
OSD_ATTR_CI_USERNAME = 0x9, /* variable */
+ OSD_ATTR_CI_COLLECTION_TYPE = 0xA, /* 1 */
OSD_ATTR_CI_USED_CAPACITY = 0x81, /* 8 */
};
/* Collection Information attributes page does not have a get_page structure */
@@ -144,6 +176,8 @@ enum {
OSD_ATTR_OI_USERNAME = 0x9, /* variable */
OSD_ATTR_OI_USED_CAPACITY = 0x81, /* 8 */
OSD_ATTR_OI_LOGICAL_LENGTH = 0x82, /* 8 */
+ SD_ATTR_OI_ACTUAL_DATA_SPACE = 0XD1, /* 0 OR 8 */
+ SD_ATTR_OI_RESERVED_DATA_SPACE = 0XD2, /* 0 OR 8 */
};
/* Object Information attributes page does not have a get_page structure */
@@ -248,7 +282,18 @@ struct object_timestamps_attributes_page {
struct osd_timestamp data_modified_time;
} __packed;
-/* 7.1.2.19 Collections attributes page */
+/* OSD2r05: 7.1.3.19 Attributes Access attributes page
+ * (OSD_APAGE_PARTITION_ATTR_ACCESS)
+ *
+ * each attribute is of the form below. Total array length is deduced
+ * from the attribute's length
+ * (See allowed_attributes_access of the struct osd_cap_object_descriptor)
+ */
+struct attributes_access_attr {
+ struct osd_attributes_list_attrid attr_list[0];
+} __packed;
+
+/* OSD2r05: 7.1.2.21 Collections attributes page */
/* TBD */
/* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */
@@ -324,4 +369,29 @@ struct object_security_attributes_page {
__be32 policy_access_tag;
} __packed;
+/* OSD2r05: 7.1.3.31 Current Command attributes page
+ * (OSD_APAGE_CURRENT_COMMAND)
+ */
+enum {
+ OSD_ATTR_CC_RESPONSE_INTEGRITY_CHECK_VALUE = 0x1, /* 32 */
+ OSD_ATTR_CC_OBJECT_TYPE = 0x2, /* 1 */
+ OSD_ATTR_CC_PARTITION_ID = 0x3, /* 8 */
+ OSD_ATTR_CC_OBJECT_ID = 0x4, /* 8 */
+ OSD_ATTR_CC_STARTING_BYTE_ADDRESS_OF_APPEND = 0x5, /* 8 */
+ OSD_ATTR_CC_CHANGE_IN_USED_CAPACITY = 0x6, /* 8 */
+};
+
+/*TBD: osdv1_current_command_attributes_page */
+
+struct osdv2_current_command_attributes_page {
+ struct osd_attr_page_header hdr; /* id=0xFFFFFFFE, size=0x44 */
+ u8 response_integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
+ u8 object_type;
+ u8 reserved[3];
+ __be64 partition_id;
+ __be64 object_id;
+ __be64 starting_byte_address_of_append;
+ __be64 change_in_used_capacity;
+};
+
#endif /*ndef __OSD_ATTRIBUTES_H__*/
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index b24d961..02bd9f7 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -18,6 +18,7 @@
#include "osd_types.h"
#include <linux/blkdev.h>
+#include <scsi/scsi_device.h>
/* Note: "NI" in comments below means "Not Implemented yet" */
@@ -47,6 +48,7 @@ enum osd_std_version {
*/
struct osd_dev {
struct scsi_device *scsi_device;
+ struct file *file;
unsigned def_timeout;
#ifdef OSD_VER1_SUPPORT
@@ -69,6 +71,10 @@ void osd_dev_fini(struct osd_dev *od);
/* some hi level device operations */
int osd_auto_detect_ver(struct osd_dev *od, void *caps); /* GFP_KERNEL */
+static inline struct request_queue *osd_request_queue(struct osd_dev *od)
+{
+ return od->scsi_device->request_queue;
+}
/* we might want to use function vector in the future */
static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v)
@@ -363,7 +369,9 @@ void osd_req_create_object(struct osd_request *or, struct osd_obj_id *);
void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *);
void osd_req_write(struct osd_request *or,
- const struct osd_obj_id *, struct bio *data_out, u64 offset);
+ const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
+int osd_req_write_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
void osd_req_append(struct osd_request *or,
const struct osd_obj_id *, struct bio *data_out);/* NI */
void osd_req_create_write(struct osd_request *or,
@@ -378,7 +386,9 @@ void osd_req_flush_object(struct osd_request *or,
/*V2*/ u64 offset, /*V2*/ u64 len);
void osd_req_read(struct osd_request *or,
- const struct osd_obj_id *, struct bio *data_in, u64 offset);
+ const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
+int osd_req_read_kern(struct osd_request *or,
+ const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
/*
* Root/Partition/Collection/Object Attributes commands
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index 62b2ab8..2cc8e8b 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -303,7 +303,15 @@ enum osd_service_actions {
OSD_ACT_V2(REMOVE_MEMBER_OBJECTS, 0x21)
OSD_ACT_V2(GET_MEMBER_ATTRIBUTES, 0x22)
OSD_ACT_V2(SET_MEMBER_ATTRIBUTES, 0x23)
+
+ OSD_ACT_V2(CREATE_CLONE, 0x28)
+ OSD_ACT_V2(CREATE_SNAPSHOT, 0x29)
+ OSD_ACT_V2(DETACH_CLONE, 0x2A)
+ OSD_ACT_V2(REFRESH_SNAPSHOT_CLONE, 0x2B)
+ OSD_ACT_V2(RESTORE_PARTITION_FROM_SNAPSHOT, 0x2C)
+
OSD_ACT_V2(READ_MAP, 0x31)
+ OSD_ACT_V2(READ_MAPS_COMPARE, 0x32)
OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND, 0x8F7E, 0x8F7C)
OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT, 0x8F7F, 0x8F7D)
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 43b50d3..3878d1d 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -270,7 +270,7 @@ static inline unsigned char scsi_get_prot_type(struct scsi_cmnd *scmd)
static inline sector_t scsi_get_lba(struct scsi_cmnd *scmd)
{
- return scmd->request->sector;
+ return blk_rq_pos(scmd->request);
}
static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index c9184f7..68a8d87 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -680,7 +680,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
if (rport->roles & FC_PORT_ROLE_FCP_TARGET)
result = 0;
else if (rport->flags & FC_RPORT_DEVLOSS_PENDING)
- result = DID_TRANSPORT_DISRUPTED << 16;
+ result = DID_IMM_RETRY << 16;
else
result = DID_NO_CONNECT << 16;
break;
@@ -688,7 +688,7 @@ fc_remote_port_chkready(struct fc_rport *rport)
if (rport->flags & FC_RPORT_FAST_FAIL_TIMEDOUT)
result = DID_TRANSPORT_FAILFAST << 16;
else
- result = DID_TRANSPORT_DISRUPTED << 16;
+ result = DID_IMM_RETRY << 16;
break;
default:
result = DID_NO_CONNECT << 16;
diff --git a/include/scsi/scsi_transport_iscsi.h b/include/scsi/scsi_transport_iscsi.h
index 457588e..349c7f3 100644
--- a/include/scsi/scsi_transport_iscsi.h
+++ b/include/scsi/scsi_transport_iscsi.h
@@ -126,12 +126,14 @@ struct iscsi_transport {
int *index, int *age);
void (*session_recovery_timedout) (struct iscsi_cls_session *session);
- struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+ struct iscsi_endpoint *(*ep_connect) (struct Scsi_Host *shost,
+ struct sockaddr *dst_addr,
int non_blocking);
int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
void (*ep_disconnect) (struct iscsi_endpoint *ep);
int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
uint32_t enable, struct sockaddr *dst_addr);
+ int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params);
};
/*
@@ -148,6 +150,10 @@ extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn,
extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
char *data, uint32_t data_size);
+extern int iscsi_offload_mesg(struct Scsi_Host *shost,
+ struct iscsi_transport *transport, uint32_t type,
+ char *data, uint16_t data_size);
+
struct iscsi_cls_conn {
struct list_head conn_list; /* item in connlist */
void *dd_data; /* LLD private data */
diff --git a/include/sound/asound.h b/include/sound/asound.h
index 6add80f..82aed3f 100644
--- a/include/sound/asound.h
+++ b/include/sound/asound.h
@@ -255,6 +255,7 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_HALF_DUPLEX 0x00100000 /* only half duplex */
#define SNDRV_PCM_INFO_JOINT_DUPLEX 0x00200000 /* playback and capture stream are somewhat correlated */
#define SNDRV_PCM_INFO_SYNC_START 0x00400000 /* pcm support some kind of sync go */
+#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
typedef int __bitwise snd_pcm_state_t;
#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
diff --git a/include/sound/core.h b/include/sound/core.h
index 3dea798..309cb96 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -300,19 +300,10 @@ int snd_card_create(int idx, const char *id,
struct module *module, int extra_size,
struct snd_card **card_ret);
-static inline __deprecated
-struct snd_card *snd_card_new(int idx, const char *id,
- struct module *module, int extra_size)
-{
- struct snd_card *card;
- if (snd_card_create(idx, id, module, extra_size, &card) < 0)
- return NULL;
- return card;
-}
-
int snd_card_disconnect(struct snd_card *card);
int snd_card_free(struct snd_card *card);
int snd_card_free_when_closed(struct snd_card *card);
+void snd_card_set_id(struct snd_card *card, const char *id);
int snd_card_register(struct snd_card *card);
int snd_card_info_init(void);
int snd_card_info_done(void);
diff --git a/include/sound/driver.h b/include/sound/driver.h
deleted file mode 100644
index f035943..0000000
--- a/include/sound/driver.h
+++ /dev/null
@@ -1 +0,0 @@
-#warning "This file is deprecated"
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index c172968..2389352 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -98,6 +98,7 @@ struct snd_pcm_ops {
#define SNDRV_PCM_IOCTL1_INFO 1
#define SNDRV_PCM_IOCTL1_CHANNEL_INFO 2
#define SNDRV_PCM_IOCTL1_GSTATE 3
+#define SNDRV_PCM_IOCTL1_FIFO_SIZE 4
#define SNDRV_PCM_TRIGGER_STOP 0
#define SNDRV_PCM_TRIGGER_START 1
@@ -270,6 +271,7 @@ struct snd_pcm_runtime {
snd_pcm_uframes_t hw_ptr_base; /* Position at buffer restart */
snd_pcm_uframes_t hw_ptr_interrupt; /* Position at interrupt time */
unsigned long hw_ptr_jiffies; /* Time when hw_ptr is updated */
+ snd_pcm_sframes_t delay; /* extra delay; typically FIFO size */
/* -- HW params -- */
snd_pcm_access_t access; /* access mode */
@@ -486,80 +488,6 @@ void snd_pcm_detach_substream(struct snd_pcm_substream *substream);
void snd_pcm_vma_notify_data(void *client, void *data);
int snd_pcm_mmap_data(struct snd_pcm_substream *substream, struct file *file, struct vm_area_struct *area);
-#if BITS_PER_LONG >= 64
-
-static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem)
-{
- *rem = *n % div;
- *n /= div;
-}
-
-#elif defined(i386)
-
-static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem)
-{
- u_int32_t low, high;
- low = *n & 0xffffffff;
- high = *n >> 32;
- if (high) {
- u_int32_t high1 = high % div;
- high /= div;
- asm("divl %2":"=a" (low), "=d" (*rem):"rm" (div), "a" (low), "d" (high1));
- *n = (u_int64_t)high << 32 | low;
- } else {
- *n = low / div;
- *rem = low % div;
- }
-}
-#else
-
-static inline void divl(u_int32_t high, u_int32_t low,
- u_int32_t div,
- u_int32_t *q, u_int32_t *r)
-{
- u_int64_t n = (u_int64_t)high << 32 | low;
- u_int64_t d = (u_int64_t)div << 31;
- u_int32_t q1 = 0;
- int c = 32;
- while (n > 0xffffffffU) {
- q1 <<= 1;
- if (n >= d) {
- n -= d;
- q1 |= 1;
- }
- d >>= 1;
- c--;
- }
- q1 <<= c;
- if (n) {
- low = n;
- *q = q1 | (low / div);
- *r = low % div;
- } else {
- *r = 0;
- *q = q1;
- }
- return;
-}
-
-static inline void div64_32(u_int64_t *n, u_int32_t div, u_int32_t *rem)
-{
- u_int32_t low, high;
- low = *n & 0xffffffff;
- high = *n >> 32;
- if (high) {
- u_int32_t high1 = high % div;
- u_int32_t low1 = low;
- high /= div;
- divl(high1, low1, div, &low, rem);
- *n = (u_int64_t)high << 32 | low;
- } else {
- *n = low / div;
- *rem = low % div;
- }
-}
-#endif
-
/*
* PCM library
*/
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 1367647..352d7ee 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -45,24 +45,6 @@ struct snd_pcm_substream;
#define SND_SOC_DAIFMT_GATED (1 << 4) /* clock is gated */
/*
- * DAI Left/Right Clocks.
- *
- * Specifies whether the DAI can support different samples for similtanious
- * playback and capture. This usually requires a seperate physical frame
- * clock for playback and capture.
- */
-#define SND_SOC_DAIFMT_SYNC (0 << 5) /* Tx FRM = Rx FRM */
-#define SND_SOC_DAIFMT_ASYNC (1 << 5) /* Tx FRM ~ Rx FRM */
-
-/*
- * TDM
- *
- * Time Division Multiplexing. Allows PCM data to be multplexed with other
- * data on the DAI.
- */
-#define SND_SOC_DAIFMT_TDM (1 << 6)
-
-/*
* DAI hardware signal inversions.
*
* Specifies whether the DAI can also support inverted clocks for the specified
@@ -96,6 +78,10 @@ struct snd_pcm_substream;
#define SND_SOC_CLOCK_IN 0
#define SND_SOC_CLOCK_OUT 1
+#define SND_SOC_STD_AC97_FMTS (SNDRV_PCM_FMTBIT_S16_LE |\
+ SNDRV_PCM_FMTBIT_S32_LE |\
+ SNDRV_PCM_FMTBIT_S32_BE)
+
struct snd_soc_dai_ops;
struct snd_soc_dai;
struct snd_ac97_bus_ops;
@@ -208,6 +194,7 @@ struct snd_soc_dai {
/* DAI capabilities */
struct snd_soc_pcm_stream capture;
struct snd_soc_pcm_stream playback;
+ unsigned int symmetric_rates:1;
/* DAI runtime info */
struct snd_pcm_runtime *runtime;
@@ -219,11 +206,8 @@ struct snd_soc_dai {
/* DAI private data */
void *private_data;
- /* parent codec/platform */
- union {
- struct snd_soc_codec *codec;
- struct snd_soc_platform *platform;
- };
+ /* parent platform */
+ struct snd_soc_platform *platform;
struct list_head list;
};
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index a7def6a..ec8a45f 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -140,16 +140,30 @@
#define SND_SOC_DAPM_DAC(wname, stname, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
.shift = wshift, .invert = winvert}
+#define SND_SOC_DAPM_DAC_E(wname, stname, wreg, wshift, winvert, \
+ wevent, wflags) \
+{ .id = snd_soc_dapm_dac, .name = wname, .sname = stname, .reg = wreg, \
+ .shift = wshift, .invert = winvert, \
+ .event = wevent, .event_flags = wflags}
#define SND_SOC_DAPM_ADC(wname, stname, wreg, wshift, winvert) \
{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
.shift = wshift, .invert = winvert}
+#define SND_SOC_DAPM_ADC_E(wname, stname, wreg, wshift, winvert, \
+ wevent, wflags) \
+{ .id = snd_soc_dapm_adc, .name = wname, .sname = stname, .reg = wreg, \
+ .shift = wshift, .invert = winvert, \
+ .event = wevent, .event_flags = wflags}
-/* generic register modifier widget */
+/* generic widgets */
#define SND_SOC_DAPM_REG(wid, wname, wreg, wshift, wmask, won_val, woff_val) \
{ .id = wid, .name = wname, .kcontrols = NULL, .num_kcontrols = 0, \
.reg = -((wreg) + 1), .shift = wshift, .mask = wmask, \
.on_val = won_val, .off_val = woff_val, .event = dapm_reg_event, \
.event_flags = SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD}
+#define SND_SOC_DAPM_SUPPLY(wname, wreg, wshift, winvert, wevent, wflags) \
+{ .id = snd_soc_dapm_supply, .name = wname, .reg = wreg, \
+ .shift = wshift, .invert = winvert, .event = wevent, \
+ .event_flags = wflags}
/* dapm kcontrol types */
#define SOC_DAPM_SINGLE(xname, reg, shift, max, invert) \
@@ -265,8 +279,6 @@ int snd_soc_dapm_add_routes(struct snd_soc_codec *codec,
/* dapm events */
int snd_soc_dapm_stream_event(struct snd_soc_codec *codec, char *stream,
int event);
-int snd_soc_dapm_set_bias_level(struct snd_soc_device *socdev,
- enum snd_soc_bias_level level);
/* dapm sys fs - used by the core */
int snd_soc_dapm_sys_add(struct device *dev);
@@ -298,6 +310,7 @@ enum snd_soc_dapm_type {
snd_soc_dapm_vmid, /* codec bias/vmid - to minimise pops */
snd_soc_dapm_pre, /* machine specific pre widget - exec first */
snd_soc_dapm_post, /* machine specific post widget - exec last */
+ snd_soc_dapm_supply, /* power/clock supply */
};
/*
@@ -357,6 +370,8 @@ struct snd_soc_dapm_widget {
unsigned char suspend:1; /* was active before suspend */
unsigned char pmdown:1; /* waiting for timeout */
+ int (*power_check)(struct snd_soc_dapm_widget *w);
+
/* external events */
unsigned short event_flags; /* flags to specify event types */
int (*event)(struct snd_soc_dapm_widget*, struct snd_kcontrol *, int);
@@ -368,6 +383,9 @@ struct snd_soc_dapm_widget {
/* widget input and outputs */
struct list_head sources;
struct list_head sinks;
+
+ /* used during DAPM updates */
+ struct list_head power_list;
};
#endif
diff --git a/include/sound/soc.h b/include/sound/soc.h
index a40bc6f..cf6111d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -118,6 +118,14 @@
.info = snd_soc_info_volsw, \
.get = xhandler_get, .put = xhandler_put, \
.private_value = SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert) }
+#define SOC_DOUBLE_EXT(xname, xreg, shift_left, shift_right, xmax, xinvert,\
+ xhandler_get, xhandler_put) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname),\
+ .info = snd_soc_info_volsw, \
+ .get = xhandler_get, .put = xhandler_put, \
+ .private_value = (unsigned long)&(struct soc_mixer_control) \
+ {.reg = xreg, .shift = shift_left, .rshift = shift_right, \
+ .max = xmax, .invert = xinvert} }
#define SOC_SINGLE_EXT_TLV(xname, xreg, xshift, xmax, xinvert,\
xhandler_get, xhandler_put, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -206,10 +214,6 @@ void snd_soc_jack_free_gpios(struct snd_soc_jack *jack, int count,
struct snd_soc_jack_gpio *gpios);
#endif
-/* codec IO */
-#define snd_soc_read(codec, reg) codec->read(codec, reg)
-#define snd_soc_write(codec, reg, value) codec->write(codec, reg, value)
-
/* codec register bit access */
int snd_soc_update_bits(struct snd_soc_codec *codec, unsigned short reg,
unsigned short mask, unsigned short value);
@@ -331,6 +335,7 @@ struct snd_soc_codec {
struct module *owner;
struct mutex mutex;
struct device *dev;
+ struct snd_soc_device *socdev;
struct list_head list;
@@ -364,6 +369,8 @@ struct snd_soc_codec {
enum snd_soc_bias_level bias_level;
enum snd_soc_bias_level suspend_bias_level;
struct delayed_work delayed_work;
+ struct list_head up_list;
+ struct list_head down_list;
/* codec DAI's */
struct snd_soc_dai *dai;
@@ -417,6 +424,12 @@ struct snd_soc_dai_link {
/* codec/machine specific init - e.g. add machine controls */
int (*init)(struct snd_soc_codec *codec);
+ /* Symmetry requirements */
+ unsigned int symmetric_rates:1;
+
+ /* Symmetry data - only valid if symmetry is being enforced */
+ unsigned int rate;
+
/* DAI pcm */
struct snd_pcm *pcm;
};
@@ -490,6 +503,19 @@ struct soc_enum {
void *dapm;
};
+/* codec IO */
+static inline unsigned int snd_soc_read(struct snd_soc_codec *codec,
+ unsigned int reg)
+{
+ return codec->read(codec, reg);
+}
+
+static inline unsigned int snd_soc_write(struct snd_soc_codec *codec,
+ unsigned int reg, unsigned int val)
+{
+ return codec->write(codec, reg, val);
+}
+
#include <sound/soc-dai.h>
#endif
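A brief sketch of codec I/O with the new typed inline helpers; the register address and bit value are hypothetical:

#include <sound/soc.h>

#define EXAMPLE_REG_POWER	0x02	/* hypothetical register */
#define EXAMPLE_PWR_ENABLE	0x0001	/* hypothetical enable bit */

static void example_enable_power(struct snd_soc_codec *codec)
{
	unsigned int val = snd_soc_read(codec, EXAMPLE_REG_POWER);

	snd_soc_write(codec, EXAMPLE_REG_POWER, val | EXAMPLE_PWR_ENABLE);
}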
diff --git a/include/sound/wm9081.h b/include/sound/wm9081.h
new file mode 100644
index 0000000..e173ddb
--- /dev/null
+++ b/include/sound/wm9081.h
@@ -0,0 +1,25 @@
+/*
+ * linux/sound/wm9081.h -- Platform data for WM9081
+ *
+ * Copyright 2009 Wolfson Microelectronics. PLC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_WM_9081_H
+#define __LINUX_SND_WM_9081_H
+
+struct wm9081_retune_mobile_setting {
+ const char *name;
+ unsigned int rate;
+ u16 config[20];
+};
+
+struct wm9081_retune_mobile_config {
+ struct wm9081_retune_mobile_setting *configs;
+ int num_configs;
+};
+
+#endif
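A sketch of board code supplying the new WM9081 platform data; the configuration name, rate and coefficient values are placeholders, not real tuning data:

#include <linux/kernel.h>
#include <sound/wm9081.h>

static struct wm9081_retune_mobile_setting example_retune[] = {
	{
		.name	= "Speaker",
		.rate	= 44100,
		.config	= { 0x0000 /* ... up to 20 register values ... */ },
	},
};

static struct wm9081_retune_mobile_config example_retune_config = {
	.configs	= example_retune,
	.num_configs	= ARRAY_SIZE(example_retune),
};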
diff --git a/include/trace/block.h b/include/trace/block.h
deleted file mode 100644
index 25b7068..0000000
--- a/include/trace/block.h
+++ /dev/null
@@ -1,76 +0,0 @@
-#ifndef _TRACE_BLOCK_H
-#define _TRACE_BLOCK_H
-
-#include <linux/blkdev.h>
-#include <linux/tracepoint.h>
-
-DECLARE_TRACE(block_rq_abort,
- TP_PROTO(struct request_queue *q, struct request *rq),
- TP_ARGS(q, rq));
-
-DECLARE_TRACE(block_rq_insert,
- TP_PROTO(struct request_queue *q, struct request *rq),
- TP_ARGS(q, rq));
-
-DECLARE_TRACE(block_rq_issue,
- TP_PROTO(struct request_queue *q, struct request *rq),
- TP_ARGS(q, rq));
-
-DECLARE_TRACE(block_rq_requeue,
- TP_PROTO(struct request_queue *q, struct request *rq),
- TP_ARGS(q, rq));
-
-DECLARE_TRACE(block_rq_complete,
- TP_PROTO(struct request_queue *q, struct request *rq),
- TP_ARGS(q, rq));
-
-DECLARE_TRACE(block_bio_bounce,
- TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio));
-
-DECLARE_TRACE(block_bio_complete,
- TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio));
-
-DECLARE_TRACE(block_bio_backmerge,
- TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio));
-
-DECLARE_TRACE(block_bio_frontmerge,
- TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio));
-
-DECLARE_TRACE(block_bio_queue,
- TP_PROTO(struct request_queue *q, struct bio *bio),
- TP_ARGS(q, bio));
-
-DECLARE_TRACE(block_getrq,
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
- TP_ARGS(q, bio, rw));
-
-DECLARE_TRACE(block_sleeprq,
- TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
- TP_ARGS(q, bio, rw));
-
-DECLARE_TRACE(block_plug,
- TP_PROTO(struct request_queue *q),
- TP_ARGS(q));
-
-DECLARE_TRACE(block_unplug_timer,
- TP_PROTO(struct request_queue *q),
- TP_ARGS(q));
-
-DECLARE_TRACE(block_unplug_io,
- TP_PROTO(struct request_queue *q),
- TP_ARGS(q));
-
-DECLARE_TRACE(block_split,
- TP_PROTO(struct request_queue *q, struct bio *bio, unsigned int pdu),
- TP_ARGS(q, bio, pdu));
-
-DECLARE_TRACE(block_remap,
- TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
- sector_t from, sector_t to),
- TP_ARGS(q, bio, dev, from, to));
-
-#endif
diff --git a/include/trace/define_trace.h b/include/trace/define_trace.h
new file mode 100644
index 0000000..f7a7ae1
--- /dev/null
+++ b/include/trace/define_trace.h
@@ -0,0 +1,75 @@
+/*
+ * Trace files that want to automate creation of all tracepoints defined
+ * in their file should include this file. The following are macros that the
+ * trace file may define:
+ *
+ * TRACE_SYSTEM defines the system the tracepoint is for
+ *
+ * TRACE_INCLUDE_FILE if the file name is something other than TRACE_SYSTEM.h
+ * This macro may be defined to tell define_trace.h what file to include.
+ * Note, leave off the ".h".
+ *
+ * TRACE_INCLUDE_PATH if the path is something other than core kernel include/trace
+ * then this macro can define the path to use. Note, the path is relative to
+ * define_trace.h, not the file including it. Full path names for out of tree
+ * modules must be used.
+ */
+
+#ifdef CREATE_TRACE_POINTS
+
+/* Prevent recursion */
+#undef CREATE_TRACE_POINTS
+
+#include <linux/stringify.h>
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
+ DEFINE_TRACE(name)
+
+#undef DECLARE_TRACE
+#define DECLARE_TRACE(name, proto, args) \
+ DEFINE_TRACE(name)
+
+#undef TRACE_INCLUDE
+#undef __TRACE_INCLUDE
+
+#ifndef TRACE_INCLUDE_FILE
+# define TRACE_INCLUDE_FILE TRACE_SYSTEM
+# define UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifndef TRACE_INCLUDE_PATH
+# define __TRACE_INCLUDE(system) <trace/events/system.h>
+# define UNDEF_TRACE_INCLUDE_PATH
+#else
+# define __TRACE_INCLUDE(system) __stringify(TRACE_INCLUDE_PATH/system.h)
+#endif
+
+# define TRACE_INCLUDE(system) __TRACE_INCLUDE(system)
+
+/* Let the trace headers be reread */
+#define TRACE_HEADER_MULTI_READ
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#ifdef CONFIG_EVENT_TRACING
+#include <trace/ftrace.h>
+#endif
+
+#undef TRACE_HEADER_MULTI_READ
+
+/* Only undef what we defined in this file */
+#ifdef UNDEF_TRACE_INCLUDE_FILE
+# undef TRACE_INCLUDE_FILE
+# undef UNDEF_TRACE_INCLUDE_FILE
+#endif
+
+#ifdef UNDEF_TRACE_INCLUDE_PATH
+# undef TRACE_INCLUDE_PATH
+# undef UNDEF_TRACE_INCLUDE_PATH
+#endif
+
+/* We may be processing more files */
+#define CREATE_TRACE_POINTS
+
+#endif /* CREATE_TRACE_POINTS */
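A minimal sketch of the header layout this file expects, using a hypothetical "example" subsystem; exactly one .c file then defines CREATE_TRACE_POINTS before including the header so the tracepoints get instantiated:

/* include/trace/events/example.h (hypothetical) */
#if !defined(_TRACE_EXAMPLE_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXAMPLE_H

#include <linux/tracepoint.h>

#undef TRACE_SYSTEM
#define TRACE_SYSTEM example

TRACE_EVENT(example_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_EXAMPLE_H */

/* This part must be outside protection */
#include <trace/define_trace.h>

/* In exactly one .c file of the subsystem: */
#define CREATE_TRACE_POINTS
#include <trace/events/example.h>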
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
new file mode 100644
index 0000000..d6b05f4
--- /dev/null
+++ b/include/trace/events/block.h
@@ -0,0 +1,493 @@
+#if !defined(_TRACE_BLOCK_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BLOCK_H
+
+#include <linux/blktrace_api.h>
+#include <linux/blkdev.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM block
+
+TRACE_EVENT(block_rq_abort,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( int, errors )
+ __array( char, rwbs, 6 )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->errors = rq->errors;
+
+ blk_fill_rwbs_rq(__entry->rwbs, rq);
+ blk_dump_cmd(__get_str(cmd), rq);
+ ),
+
+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->errors)
+);
+
+TRACE_EVENT(block_rq_insert,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( unsigned int, bytes )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
+
+ blk_fill_rwbs_rq(__entry->rwbs, rq);
+ blk_dump_cmd(__get_str(cmd), rq);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __entry->bytes, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+TRACE_EVENT(block_rq_issue,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( unsigned int, bytes )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->bytes = blk_pc_request(rq) ? blk_rq_bytes(rq) : 0;
+
+ blk_fill_rwbs_rq(__entry->rwbs, rq);
+ blk_dump_cmd(__get_str(cmd), rq);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %u (%s) %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __entry->bytes, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+TRACE_EVENT(block_rq_requeue,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( int, errors )
+ __array( char, rwbs, 6 )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->errors = rq->errors;
+
+ blk_fill_rwbs_rq(__entry->rwbs, rq);
+ blk_dump_cmd(__get_str(cmd), rq);
+ ),
+
+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->errors)
+);
+
+TRACE_EVENT(block_rq_complete,
+
+ TP_PROTO(struct request_queue *q, struct request *rq),
+
+ TP_ARGS(q, rq),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( int, errors )
+ __array( char, rwbs, 6 )
+ __dynamic_array( char, cmd, blk_cmd_buf_len(rq) )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
+ __entry->sector = blk_pc_request(rq) ? 0 : blk_rq_pos(rq);
+ __entry->nr_sector = blk_pc_request(rq) ? 0 : blk_rq_sectors(rq);
+ __entry->errors = rq->errors;
+
+ blk_fill_rwbs_rq(__entry->rwbs, rq);
+ blk_dump_cmd(__get_str(cmd), rq);
+ ),
+
+ TP_printk("%d,%d %s (%s) %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->rwbs, __get_str(cmd),
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->errors)
+);
+TRACE_EVENT(block_bio_bounce,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+TRACE_EVENT(block_bio_complete,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned, nr_sector )
+ __field( int, error )
+ __array( char, rwbs, 6 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%d]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->error)
+);
+
+TRACE_EVENT(block_bio_backmerge,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+TRACE_EVENT(block_bio_frontmerge,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+TRACE_EVENT(block_bio_queue,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio),
+
+ TP_ARGS(q, bio),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+TRACE_EVENT(block_getrq,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+
+ TP_ARGS(q, bio, rw),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
+ __entry->sector = bio ? bio->bi_sector : 0;
+ __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
+ blk_fill_rwbs(__entry->rwbs,
+ bio ? bio->bi_rw : 0, __entry->nr_sector);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+TRACE_EVENT(block_sleeprq,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio, int rw),
+
+ TP_ARGS(q, bio, rw),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
+ __entry->sector = bio ? bio->bi_sector : 0;
+ __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
+ blk_fill_rwbs(__entry->rwbs,
+ bio ? bio->bi_rw : 0, __entry->nr_sector);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector, __entry->comm)
+);
+
+TRACE_EVENT(block_plug,
+
+ TP_PROTO(struct request_queue *q),
+
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("[%s]", __entry->comm)
+);
+
+TRACE_EVENT(block_unplug_timer,
+
+ TP_PROTO(struct request_queue *q),
+
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+ __field( int, nr_rq )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
+);
+
+TRACE_EVENT(block_unplug_io,
+
+ TP_PROTO(struct request_queue *q),
+
+ TP_ARGS(q),
+
+ TP_STRUCT__entry(
+ __field( int, nr_rq )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->nr_rq = q->rq.count[READ] + q->rq.count[WRITE];
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("[%s] %d", __entry->comm, __entry->nr_rq)
+);
+
+TRACE_EVENT(block_split,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio,
+ unsigned int new_sector),
+
+ TP_ARGS(q, bio, new_sector),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( sector_t, new_sector )
+ __array( char, rwbs, 6 )
+ __array( char, comm, TASK_COMM_LEN )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->new_sector = new_sector;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
+ ),
+
+ TP_printk("%d,%d %s %llu / %llu [%s]",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ (unsigned long long)__entry->new_sector,
+ __entry->comm)
+);
+
+TRACE_EVENT(block_remap,
+
+ TP_PROTO(struct request_queue *q, struct bio *bio, dev_t dev,
+ sector_t from),
+
+ TP_ARGS(q, bio, dev, from),
+
+ TP_STRUCT__entry(
+ __field( dev_t, dev )
+ __field( sector_t, sector )
+ __field( unsigned int, nr_sector )
+ __field( dev_t, old_dev )
+ __field( sector_t, old_sector )
+ __array( char, rwbs, 6 )
+ ),
+
+ TP_fast_assign(
+ __entry->dev = bio->bi_bdev->bd_dev;
+ __entry->sector = bio->bi_sector;
+ __entry->nr_sector = bio->bi_size >> 9;
+ __entry->old_dev = dev;
+ __entry->old_sector = from;
+ blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+ ),
+
+ TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
+ MAJOR(__entry->dev), MINOR(__entry->dev), __entry->rwbs,
+ (unsigned long long)__entry->sector,
+ __entry->nr_sector,
+ MAJOR(__entry->old_dev), MINOR(__entry->old_dev),
+ (unsigned long long)__entry->old_sector)
+);
+
+#endif /* _TRACE_BLOCK_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+
diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
new file mode 100644
index 0000000..b0c7ede
--- /dev/null
+++ b/include/trace/events/irq.h
@@ -0,0 +1,145 @@
+#if !defined(_TRACE_IRQ_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IRQ_H
+
+#include <linux/tracepoint.h>
+#include <linux/interrupt.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM irq
+
+#define softirq_name(sirq) { sirq##_SOFTIRQ, #sirq }
+#define show_softirq_name(val) \
+ __print_symbolic(val, \
+ softirq_name(HI), \
+ softirq_name(TIMER), \
+ softirq_name(NET_TX), \
+ softirq_name(NET_RX), \
+ softirq_name(BLOCK), \
+ softirq_name(TASKLET), \
+ softirq_name(SCHED), \
+ softirq_name(HRTIMER), \
+ softirq_name(RCU))
+
+/**
+ * irq_handler_entry - called immediately before the irq action handler
+ * @irq: irq number
+ * @action: pointer to struct irqaction
+ *
+ * The struct irqaction pointed to by @action contains various
+ * information about the handler, including the device name,
+ * @action->name, and the device id, @action->dev_id. When used in
+ * conjunction with the irq_handler_exit tracepoint, we can figure
+ * out irq handler latencies.
+ */
+TRACE_EVENT(irq_handler_entry,
+
+ TP_PROTO(int irq, struct irqaction *action),
+
+ TP_ARGS(irq, action),
+
+ TP_STRUCT__entry(
+ __field( int, irq )
+ __string( name, action->name )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __assign_str(name, action->name);
+ ),
+
+ TP_printk("irq=%d handler=%s", __entry->irq, __get_str(name))
+);
+
+/**
+ * irq_handler_exit - called immediately after the irq action handler returns
+ * @irq: irq number
+ * @action: pointer to struct irqaction
+ * @ret: return value
+ *
+ * If the @ret value is set to IRQ_HANDLED, then we know that the corresponding
+ * @action->handler successfully handled this irq. Otherwise, the irq might be
+ * a shared irq line, or the irq was not handled successfully. Can be used in
+ * conjunction with the irq_handler_entry to understand irq handler latencies.
+ */
+TRACE_EVENT(irq_handler_exit,
+
+ TP_PROTO(int irq, struct irqaction *action, int ret),
+
+ TP_ARGS(irq, action, ret),
+
+ TP_STRUCT__entry(
+ __field( int, irq )
+ __field( int, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->irq = irq;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("irq=%d return=%s",
+ __entry->irq, __entry->ret ? "handled" : "unhandled")
+);
+
+/**
+ * softirq_entry - called immediately before the softirq handler
+ * @h: pointer to struct softirq_action
+ * @vec: pointer to first struct softirq_action in softirq_vec array
+ *
+ * The @h parameter contains a pointer to the struct softirq_action
+ * which has a pointer to the action handler that is called. By subtracting
+ * the @vec pointer from the @h pointer, we can determine the softirq
+ * number. Also, when used in combination with the softirq_exit tracepoint
+ * we can determine the softirq latency.
+ */
+TRACE_EVENT(softirq_entry,
+
+ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+
+ TP_ARGS(h, vec),
+
+ TP_STRUCT__entry(
+ __field( int, vec )
+ ),
+
+ TP_fast_assign(
+ __entry->vec = (int)(h - vec);
+ ),
+
+ TP_printk("softirq=%d action=%s", __entry->vec,
+ show_softirq_name(__entry->vec))
+);
+
+/**
+ * softirq_exit - called immediately after the softirq handler returns
+ * @h: pointer to struct softirq_action
+ * @vec: pointer to first struct softirq_action in softirq_vec array
+ *
+ * The @h parameter contains a pointer to the struct softirq_action
+ * that has handled the softirq. By subtracting the @vec pointer from
+ * the @h pointer, we can determine the softirq number. Also, when used in
+ * combination with the softirq_entry tracepoint we can determine the softirq
+ * latency.
+ */
+TRACE_EVENT(softirq_exit,
+
+ TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
+
+ TP_ARGS(h, vec),
+
+ TP_STRUCT__entry(
+ __field( int, vec )
+ ),
+
+ TP_fast_assign(
+ __entry->vec = (int)(h - vec);
+ ),
+
+ TP_printk("softirq=%d action=%s", __entry->vec,
+ show_softirq_name(__entry->vec))
+);
+
+#endif /* _TRACE_IRQ_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
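A sketch of how these events are meant to be emitted around an interrupt action handler; TRACE_EVENT(irq_handler_entry) and TRACE_EVENT(irq_handler_exit) generate trace_irq_handler_entry() and trace_irq_handler_exit(), and the wrapper function here is hypothetical:

#include <linux/interrupt.h>
#include <trace/events/irq.h>

static irqreturn_t example_handle_one(int irq, struct irqaction *action)
{
	irqreturn_t ret;

	trace_irq_handler_entry(irq, action);
	ret = action->handler(irq, action->dev_id);
	trace_irq_handler_exit(irq, action, ret);

	return ret;
}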
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
new file mode 100644
index 0000000..9baba50
--- /dev/null
+++ b/include/trace/events/kmem.h
@@ -0,0 +1,231 @@
+#if !defined(_TRACE_KMEM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KMEM_H
+
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kmem
+
+/*
+ * The order of these masks is important. Matching masks will be seen
+ * first and the left over flags will end up showing by themselves.
+ *
+ * For example, if we have GFP_KERNEL before GFP_USER we will get:
+ *
+ * GFP_KERNEL|GFP_HARDWALL
+ *
+ * Thus most bits set go first.
+ */
+#define show_gfp_flags(flags) \
+ (flags) ? __print_flags(flags, "|", \
+ {(unsigned long)GFP_HIGHUSER_MOVABLE, "GFP_HIGHUSER_MOVABLE"}, \
+ {(unsigned long)GFP_HIGHUSER, "GFP_HIGHUSER"}, \
+ {(unsigned long)GFP_USER, "GFP_USER"}, \
+ {(unsigned long)GFP_TEMPORARY, "GFP_TEMPORARY"}, \
+ {(unsigned long)GFP_KERNEL, "GFP_KERNEL"}, \
+ {(unsigned long)GFP_NOFS, "GFP_NOFS"}, \
+ {(unsigned long)GFP_ATOMIC, "GFP_ATOMIC"}, \
+ {(unsigned long)GFP_NOIO, "GFP_NOIO"}, \
+ {(unsigned long)__GFP_HIGH, "GFP_HIGH"}, \
+ {(unsigned long)__GFP_WAIT, "GFP_WAIT"}, \
+ {(unsigned long)__GFP_IO, "GFP_IO"}, \
+ {(unsigned long)__GFP_COLD, "GFP_COLD"}, \
+ {(unsigned long)__GFP_NOWARN, "GFP_NOWARN"}, \
+ {(unsigned long)__GFP_REPEAT, "GFP_REPEAT"}, \
+ {(unsigned long)__GFP_NOFAIL, "GFP_NOFAIL"}, \
+ {(unsigned long)__GFP_NORETRY, "GFP_NORETRY"}, \
+ {(unsigned long)__GFP_COMP, "GFP_COMP"}, \
+ {(unsigned long)__GFP_ZERO, "GFP_ZERO"}, \
+ {(unsigned long)__GFP_NOMEMALLOC, "GFP_NOMEMALLOC"}, \
+ {(unsigned long)__GFP_HARDWALL, "GFP_HARDWALL"}, \
+ {(unsigned long)__GFP_THISNODE, "GFP_THISNODE"}, \
+ {(unsigned long)__GFP_RECLAIMABLE, "GFP_RECLAIMABLE"}, \
+ {(unsigned long)__GFP_MOVABLE, "GFP_MOVABLE"} \
+ ) : "GFP_NOWAIT"
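As a quick worked example of the ordering rule: GFP_USER is GFP_KERNEL | __GFP_HARDWALL, so if GFP_KERNEL were matched first a GFP_USER allocation would decode as "GFP_KERNEL|GFP_HARDWALL"; with the larger composite masks listed first it decodes simply as "GFP_USER".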
+
+TRACE_EVENT(kmalloc,
+
+ TP_PROTO(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __field( size_t, bytes_req )
+ __field( size_t, bytes_alloc )
+ __field( gfp_t, gfp_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __entry->bytes_req = bytes_req;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = gfp_flags;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+ __entry->call_site,
+ __entry->ptr,
+ __entry->bytes_req,
+ __entry->bytes_alloc,
+ show_gfp_flags(__entry->gfp_flags))
+);
+
+TRACE_EVENT(kmem_cache_alloc,
+
+ TP_PROTO(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __field( size_t, bytes_req )
+ __field( size_t, bytes_alloc )
+ __field( gfp_t, gfp_flags )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __entry->bytes_req = bytes_req;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = gfp_flags;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s",
+ __entry->call_site,
+ __entry->ptr,
+ __entry->bytes_req,
+ __entry->bytes_alloc,
+ show_gfp_flags(__entry->gfp_flags))
+);
+
+TRACE_EVENT(kmalloc_node,
+
+ TP_PROTO(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __field( size_t, bytes_req )
+ __field( size_t, bytes_alloc )
+ __field( gfp_t, gfp_flags )
+ __field( int, node )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __entry->bytes_req = bytes_req;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = gfp_flags;
+ __entry->node = node;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ __entry->call_site,
+ __entry->ptr,
+ __entry->bytes_req,
+ __entry->bytes_alloc,
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->node)
+);
+
+TRACE_EVENT(kmem_cache_alloc_node,
+
+ TP_PROTO(unsigned long call_site,
+ const void *ptr,
+ size_t bytes_req,
+ size_t bytes_alloc,
+ gfp_t gfp_flags,
+ int node),
+
+ TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ __field( size_t, bytes_req )
+ __field( size_t, bytes_alloc )
+ __field( gfp_t, gfp_flags )
+ __field( int, node )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ __entry->bytes_req = bytes_req;
+ __entry->bytes_alloc = bytes_alloc;
+ __entry->gfp_flags = gfp_flags;
+ __entry->node = node;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ __entry->call_site,
+ __entry->ptr,
+ __entry->bytes_req,
+ __entry->bytes_alloc,
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->node)
+);
+
+TRACE_EVENT(kfree,
+
+ TP_PROTO(unsigned long call_site, const void *ptr),
+
+ TP_ARGS(call_site, ptr),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
+);
+
+TRACE_EVENT(kmem_cache_free,
+
+ TP_PROTO(unsigned long call_site, const void *ptr),
+
+ TP_ARGS(call_site, ptr),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, call_site )
+ __field( const void *, ptr )
+ ),
+
+ TP_fast_assign(
+ __entry->call_site = call_site;
+ __entry->ptr = ptr;
+ ),
+
+ TP_printk("call_site=%lx ptr=%p", __entry->call_site, __entry->ptr)
+);
+#endif /* _TRACE_KMEM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
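For reference, these events are emitted from inside the slab allocators; the sketch below only illustrates how a caller's state maps onto the kmalloc event fields (example_kmalloc is a hypothetical wrapper, not a real kernel function, and an instrumented allocator already traces on its own):

/* Hypothetical wrapper, shown only to illustrate the argument mapping. */
static void *example_kmalloc(size_t size, gfp_t flags)
{
	void *ret = kmalloc(size, flags);

	/* call_site,  ptr, bytes_req, bytes_alloc, gfp_flags */
	if (ret)
		trace_kmalloc(_RET_IP_, ret, size, ksize(ret), flags);
	return ret;
}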
diff --git a/include/trace/events/lockdep.h b/include/trace/events/lockdep.h
new file mode 100644
index 0000000..0e956c9
--- /dev/null
+++ b/include/trace/events/lockdep.h
@@ -0,0 +1,96 @@
+#if !defined(_TRACE_LOCKDEP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_LOCKDEP_H
+
+#include <linux/lockdep.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM lockdep
+
+#ifdef CONFIG_LOCKDEP
+
+TRACE_EVENT(lock_acquire,
+
+ TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
+ int trylock, int read, int check,
+ struct lockdep_map *next_lock, unsigned long ip),
+
+ TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, flags)
+ __string(name, lock->name)
+ ),
+
+ TP_fast_assign(
+ __entry->flags = (trylock ? 1 : 0) | (read ? 2 : 0);
+ __assign_str(name, lock->name);
+ ),
+
+ TP_printk("%s%s%s", (__entry->flags & 1) ? "try " : "",
+ (__entry->flags & 2) ? "read " : "",
+ __get_str(name))
+);
+
+TRACE_EVENT(lock_release,
+
+ TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
+
+ TP_ARGS(lock, nested, ip),
+
+ TP_STRUCT__entry(
+ __string(name, lock->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, lock->name);
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+#ifdef CONFIG_LOCK_STAT
+
+TRACE_EVENT(lock_contended,
+
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip),
+
+ TP_ARGS(lock, ip),
+
+ TP_STRUCT__entry(
+ __string(name, lock->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, lock->name);
+ ),
+
+ TP_printk("%s", __get_str(name))
+);
+
+TRACE_EVENT(lock_acquired,
+ TP_PROTO(struct lockdep_map *lock, unsigned long ip, s64 waittime),
+
+ TP_ARGS(lock, ip, waittime),
+
+ TP_STRUCT__entry(
+ __string(name, lock->name)
+ __field(unsigned long, wait_usec)
+ __field(unsigned long, wait_nsec_rem)
+ ),
+ TP_fast_assign(
+ __assign_str(name, lock->name);
+ __entry->wait_nsec_rem = do_div(waittime, NSEC_PER_USEC);
+ __entry->wait_usec = (unsigned long) waittime;
+ ),
+ TP_printk("%s (%lu.%03lu us)", __get_str(name), __entry->wait_usec,
+ __entry->wait_nsec_rem)
+);
+
+#endif
+#endif
+
+#endif /* _TRACE_LOCKDEP_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/napi.h b/include/trace/events/napi.h
index a8989c4..a8989c4 100644
--- a/include/trace/napi.h
+++ b/include/trace/events/napi.h
diff --git a/include/trace/sched_event_types.h b/include/trace/events/sched.h
index 63547dc..24ab5bc 100644
--- a/include/trace/sched_event_types.h
+++ b/include/trace/events/sched.h
@@ -1,9 +1,8 @@
+#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCHED_H
-/* use <trace/sched.h> instead */
-#ifndef TRACE_EVENT
-# error Do not include this file directly.
-# error Unless you know what you are doing.
-#endif
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched
@@ -157,6 +156,7 @@ TRACE_EVENT(sched_switch,
__array( char, prev_comm, TASK_COMM_LEN )
__field( pid_t, prev_pid )
__field( int, prev_prio )
+ __field( long, prev_state )
__array( char, next_comm, TASK_COMM_LEN )
__field( pid_t, next_pid )
__field( int, next_prio )
@@ -166,13 +166,19 @@ TRACE_EVENT(sched_switch,
memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
__entry->prev_pid = prev->pid;
__entry->prev_prio = prev->prio;
+ __entry->prev_state = prev->state;
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
),
- TP_printk("task %s:%d [%d] ==> %s:%d [%d]",
+ TP_printk("task %s:%d [%d] (%s) ==> %s:%d [%d]",
__entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
+ __entry->prev_state ?
+ __print_flags(__entry->prev_state, "|",
+ { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
+ { 16, "Z" }, { 32, "X" }, { 64, "x" },
+ { 128, "W" }) : "R",
__entry->next_comm, __entry->next_pid, __entry->next_prio)
);
@@ -181,9 +187,9 @@ TRACE_EVENT(sched_switch,
*/
TRACE_EVENT(sched_migrate_task,
- TP_PROTO(struct task_struct *p, int orig_cpu, int dest_cpu),
+ TP_PROTO(struct task_struct *p, int dest_cpu),
- TP_ARGS(p, orig_cpu, dest_cpu),
+ TP_ARGS(p, dest_cpu),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -197,7 +203,7 @@ TRACE_EVENT(sched_migrate_task,
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
__entry->prio = p->prio;
- __entry->orig_cpu = orig_cpu;
+ __entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
),
@@ -334,4 +340,7 @@ TRACE_EVENT(sched_signal_send,
__entry->sig, __entry->comm, __entry->pid)
);
-#undef TRACE_SYSTEM
+#endif /* _TRACE_SCHED_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/skb.h b/include/trace/events/skb.h
new file mode 100644
index 0000000..1e8fabb
--- /dev/null
+++ b/include/trace/events/skb.h
@@ -0,0 +1,40 @@
+#if !defined(_TRACE_SKB_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SKB_H
+
+#include <linux/skbuff.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM skb
+
+/*
+ * Tracepoint for freeing an sk_buff:
+ */
+TRACE_EVENT(kfree_skb,
+
+ TP_PROTO(struct sk_buff *skb, void *location),
+
+ TP_ARGS(skb, location),
+
+ TP_STRUCT__entry(
+ __field( void *, skbaddr )
+ __field( unsigned short, protocol )
+ __field( void *, location )
+ ),
+
+ TP_fast_assign(
+ __entry->skbaddr = skb;
+ if (skb) {
+ __entry->protocol = ntohs(skb->protocol);
+ }
+ __entry->location = location;
+ ),
+
+ TP_printk("skbaddr=%p protocol=%u location=%p",
+ __entry->skbaddr, __entry->protocol, __entry->location)
+);
+
+#endif /* _TRACE_SKB_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
new file mode 100644
index 0000000..035f1bf
--- /dev/null
+++ b/include/trace/events/workqueue.h
@@ -0,0 +1,100 @@
+#if !defined(_TRACE_WORKQUEUE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_WORKQUEUE_H
+
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM workqueue
+
+TRACE_EVENT(workqueue_insertion,
+
+ TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+
+ TP_ARGS(wq_thread, work),
+
+ TP_STRUCT__entry(
+ __array(char, thread_comm, TASK_COMM_LEN)
+ __field(pid_t, thread_pid)
+ __field(work_func_t, func)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+ __entry->thread_pid = wq_thread->pid;
+ __entry->func = work->func;
+ ),
+
+ TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
+ __entry->thread_pid, __entry->func)
+);
+
+TRACE_EVENT(workqueue_execution,
+
+ TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
+
+ TP_ARGS(wq_thread, work),
+
+ TP_STRUCT__entry(
+ __array(char, thread_comm, TASK_COMM_LEN)
+ __field(pid_t, thread_pid)
+ __field(work_func_t, func)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+ __entry->thread_pid = wq_thread->pid;
+ __entry->func = work->func;
+ ),
+
+ TP_printk("thread=%s:%d func=%pF", __entry->thread_comm,
+ __entry->thread_pid, __entry->func)
+);
+
+/* Trace the creation of one workqueue thread on a cpu */
+TRACE_EVENT(workqueue_creation,
+
+ TP_PROTO(struct task_struct *wq_thread, int cpu),
+
+ TP_ARGS(wq_thread, cpu),
+
+ TP_STRUCT__entry(
+ __array(char, thread_comm, TASK_COMM_LEN)
+ __field(pid_t, thread_pid)
+ __field(int, cpu)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+ __entry->thread_pid = wq_thread->pid;
+ __entry->cpu = cpu;
+ ),
+
+ TP_printk("thread=%s:%d cpu=%d", __entry->thread_comm,
+ __entry->thread_pid, __entry->cpu)
+);
+
+TRACE_EVENT(workqueue_destruction,
+
+ TP_PROTO(struct task_struct *wq_thread),
+
+ TP_ARGS(wq_thread),
+
+ TP_STRUCT__entry(
+ __array(char, thread_comm, TASK_COMM_LEN)
+ __field(pid_t, thread_pid)
+ ),
+
+ TP_fast_assign(
+ memcpy(__entry->thread_comm, wq_thread->comm, TASK_COMM_LEN);
+ __entry->thread_pid = wq_thread->pid;
+ ),
+
+ TP_printk("thread=%s:%d", __entry->thread_comm, __entry->thread_pid)
+);
+
+#endif /* _TRACE_WORKQUEUE_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
new file mode 100644
index 0000000..1867553
--- /dev/null
+++ b/include/trace/ftrace.h
@@ -0,0 +1,591 @@
+/*
+ * Stage 1 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * struct ftrace_raw_<call> {
+ * struct trace_entry ent;
+ * <type> <item>;
+ * <type2> <item2>[<len>];
+ * [...]
+ * };
+ *
+ * The <type> <item> is created by the __field(type, item) macro or
+ * the __array(type2, item2, len) macro.
+ * We simply do "type item;", and that will create the fields
+ * in the structure.
+ */
+
+#include <linux/ftrace_event.h>
+
+#undef __field
+#define __field(type, item) type item;
+
+#undef __array
+#define __array(type, item, len) type item[len];
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) unsigned short __data_loc_##item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
+ struct ftrace_raw_##name { \
+ struct trace_entry ent; \
+ tstruct \
+ char __data[0]; \
+ }; \
+ static struct ftrace_event_call event_##name
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
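To make stage 1 concrete, this is roughly what it generates for the irq_handler_entry event introduced earlier in this patch (a hand expansion for illustration, not verbatim preprocessor output):

struct ftrace_raw_irq_handler_entry {
	struct trace_entry	ent;
	int			irq;		/* from __field(int, irq)      */
	unsigned short		__data_loc_name;/* from __string(name, ...)    */
	char			__data[0];
};
static struct ftrace_event_call event_irq_handler_entry;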
+
+/*
+ * Stage 2 of the trace events.
+ *
+ * Include the following:
+ *
+ * struct ftrace_data_offsets_<call> {
+ * int <item1>;
+ * int <item2>;
+ * [...]
+ * };
+ *
+ * The __dynamic_array() macro will create each int <item>; this is
+ * used to keep the offset of each array from the beginning of the event.
+ */
+
+#undef __field
+#define __field(type, item);
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) int item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+ struct ftrace_data_offsets_##call { \
+ tstruct; \
+ };
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
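Continuing the same example, stage 2 keeps only the dynamic-array members, so irq_handler_entry reduces to a single offset slot for its __string() field (again a hand expansion for illustration):

struct ftrace_data_offsets_irq_handler_entry {
	int	name;	/* offset of the dynamic "name" string within __data[] */
};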
+
+/*
+ * Set up the format used to show the trace point.
+ *
+ * int
+ * ftrace_format_##call(struct trace_seq *s)
+ * {
+ * struct ftrace_raw_##call field;
+ * int ret;
+ *
+ * ret = trace_seq_printf(s, #type " " #item ";"
+ * " offset:%u; size:%u;\n",
+ * offsetof(struct ftrace_raw_##call, item),
+ * sizeof(field.type));
+ *
+ * }
+ */
+
+#undef TP_STRUCT__entry
+#define TP_STRUCT__entry(args...) args
+
+#undef __field
+#define __field(type, item) \
+ ret = trace_seq_printf(s, "\tfield:" #type " " #item ";\t" \
+ "offset:%u;\tsize:%u;\n", \
+ (unsigned int)offsetof(typeof(field), item), \
+ (unsigned int)sizeof(field.item)); \
+ if (!ret) \
+ return 0;
+
+#undef __array
+#define __array(type, item, len) \
+ ret = trace_seq_printf(s, "\tfield:" #type " " #item "[" #len "];\t" \
+ "offset:%u;\tsize:%u;\n", \
+ (unsigned int)offsetof(typeof(field), item), \
+ (unsigned int)sizeof(field.item)); \
+ if (!ret) \
+ return 0;
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ ret = trace_seq_printf(s, "\tfield:__data_loc " #item ";\t" \
+ "offset:%u;\tsize:%u;\n", \
+ (unsigned int)offsetof(typeof(field), \
+ __data_loc_##item), \
+ (unsigned int)sizeof(field.__data_loc_##item)); \
+ if (!ret) \
+ return 0;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef __entry
+#define __entry REC
+
+#undef __print_symbolic
+#undef __get_dynamic_array
+#undef __get_str
+
+#undef TP_printk
+#define TP_printk(fmt, args...) "%s, %s\n", #fmt, __stringify(args)
+
+#undef TP_fast_assign
+#define TP_fast_assign(args...) args
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
+static int \
+ftrace_format_##call(struct trace_seq *s) \
+{ \
+ struct ftrace_raw_##call field __attribute__((unused)); \
+ int ret = 0; \
+ \
+ tstruct; \
+ \
+ trace_seq_printf(s, "\nprint fmt: " print); \
+ \
+ return ret; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Stage 3 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * enum print_line_t
+ * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
+ * {
+ * struct trace_seq *s = &iter->seq;
+ * struct ftrace_raw_<call> *field; <-- defined in stage 1
+ * struct trace_entry *entry;
+ * struct trace_seq *p;
+ * int ret;
+ *
+ * entry = iter->ent;
+ *
+ * if (entry->type != event_<call>.id) {
+ * WARN_ON_ONCE(1);
+ * return TRACE_TYPE_UNHANDLED;
+ * }
+ *
+ * field = (typeof(field))entry;
+ *
+ * p = get_cpu_var(ftrace_event_seq);
+ * trace_seq_init(p);
+ * ret = trace_seq_printf(s, <TP_printk> "\n");
+ * put_cpu();
+ * if (!ret)
+ * return TRACE_TYPE_PARTIAL_LINE;
+ *
+ * return TRACE_TYPE_HANDLED;
+ * }
+ *
+ * This is the method used to print the raw event to the trace
+ * output format. Note, this is not needed if the data is read
+ * in binary.
+ */
+
+#undef __entry
+#define __entry field
+
+#undef TP_printk
+#define TP_printk(fmt, args...) fmt "\n", args
+
+#undef __get_dynamic_array
+#define __get_dynamic_array(field) \
+ ((void *)__entry + __entry->__data_loc_##field)
+
+#undef __get_str
+#define __get_str(field) (char *)__get_dynamic_array(field)
+
+#undef __print_flags
+#define __print_flags(flag, delim, flag_array...) \
+ ({ \
+ static const struct trace_print_flags flags[] = \
+ { flag_array, { -1, NULL }}; \
+ ftrace_print_flags_seq(p, delim, flag, flags); \
+ })
+
+#undef __print_symbolic
+#define __print_symbolic(value, symbol_array...) \
+ ({ \
+ static const struct trace_print_flags symbols[] = \
+ { symbol_array, { -1, NULL }}; \
+ ftrace_print_symbols_seq(p, value, symbols); \
+ })
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+enum print_line_t \
+ftrace_raw_output_##call(struct trace_iterator *iter, int flags) \
+{ \
+ struct trace_seq *s = &iter->seq; \
+ struct ftrace_raw_##call *field; \
+ struct trace_entry *entry; \
+ struct trace_seq *p; \
+ int ret; \
+ \
+ entry = iter->ent; \
+ \
+ if (entry->type != event_##call.id) { \
+ WARN_ON_ONCE(1); \
+ return TRACE_TYPE_UNHANDLED; \
+ } \
+ \
+ field = (typeof(field))entry; \
+ \
+ p = &get_cpu_var(ftrace_event_seq); \
+ trace_seq_init(p); \
+ ret = trace_seq_printf(s, #call ": " print); \
+ put_cpu(); \
+ if (!ret) \
+ return TRACE_TYPE_PARTIAL_LINE; \
+ \
+ return TRACE_TYPE_HANDLED; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef __field
+#define __field(type, item) \
+ ret = trace_define_field(event_call, #type, #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item), is_signed_type(type)); \
+ if (ret) \
+ return ret;
+
+#undef __array
+#define __array(type, item, len) \
+ BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
+ ret = trace_define_field(event_call, #type "[" #len "]", #item, \
+ offsetof(typeof(field), item), \
+ sizeof(field.item), 0); \
+ if (ret) \
+ return ret;
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ ret = trace_define_field(event_call, "__data_loc" "[" #type "]", #item,\
+ offsetof(typeof(field), __data_loc_##item), \
+ sizeof(field.__data_loc_##item), 0);
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1)
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, func, print) \
+int \
+ftrace_define_fields_##call(void) \
+{ \
+ struct ftrace_raw_##call field; \
+ struct ftrace_event_call *event_call = &event_##call; \
+ int ret; \
+ \
+ __common_field(int, type, 1); \
+ __common_field(unsigned char, flags, 0); \
+ __common_field(unsigned char, preempt_count, 0); \
+ __common_field(int, pid, 1); \
+ __common_field(int, tgid, 1); \
+ \
+ tstruct; \
+ \
+ return ret; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * remember the offset of each array from the beginning of the event.
+ */
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __data_offsets->item = __data_size + \
+ offsetof(typeof(*entry), __data); \
+ __data_size += (len) * sizeof(type);
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, strlen(src) + 1) \
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+static inline int ftrace_get_offsets_##call( \
+ struct ftrace_data_offsets_##call *__data_offsets, proto) \
+{ \
+ int __data_size = 0; \
+ struct ftrace_raw_##call __maybe_unused *entry; \
+ \
+ tstruct; \
+ \
+ return __data_size; \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+/*
+ * Stage 4 of the trace events.
+ *
+ * Override the macros in <trace/trace_events.h> to include the following:
+ *
+ * static void ftrace_event_<call>(proto)
+ * {
+ * event_trace_printk(_RET_IP_, "<call>: " <fmt>);
+ * }
+ *
+ * static int ftrace_reg_event_<call>(void)
+ * {
+ * int ret;
+ *
+ * ret = register_trace_<call>(ftrace_event_<call>);
+ *	if (ret)
+ * pr_info("event trace: Could not activate trace point "
+ * "probe to <call>");
+ * return ret;
+ * }
+ *
+ * static void ftrace_unreg_event_<call>(void)
+ * {
+ * unregister_trace_<call>(ftrace_event_<call>);
+ * }
+ *
+ *
+ * For those macros defined with TRACE_EVENT:
+ *
+ * static struct ftrace_event_call event_<call>;
+ *
+ * static void ftrace_raw_event_<call>(proto)
+ * {
+ * struct ring_buffer_event *event;
+ * struct ftrace_raw_<call> *entry; <-- defined in stage 1
+ * unsigned long irq_flags;
+ * int pc;
+ *
+ * local_save_flags(irq_flags);
+ * pc = preempt_count();
+ *
+ * event = trace_current_buffer_lock_reserve(event_<call>.id,
+ * sizeof(struct ftrace_raw_<call>),
+ * irq_flags, pc);
+ * if (!event)
+ * return;
+ * entry = ring_buffer_event_data(event);
+ *
+ * <assign>; <-- Here we assign the entries by the __field and
+ * __array macros.
+ *
+ * trace_current_buffer_unlock_commit(event, irq_flags, pc);
+ * }
+ *
+ * static int ftrace_raw_reg_event_<call>(void)
+ * {
+ * int ret;
+ *
+ * ret = register_trace_<call>(ftrace_raw_event_<call>);
+ *	if (ret)
+ * pr_info("event trace: Could not activate trace point "
+ * "probe to <call>");
+ * return ret;
+ * }
+ *
+ * static void ftrace_unreg_event_<call>(void)
+ * {
+ * unregister_trace_<call>(ftrace_raw_event_<call>);
+ * }
+ *
+ * static struct trace_event ftrace_event_type_<call> = {
+ * .trace = ftrace_raw_output_<call>, <-- stage 2
+ * };
+ *
+ * static int ftrace_raw_init_event_<call>(void)
+ * {
+ * int id;
+ *
+ * id = register_ftrace_event(&ftrace_event_type_<call>);
+ * if (!id)
+ * return -ENODEV;
+ * event_<call>.id = id;
+ * return 0;
+ * }
+ *
+ * static struct ftrace_event_call __used
+ * __attribute__((__aligned__(4)))
+ * __attribute__((section("_ftrace_events"))) event_<call> = {
+ * .name = "<call>",
+ * .system = "<system>",
+ * .raw_init = ftrace_raw_init_event_<call>,
+ * .regfunc = ftrace_reg_event_<call>,
+ * .unregfunc = ftrace_unreg_event_<call>,
+ * .show_format = ftrace_format_<call>,
+ * }
+ *
+ */
+
+#undef TP_FMT
+#define TP_FMT(fmt, args...) fmt "\n", ##args
+
+#ifdef CONFIG_EVENT_PROFILE
+#define _TRACE_PROFILE(call, proto, args) \
+static void ftrace_profile_##call(proto) \
+{ \
+ extern void perf_tpcounter_event(int); \
+ perf_tpcounter_event(event_##call.id); \
+} \
+ \
+static int ftrace_profile_enable_##call(struct ftrace_event_call *event_call) \
+{ \
+ int ret = 0; \
+ \
+ if (!atomic_inc_return(&event_call->profile_count)) \
+ ret = register_trace_##call(ftrace_profile_##call); \
+ \
+ return ret; \
+} \
+ \
+static void ftrace_profile_disable_##call(struct ftrace_event_call *event_call)\
+{ \
+ if (atomic_add_negative(-1, &event_call->profile_count)) \
+ unregister_trace_##call(ftrace_profile_##call); \
+}
+
+#define _TRACE_PROFILE_INIT(call) \
+ .profile_count = ATOMIC_INIT(-1), \
+ .profile_enable = ftrace_profile_enable_##call, \
+ .profile_disable = ftrace_profile_disable_##call,
+
+#else
+#define _TRACE_PROFILE(call, proto, args)
+#define _TRACE_PROFILE_INIT(call)
+#endif
+
+#undef __entry
+#define __entry entry
+
+#undef __field
+#define __field(type, item)
+
+#undef __array
+#define __array(type, item, len)
+
+#undef __dynamic_array
+#define __dynamic_array(type, item, len) \
+ __entry->__data_loc_##item = __data_offsets.item;
+
+#undef __string
+#define __string(item, src) __dynamic_array(char, item, -1) \
+
+#undef __assign_str
+#define __assign_str(dst, src) \
+ strcpy(__get_str(dst), src);
+
+#undef TRACE_EVENT
+#define TRACE_EVENT(call, proto, args, tstruct, assign, print) \
+_TRACE_PROFILE(call, PARAMS(proto), PARAMS(args)) \
+ \
+static struct ftrace_event_call event_##call; \
+ \
+static void ftrace_raw_event_##call(proto) \
+{ \
+ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
+ struct ftrace_event_call *event_call = &event_##call; \
+ struct ring_buffer_event *event; \
+ struct ftrace_raw_##call *entry; \
+ unsigned long irq_flags; \
+ int __data_size; \
+ int pc; \
+ \
+ local_save_flags(irq_flags); \
+ pc = preempt_count(); \
+ \
+ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
+ \
+ event = trace_current_buffer_lock_reserve(event_##call.id, \
+ sizeof(*entry) + __data_size, \
+ irq_flags, pc); \
+ if (!event) \
+ return; \
+ entry = ring_buffer_event_data(event); \
+ \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ if (!filter_current_check_discard(event_call, entry, event)) \
+ trace_nowake_buffer_unlock_commit(event, irq_flags, pc); \
+} \
+ \
+static int ftrace_raw_reg_event_##call(void) \
+{ \
+ int ret; \
+ \
+ ret = register_trace_##call(ftrace_raw_event_##call); \
+ if (ret) \
+ pr_info("event trace: Could not activate trace point " \
+ "probe to " #call "\n"); \
+ return ret; \
+} \
+ \
+static void ftrace_raw_unreg_event_##call(void) \
+{ \
+ unregister_trace_##call(ftrace_raw_event_##call); \
+} \
+ \
+static struct trace_event ftrace_event_type_##call = { \
+ .trace = ftrace_raw_output_##call, \
+}; \
+ \
+static int ftrace_raw_init_event_##call(void) \
+{ \
+ int id; \
+ \
+ id = register_ftrace_event(&ftrace_event_type_##call); \
+ if (!id) \
+ return -ENODEV; \
+ event_##call.id = id; \
+ INIT_LIST_HEAD(&event_##call.fields); \
+ init_preds(&event_##call); \
+ return 0; \
+} \
+ \
+static struct ftrace_event_call __used \
+__attribute__((__aligned__(4))) \
+__attribute__((section("_ftrace_events"))) event_##call = { \
+ .name = #call, \
+ .system = __stringify(TRACE_SYSTEM), \
+ .event = &ftrace_event_type_##call, \
+ .raw_init = ftrace_raw_init_event_##call, \
+ .regfunc = ftrace_raw_reg_event_##call, \
+ .unregfunc = ftrace_raw_unreg_event_##call, \
+ .show_format = ftrace_format_##call, \
+ .define_fields = ftrace_define_fields_##call, \
+ _TRACE_PROFILE_INIT(call) \
+}
+
+#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
+
+#undef _TRACE_PROFILE
+#undef _TRACE_PROFILE_INIT
+
diff --git a/include/trace/irq.h b/include/trace/irq.h
deleted file mode 100644
index ff5d449..0000000
--- a/include/trace/irq.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _TRACE_IRQ_H
-#define _TRACE_IRQ_H
-
-#include <linux/interrupt.h>
-#include <linux/tracepoint.h>
-
-#include <trace/irq_event_types.h>
-
-#endif
diff --git a/include/trace/irq_event_types.h b/include/trace/irq_event_types.h
deleted file mode 100644
index 85964eb..0000000
--- a/include/trace/irq_event_types.h
+++ /dev/null
@@ -1,55 +0,0 @@
-
-/* use <trace/irq.h> instead */
-#ifndef TRACE_FORMAT
-# error Do not include this file directly.
-# error Unless you know what you are doing.
-#endif
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM irq
-
-/*
- * Tracepoint for entry of interrupt handler:
- */
-TRACE_FORMAT(irq_handler_entry,
- TP_PROTO(int irq, struct irqaction *action),
- TP_ARGS(irq, action),
- TP_FMT("irq=%d handler=%s", irq, action->name)
- );
-
-/*
- * Tracepoint for return of an interrupt handler:
- */
-TRACE_EVENT(irq_handler_exit,
-
- TP_PROTO(int irq, struct irqaction *action, int ret),
-
- TP_ARGS(irq, action, ret),
-
- TP_STRUCT__entry(
- __field( int, irq )
- __field( int, ret )
- ),
-
- TP_fast_assign(
- __entry->irq = irq;
- __entry->ret = ret;
- ),
-
- TP_printk("irq=%d return=%s",
- __entry->irq, __entry->ret ? "handled" : "unhandled")
-);
-
-TRACE_FORMAT(softirq_entry,
- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
- TP_ARGS(h, vec),
- TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
- );
-
-TRACE_FORMAT(softirq_exit,
- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
- TP_ARGS(h, vec),
- TP_FMT("softirq=%d action=%s", (int)(h - vec), softirq_to_name[h-vec])
- );
-
-#undef TRACE_SYSTEM
diff --git a/include/trace/kmemtrace.h b/include/trace/kmemtrace.h
deleted file mode 100644
index 28ee69f..0000000
--- a/include/trace/kmemtrace.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2008 Eduard - Gabriel Munteanu
- *
- * This file is released under GPL version 2.
- */
-
-#ifndef _LINUX_KMEMTRACE_H
-#define _LINUX_KMEMTRACE_H
-
-#ifdef __KERNEL__
-
-#include <linux/tracepoint.h>
-#include <linux/types.h>
-
-#ifdef CONFIG_KMEMTRACE
-extern void kmemtrace_init(void);
-#else
-static inline void kmemtrace_init(void)
-{
-}
-#endif
-
-DECLARE_TRACE(kmalloc,
- TP_PROTO(unsigned long call_site,
- const void *ptr,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
-DECLARE_TRACE(kmem_cache_alloc,
- TP_PROTO(unsigned long call_site,
- const void *ptr,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags));
-DECLARE_TRACE(kmalloc_node,
- TP_PROTO(unsigned long call_site,
- const void *ptr,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags,
- int node),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
-DECLARE_TRACE(kmem_cache_alloc_node,
- TP_PROTO(unsigned long call_site,
- const void *ptr,
- size_t bytes_req,
- size_t bytes_alloc,
- gfp_t gfp_flags,
- int node),
- TP_ARGS(call_site, ptr, bytes_req, bytes_alloc, gfp_flags, node));
-DECLARE_TRACE(kfree,
- TP_PROTO(unsigned long call_site, const void *ptr),
- TP_ARGS(call_site, ptr));
-DECLARE_TRACE(kmem_cache_free,
- TP_PROTO(unsigned long call_site, const void *ptr),
- TP_ARGS(call_site, ptr));
-
-#endif /* __KERNEL__ */
-
-#endif /* _LINUX_KMEMTRACE_H */
-
diff --git a/include/trace/lockdep.h b/include/trace/lockdep.h
deleted file mode 100644
index 5ca67df..0000000
--- a/include/trace/lockdep.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _TRACE_LOCKDEP_H
-#define _TRACE_LOCKDEP_H
-
-#include <linux/lockdep.h>
-#include <linux/tracepoint.h>
-
-#include <trace/lockdep_event_types.h>
-
-#endif
diff --git a/include/trace/lockdep_event_types.h b/include/trace/lockdep_event_types.h
deleted file mode 100644
index adccfcd..0000000
--- a/include/trace/lockdep_event_types.h
+++ /dev/null
@@ -1,44 +0,0 @@
-
-#ifndef TRACE_FORMAT
-# error Do not include this file directly.
-# error Unless you know what you are doing.
-#endif
-
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM lock
-
-#ifdef CONFIG_LOCKDEP
-
-TRACE_FORMAT(lock_acquire,
- TP_PROTO(struct lockdep_map *lock, unsigned int subclass,
- int trylock, int read, int check,
- struct lockdep_map *next_lock, unsigned long ip),
- TP_ARGS(lock, subclass, trylock, read, check, next_lock, ip),
- TP_FMT("%s%s%s", trylock ? "try " : "",
- read ? "read " : "", lock->name)
- );
-
-TRACE_FORMAT(lock_release,
- TP_PROTO(struct lockdep_map *lock, int nested, unsigned long ip),
- TP_ARGS(lock, nested, ip),
- TP_FMT("%s", lock->name)
- );
-
-#ifdef CONFIG_LOCK_STAT
-
-TRACE_FORMAT(lock_contended,
- TP_PROTO(struct lockdep_map *lock, unsigned long ip),
- TP_ARGS(lock, ip),
- TP_FMT("%s", lock->name)
- );
-
-TRACE_FORMAT(lock_acquired,
- TP_PROTO(struct lockdep_map *lock, unsigned long ip),
- TP_ARGS(lock, ip),
- TP_FMT("%s", lock->name)
- );
-
-#endif
-#endif
-
-#undef TRACE_SYSTEM
diff --git a/include/trace/sched.h b/include/trace/sched.h
deleted file mode 100644
index 4e372a1..0000000
--- a/include/trace/sched.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#ifndef _TRACE_SCHED_H
-#define _TRACE_SCHED_H
-
-#include <linux/sched.h>
-#include <linux/tracepoint.h>
-
-#include <trace/sched_event_types.h>
-
-#endif
diff --git a/include/trace/skb.h b/include/trace/skb.h
deleted file mode 100644
index b66206d..0000000
--- a/include/trace/skb.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifndef _TRACE_SKB_H_
-#define _TRACE_SKB_H_
-
-#include <linux/skbuff.h>
-#include <linux/tracepoint.h>
-
-DECLARE_TRACE(kfree_skb,
- TP_PROTO(struct sk_buff *skb, void *location),
- TP_ARGS(skb, location));
-
-#endif
diff --git a/include/trace/trace_event_types.h b/include/trace/trace_event_types.h
deleted file mode 100644
index df56f56..0000000
--- a/include/trace/trace_event_types.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/* trace/<type>_event_types.h here */
-
-#include <trace/sched_event_types.h>
-#include <trace/irq_event_types.h>
-#include <trace/lockdep_event_types.h>
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
deleted file mode 100644
index fd13750..0000000
--- a/include/trace/trace_events.h
+++ /dev/null
@@ -1,5 +0,0 @@
-/* trace/<type>.h here */
-
-#include <trace/sched.h>
-#include <trace/irq.h>
-#include <trace/lockdep.h>
diff --git a/include/trace/workqueue.h b/include/trace/workqueue.h
deleted file mode 100644
index 7626523..0000000
--- a/include/trace/workqueue.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#ifndef __TRACE_WORKQUEUE_H
-#define __TRACE_WORKQUEUE_H
-
-#include <linux/tracepoint.h>
-#include <linux/workqueue.h>
-#include <linux/sched.h>
-
-DECLARE_TRACE(workqueue_insertion,
- TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
- TP_ARGS(wq_thread, work));
-
-DECLARE_TRACE(workqueue_execution,
- TP_PROTO(struct task_struct *wq_thread, struct work_struct *work),
- TP_ARGS(wq_thread, work));
-
-/* Trace the creation of one workqueue thread on a cpu */
-DECLARE_TRACE(workqueue_creation,
- TP_PROTO(struct task_struct *wq_thread, int cpu),
- TP_ARGS(wq_thread, cpu));
-
-DECLARE_TRACE(workqueue_destruction,
- TP_PROTO(struct task_struct *wq_thread),
- TP_ARGS(wq_thread));
-
-#endif /* __TRACE_WORKQUEUE_H */
diff --git a/include/video/pxa168fb.h b/include/video/pxa168fb.h
new file mode 100644
index 0000000..b5cc72f
--- /dev/null
+++ b/include/video/pxa168fb.h
@@ -0,0 +1,127 @@
+/*
+ * linux/arch/arm/mach-mmp/include/mach/pxa168fb.h
+ *
+ * Copyright (C) 2009 Marvell International Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ASM_MACH_PXA168FB_H
+#define __ASM_MACH_PXA168FB_H
+
+#include <linux/fb.h>
+#include <linux/interrupt.h>
+
+/* Dumb interface */
+#define PIN_MODE_DUMB_24 0
+#define PIN_MODE_DUMB_18_SPI 1
+#define PIN_MODE_DUMB_18_GPIO 2
+#define PIN_MODE_DUMB_16_SPI 3
+#define PIN_MODE_DUMB_16_GPIO 4
+#define PIN_MODE_DUMB_12_SPI_GPIO 5
+#define PIN_MODE_SMART_18_SPI 6
+#define PIN_MODE_SMART_16_SPI 7
+#define PIN_MODE_SMART_8_SPI_GPIO 8
+
+/* Dumb interface pin allocation */
+#define DUMB_MODE_RGB565 0
+#define DUMB_MODE_RGB565_UPPER 1
+#define DUMB_MODE_RGB666 2
+#define DUMB_MODE_RGB666_UPPER 3
+#define DUMB_MODE_RGB444 4
+#define DUMB_MODE_RGB444_UPPER 5
+#define DUMB_MODE_RGB888 6
+
+/* default fb buffer size WVGA-32bits */
+#define DEFAULT_FB_SIZE (800 * 480 * 4)
+
+/*
+ * Buffer pixel format
+ * bit 0 is for RB swap.
+ * bit 12 is for Y U-or-V swap.
+ */
+#define PIX_FMT_RGB565 0
+#define PIX_FMT_BGR565 1
+#define PIX_FMT_RGB1555 2
+#define PIX_FMT_BGR1555 3
+#define PIX_FMT_RGB888PACK 4
+#define PIX_FMT_BGR888PACK 5
+#define PIX_FMT_RGB888UNPACK 6
+#define PIX_FMT_BGR888UNPACK 7
+#define PIX_FMT_RGBA888 8
+#define PIX_FMT_BGRA888 9
+#define PIX_FMT_YUV422PACK 10
+#define PIX_FMT_YVU422PACK 11
+#define PIX_FMT_YUV422PLANAR 12
+#define PIX_FMT_YVU422PLANAR 13
+#define PIX_FMT_YUV420PLANAR 14
+#define PIX_FMT_YVU420PLANAR 15
+#define PIX_FMT_PSEUDOCOLOR 20
+#define PIX_FMT_UYVY422PACK (0x1000|PIX_FMT_YUV422PACK)
+
+/*
+ * PXA LCD controller private state.
+ */
+struct pxa168fb_info {
+ struct device *dev;
+ struct clk *clk;
+ struct fb_info *info;
+
+ void __iomem *reg_base;
+ dma_addr_t fb_start_dma;
+ u32 pseudo_palette[16];
+
+ int pix_fmt;
+ unsigned is_blanked:1;
+ unsigned panel_rbswap:1;
+ unsigned active:1;
+};
+
+/*
+ * PXA fb machine information
+ */
+struct pxa168fb_mach_info {
+ char id[16];
+
+ int num_modes;
+ struct fb_videomode *modes;
+
+ /*
+ * Pix_fmt
+ */
+ unsigned pix_fmt;
+
+ /*
+ * I/O pin allocation.
+ */
+ unsigned io_pin_allocation_mode:4;
+
+ /*
+ * Dumb panel -- assignment of R/G/B component info to the 24
+ * available external data lanes.
+ */
+ unsigned dumb_mode:4;
+ unsigned panel_rgb_reverse_lanes:1;
+
+ /*
+ * Dumb panel -- GPIO output data.
+ */
+ unsigned gpio_output_mask:8;
+ unsigned gpio_output_data:8;
+
+ /*
+ * Dumb panel -- configurable output signal polarity.
+ */
+ unsigned invert_composite_blank:1;
+ unsigned invert_pix_val_ena:1;
+ unsigned invert_pixclock:1;
+ unsigned invert_vsync:1;
+ unsigned invert_hsync:1;
+ unsigned panel_rbswap:1;
+ unsigned active:1;
+ unsigned enable_lcd:1;
+};
+
+#endif /* __ASM_MACH_PXA168FB_H */
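The mach info structure above is presumably filled in by board support code and handed to the driver as platform data; a hypothetical sketch for an 800x480 dumb panel follows (all names and timing values are invented for illustration):

/* Hypothetical board-file fragment; assumes <linux/fb.h> and this header. */
static struct fb_videomode example_wvga_modes[] = {
	{
		.refresh	= 60,
		.xres		= 800,
		.yres		= 480,
		.hsync_len	= 1,
		.left_margin	= 215,
		.right_margin	= 40,
		.vsync_len	= 1,
		.upper_margin	= 34,
		.lower_margin	= 10,
	},
};

static struct pxa168fb_mach_info example_lcd_info = {
	.id			= "example-lcd",
	.modes			= example_wvga_modes,
	.num_modes		= ARRAY_SIZE(example_wvga_modes),
	.pix_fmt		= PIX_FMT_RGB565,
	.io_pin_allocation_mode	= PIN_MODE_DUMB_24,
	.dumb_mode		= DUMB_MODE_RGB888,
	.active			= 1,
};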
diff --git a/include/xen/Kbuild b/include/xen/Kbuild
new file mode 100644
index 0000000..4e65c16
--- /dev/null
+++ b/include/xen/Kbuild
@@ -0,0 +1 @@
+header-y += evtchn.h
diff --git a/include/xen/events.h b/include/xen/events.h
index 0d5f1ad..e68d59a 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -53,4 +53,7 @@ bool xen_test_irq_pending(int irq);
irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq);
+/* Determine the IRQ which is bound to an event channel */
+unsigned irq_from_evtchn(unsigned int evtchn);
+
#endif /* _XEN_EVENTS_H */
diff --git a/include/xen/evtchn.h b/include/xen/evtchn.h
new file mode 100644
index 0000000..14e833e
--- /dev/null
+++ b/include/xen/evtchn.h
@@ -0,0 +1,88 @@
+/******************************************************************************
+ * evtchn.h
+ *
+ * Interface to /dev/xen/evtchn.
+ *
+ * Copyright (c) 2003-2005, K A Fraser
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation; or, when distributed
+ * separately from the Linux kernel or incorporated into other
+ * software packages, subject to the following license:
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this source file (the "Software"), to deal in the Software without
+ * restriction, including without limitation the rights to use, copy, modify,
+ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
+ * and to permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ */
+
+#ifndef __LINUX_PUBLIC_EVTCHN_H__
+#define __LINUX_PUBLIC_EVTCHN_H__
+
+/*
+ * Bind a fresh port to VIRQ @virq.
+ * Return allocated port.
+ */
+#define IOCTL_EVTCHN_BIND_VIRQ \
+ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
+struct ioctl_evtchn_bind_virq {
+ unsigned int virq;
+};
+
+/*
+ * Bind a fresh port to remote <@remote_domain, @remote_port>.
+ * Return allocated port.
+ */
+#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
+ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
+struct ioctl_evtchn_bind_interdomain {
+ unsigned int remote_domain, remote_port;
+};
+
+/*
+ * Allocate a fresh port for binding to @remote_domain.
+ * Return allocated port.
+ */
+#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
+ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
+struct ioctl_evtchn_bind_unbound_port {
+ unsigned int remote_domain;
+};
+
+/*
+ * Unbind previously allocated @port.
+ */
+#define IOCTL_EVTCHN_UNBIND \
+ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
+struct ioctl_evtchn_unbind {
+ unsigned int port;
+};
+
+/*
+ * Send a notification to the previously allocated @port.
+ */
+#define IOCTL_EVTCHN_NOTIFY \
+ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
+struct ioctl_evtchn_notify {
+ unsigned int port;
+};
+
+/* Clear and reinitialise the event buffer. Clear error condition. */
+#define IOCTL_EVTCHN_RESET \
+ _IOC(_IOC_NONE, 'E', 5, 0)
+
+#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
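User space drives this interface with plain ioctl() calls on /dev/xen/evtchn; a minimal sketch of binding a VIRQ, assuming the exported header is installed as <xen/evtchn.h> and with error handling trimmed:

/* Illustrative user-space fragment. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <xen/evtchn.h>

static int bind_virq(unsigned int virq)
{
	struct ioctl_evtchn_bind_virq bind = { .virq = virq };
	int fd = open("/dev/xen/evtchn", O_RDWR);
	int port;

	if (fd < 0)
		return -1;

	/* On success the ioctl returns the allocated local port. */
	port = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
	if (port < 0) {
		close(fd);
		return -1;
	}
	printf("VIRQ %u bound to local port %d\n", virq, port);
	return fd;	/* binding lives as long as fd stays open */
}

Pending ports can then be read() back from the same descriptor, and closing it tears the bindings down.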
diff --git a/include/xen/interface/version.h b/include/xen/interface/version.h
index 453235e..e8b6519 100644
--- a/include/xen/interface/version.h
+++ b/include/xen/interface/version.h
@@ -57,4 +57,7 @@ struct xen_feature_info {
/* Declares the features reported by XENVER_get_features. */
#include "features.h"
+/* arg == NULL; returns host memory page size. */
+#define XENVER_pagesize 7
+
#endif /* __XEN_PUBLIC_VERSION_H__ */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index f87f961..b9763ba 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -91,8 +91,7 @@ struct xenbus_driver {
void (*otherend_changed)(struct xenbus_device *dev,
enum xenbus_state backend_state);
int (*remove)(struct xenbus_device *dev);
- int (*suspend)(struct xenbus_device *dev);
- int (*suspend_cancel)(struct xenbus_device *dev);
+ int (*suspend)(struct xenbus_device *dev, pm_message_t state);
int (*resume)(struct xenbus_device *dev);
int (*uevent)(struct xenbus_device *, char **, int, char *, int);
struct device_driver driver;