Diffstat (limited to 'include')
-rw-r--r--	include/linux/cpu.h            |   5
-rw-r--r--	include/linux/cpuset.h         |  47
-rw-r--r--	include/linux/fs.h             |  11
-rw-r--r--	include/linux/init_task.h      |   8
-rw-r--r--	include/linux/irqdesc.h        |   1
-rw-r--r--	include/linux/memcontrol.h     |   3
-rw-r--r--	include/linux/mfd/ezx-pcap.h   |   1
-rw-r--r--	include/linux/migrate.h        |  23
-rw-r--r--	include/linux/mmzone.h         |  14
-rw-r--r--	include/linux/random.h         |  19
-rw-r--r--	include/linux/sched.h          |   2
-rw-r--r--	include/linux/swap.h           |   7
-rw-r--r--	include/linux/usb.h            |  21
-rw-r--r--	include/trace/events/random.h  | 134
-rw-r--r--	include/trace/events/vmscan.h  |  85
15 files changed, 328 insertions(+), 53 deletions(-)
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 97f1ca7..111797a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -66,8 +66,9 @@ enum {
 	/* migration should happen before other stuff but after perf */
 	CPU_PRI_PERF		= 20,
 	CPU_PRI_MIGRATION	= 10,
-	/* prepare workqueues for other notifiers */
-	CPU_PRI_WORKQUEUE	= 5,
+	/* bring up workqueues before normal notifiers and down after */
+	CPU_PRI_WORKQUEUE_UP	= 5,
+	CPU_PRI_WORKQUEUE_DOWN	= -5,
 };
 
 #ifdef CONFIG_SMP
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e9eaec5..7a7e5fd 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void);
 
 extern void cpuset_print_task_mems_allowed(struct task_struct *p);
 
 /*
- * reading current mems_allowed and mempolicy in the fastpath must protected
- * by get_mems_allowed()
+ * get_mems_allowed is required when making decisions involving mems_allowed
+ * such as during page allocation. mems_allowed can be updated in parallel
+ * and depending on the new value an operation can fail potentially causing
+ * process failure. A retry loop with get_mems_allowed and put_mems_allowed
+ * prevents these artificial failures.
  */
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
 {
-	current->mems_allowed_change_disable++;
-
-	/*
-	 * ensure that reading mems_allowed and mempolicy happens after the
-	 * update of ->mems_allowed_change_disable.
-	 *
-	 * the write-side task finds ->mems_allowed_change_disable is not 0,
-	 * and knows the read-side task is reading mems_allowed or mempolicy,
-	 * so it will clear old bits lazily.
-	 */
-	smp_mb();
+	return read_seqcount_begin(&current->mems_allowed_seq);
 }
 
-static inline void put_mems_allowed(void)
+/*
+ * If this returns false, the operation that took place after get_mems_allowed
+ * may have failed. It is up to the caller to retry the operation if
+ * appropriate.
+ */
+static inline bool put_mems_allowed(unsigned int seq)
 {
-	/*
-	 * ensure that reading mems_allowed and mempolicy before reducing
-	 * mems_allowed_change_disable.
-	 *
-	 * the write-side task will know that the read-side task is still
-	 * reading mems_allowed or mempolicy, don't clears old bits in the
-	 * nodemask.
-	 */
-	smp_mb();
-	--ACCESS_ONCE(current->mems_allowed_change_disable);
+	return !read_seqcount_retry(&current->mems_allowed_seq, seq);
 }
 
 static inline void set_mems_allowed(nodemask_t nodemask)
 {
 	task_lock(current);
+	write_seqcount_begin(&current->mems_allowed_seq);
 	current->mems_allowed = nodemask;
+	write_seqcount_end(&current->mems_allowed_seq);
 	task_unlock(current);
 }
 
@@ -234,12 +225,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
 {
 }
 
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
 {
+	return 0;
 }
 
-static inline void put_mems_allowed(void)
+static inline bool put_mems_allowed(unsigned int seq)
 {
+	return true;
 }
 
 #endif /* !CONFIG_CPUSETS */
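The new seqcount API replaces the memory-barrier scheme with an optimistic read side. A minimal sketch of the retry loop the comment above describes, modelled on how an allocation path would consume this API (the function name and the alloc_page() call are illustrative, not part of the patch):

	struct page *alloc_page_respecting_cpuset(gfp_t gfp)
	{
		struct page *page;
		unsigned int cpuset_mems_cookie;

	retry_cpuset:
		/* snapshot the seqcount before reading current->mems_allowed */
		cpuset_mems_cookie = get_mems_allowed();

		page = alloc_page(gfp);

		/*
		 * A failure observed while mems_allowed was being rewritten may
		 * be an artifact of the concurrent update, so retry rather than
		 * fail the process.
		 */
		if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
			goto retry_cpuset;

		return page;
	}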
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 96b1035..212ea7b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -523,6 +523,7 @@ enum positive_aop_returns {
 struct page;
 struct address_space;
 struct writeback_control;
+enum migrate_mode;
 
 struct iov_iter {
 	const struct iovec *iov;
@@ -607,9 +608,12 @@ struct address_space_operations {
 			loff_t offset, unsigned long nr_segs);
 	int (*get_xip_mem)(struct address_space *, pgoff_t, int,
 						void **, unsigned long *);
-	/* migrate the contents of a page to the specified target */
+	/*
+	 * migrate the contents of a page to the specified target. If sync
+	 * is false, it must not block.
+	 */
 	int (*migratepage) (struct address_space *,
-			struct page *, struct page *);
+			struct page *, struct page *, enum migrate_mode);
 	int (*launder_page) (struct page *);
 	int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
 					unsigned long);
@@ -2478,7 +2482,8 @@ extern int generic_check_addressable(unsigned, u64);
 
 #ifdef CONFIG_MIGRATION
 extern int buffer_migrate_page(struct address_space *,
-				struct page *, struct page *);
+				struct page *, struct page *,
+				enum migrate_mode);
 #else
 #define buffer_migrate_page NULL
 #endif
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 580f70c..5e41a8e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -30,6 +30,13 @@ extern struct fs_struct init_fs;
 #define INIT_THREADGROUP_FORK_LOCK(sig)
 #endif
 
+#ifdef CONFIG_CPUSETS
+#define INIT_CPUSET_SEQ							\
+	.mems_allowed_seq = SEQCNT_ZERO,
+#else
+#define INIT_CPUSET_SEQ
+#endif
+
 #define INIT_SIGNALS(sig) {						\
 	.nr_threads	= 1,						\
 	.wait_chldexit	= __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
@@ -193,6 +200,7 @@ extern struct cred init_cred;
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
 	INIT_TASK_RCU_PREEMPT(tsk)					\
+	INIT_CPUSET_SEQ							\
 }
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 2d921b3..d0a3100 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -38,7 +38,6 @@ struct timer_rand_state;
  */
 struct irq_desc {
 	struct irq_data		irq_data;
-	struct timer_rand_state *timer_rand_state;
 	unsigned int __percpu	*kstat_irqs;
 	irq_flow_handler_t	handle_irq;
 #ifdef CONFIG_IRQ_PREFLOW_FASTEOI
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 313a00e..4a8da84 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,7 +35,8 @@ enum mem_cgroup_page_stat_item {
 
 extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 					struct list_head *dst,
 					unsigned long *scanned, int order,
-					int mode, struct zone *z,
+					isolate_mode_t mode,
+					struct zone *z,
 					struct mem_cgroup *mem_cont,
 					int active, int file);
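For filesystems the ->migratepage signature change is mechanical; a block-device-backed filesystem would typically wire the callback as below (the myfs_* names are hypothetical, only buffer_migrate_page comes from this patch):

	static const struct address_space_operations myfs_aops = {
		.readpage	= myfs_readpage,	/* hypothetical helpers */
		.writepage	= myfs_writepage,
		/*
		 * buffer_migrate_page now receives the migrate_mode, so it can
		 * bail out instead of blocking when called with MIGRATE_ASYNC.
		 */
		.migratepage	= buffer_migrate_page,
	};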
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 40c37216..32a1b5c 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -16,6 +16,7 @@ struct pcap_subdev {
 struct pcap_platform_data {
 	unsigned int irq_base;
 	unsigned int config;
+	int gpio;
 	void (*init) (void *); /* board specific init */
 	int num_subdevs;
 	struct pcap_subdev *subdevs;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e39aeec..eaf8674 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -6,18 +6,31 @@
 typedef struct page *new_page_t(struct page *, unsigned long private, int **);
 
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ *	on most operations but not ->writepage as the potential stall time
+ *	is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+	MIGRATE_ASYNC,
+	MIGRATE_SYNC_LIGHT,
+	MIGRATE_SYNC,
+};
+
 #ifdef CONFIG_MIGRATION
 #define PAGE_MIGRATION 1
 
 extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
-			struct page *, struct page *);
+			struct page *, struct page *, enum migrate_mode);
 extern int migrate_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
-			bool sync);
+			enum migrate_mode mode);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
 			unsigned long private, bool offlining,
-			bool sync);
+			enum migrate_mode mode);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
-		bool sync) { return -ENOSYS; }
+		enum migrate_mode mode) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
 		unsigned long private, bool offlining,
-		bool sync) { return -ENOSYS; }
+		enum migrate_mode mode) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index aa2d80b..b32f3f9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -158,6 +158,20 @@ static inline int is_unevictable_lru(enum lru_list l)
 	return (l == LRU_UNEVICTABLE);
 }
 
+/* Isolate inactive pages */
+#define ISOLATE_INACTIVE	((__force isolate_mode_t)0x1)
+/* Isolate active pages */
+#define ISOLATE_ACTIVE		((__force isolate_mode_t)0x2)
+/* Isolate clean file */
+#define ISOLATE_CLEAN		((__force isolate_mode_t)0x4)
+/* Isolate unmapped file */
+#define ISOLATE_UNMAPPED	((__force isolate_mode_t)0x8)
+/* Isolate for asynchronous migration */
+#define ISOLATE_ASYNC_MIGRATE	((__force isolate_mode_t)0x10)
+
+/* LRU Isolation modes. */
+typedef unsigned __bitwise__ isolate_mode_t;
+
 enum zone_watermarks {
 	WMARK_MIN,
 	WMARK_LOW,
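Putting the two new types together, a compaction-style caller derives both the isolation flags and the migration mode from the same sync decision; a simplified sketch under that assumption (not quoted from mm/compaction.c):

	static int isolate_and_migrate(struct list_head *migratelist,
				       new_page_t get_new_page, bool sync)
	{
		isolate_mode_t mode = ISOLATE_ACTIVE | ISOLATE_INACTIVE;

		/* an asynchronous caller must not block during isolation either */
		if (!sync)
			mode |= ISOLATE_ASYNC_MIGRATE;

		/* ... __isolate_lru_page(page, mode, 0) populates migratelist ... */

		return migrate_pages(migratelist, get_new_page, 0, false,
				     sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
	}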
diff --git a/include/linux/random.h b/include/linux/random.h
index d13059f..ac621ce 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -48,13 +48,13 @@ struct rnd_state {
 
 #ifdef __KERNEL__
 
-extern void rand_initialize_irq(int irq);
-
+extern void add_device_randomness(const void *, unsigned int);
 extern void add_input_randomness(unsigned int type, unsigned int code,
 				 unsigned int value);
-extern void add_interrupt_randomness(int irq);
+extern void add_interrupt_randomness(int irq, int irq_flags);
 
 extern void get_random_bytes(void *buf, int nbytes);
+extern void get_random_bytes_arch(void *buf, int nbytes);
 void generate_random_uuid(unsigned char uuid_out[16]);
 
 #ifndef MODULE
@@ -91,6 +91,19 @@ static inline void prandom32_seed(struct rnd_state *state, u64 seed)
 	state->s3 = __seed(i, 15);
 }
 
+#ifdef CONFIG_ARCH_RANDOM
+# include <asm/archrandom.h>
+#else
+static inline int arch_get_random_long(unsigned long *v)
+{
+	return 0;
+}
+static inline int arch_get_random_int(unsigned int *v)
+{
+	return 0;
+}
+#endif
+
 #endif /* __KERNEL___ */
 
 #endif /* _LINUX_RANDOM_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 35895e3..189b442 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1484,7 +1484,7 @@ struct task_struct {
 #endif
 #ifdef CONFIG_CPUSETS
 	nodemask_t mems_allowed;	/* Protected by alloc_lock */
-	int mems_allowed_change_disable;
+	seqcount_t mems_allowed_seq;	/* Seqence no to catch updates */
 	int cpuset_mem_spread_rotor;
 	int cpuset_slab_spread_rotor;
 #endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a273468..e73799d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -243,11 +243,6 @@ static inline void lru_cache_add_file(struct page *page)
 	__lru_cache_add(page, LRU_INACTIVE_FILE);
 }
 
-/* LRU Isolation modes. */
-#define ISOLATE_INACTIVE 0	/* Isolate inactive pages. */
-#define ISOLATE_ACTIVE 1	/* Isolate active pages. */
-#define ISOLATE_BOTH 2		/* Isolate both active and inactive pages. */
-
 /* linux/mm/vmscan.c */
 extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 					gfp_t gfp_mask, nodemask_t *mask);
@@ -259,7 +254,7 @@ extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 						  unsigned int swappiness,
 						  struct zone *zone,
 						  unsigned long *nr_scanned);
-extern int __isolate_lru_page(struct page *page, int mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
 extern unsigned long shrink_all_memory(unsigned long nr_pages);
 extern int vm_swappiness;
 extern int remove_mapping(struct address_space *mapping, struct page *page);
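The arch_get_random_long() stubs return 0 so that generic code can opportunistically mix in hardware randomness where the architecture provides it; a sketch of the calling convention (the helper below is illustrative, not from the patch):

	static void mix_in_arch_random(unsigned long *pool, int nwords)
	{
		int i;

		for (i = 0; i < nwords; i++) {
			unsigned long v;

			/* nonzero only when the CPU actually supplied a value */
			if (!arch_get_random_long(&v))
				break;
			pool[i] ^= v;
		}
	}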
diff --git a/include/linux/usb.h b/include/linux/usb.h
index b08e04c..6cd1576 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -757,6 +757,27 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
 	.bInterfaceSubClass = (sc), \
 	.bInterfaceProtocol = (pr)
 
+/**
+ * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
+ * @vend: the 16 bit USB Vendor ID
+ * @cl: bInterfaceClass value
+ * @sc: bInterfaceSubClass value
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific vendor with a specific class of interfaces.
+ *
+ * This is especially useful when explicitly matching devices that have
+ * vendor specific bDeviceClass values, but standards-compliant interfaces.
+ */
+#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
+	.match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+		| USB_DEVICE_ID_MATCH_VENDOR, \
+	.idVendor = (vend), \
+	.bInterfaceClass = (cl), \
+	.bInterfaceSubClass = (sc), \
+	.bInterfaceProtocol = (pr)
+
 /* ----------------------------------------------------------------------- */
 
 /* Stuff for dynamic usb ids */
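A typical use in a driver's match table, for a vendor whose devices report a vendor-specific bDeviceClass but expose standard CDC interfaces (the driver name and vendor ID below are made up for illustration):

	static const struct usb_device_id mydrv_id_table[] = {
		{ USB_VENDOR_AND_INTERFACE_INFO(0x1234,
			USB_CLASS_COMM,
			USB_CDC_SUBCLASS_ETHERNET,
			USB_CDC_PROTO_NONE) },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, mydrv_id_table);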
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
new file mode 100644
index 0000000..422df19
--- /dev/null
+++ b/include/trace/events/random.h
@@ -0,0 +1,134 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM random
+
+#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RANDOM_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(random__mix_pool_bytes,
+	TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+	TP_ARGS(pool_name, bytes, IP),
+
+	TP_STRUCT__entry(
+		__field( const char *,	pool_name	)
+		__field(	  int,	bytes		)
+		__field(unsigned long,	IP		)
+	),
+
+	TP_fast_assign(
+		__entry->pool_name	= pool_name;
+		__entry->bytes		= bytes;
+		__entry->IP		= IP;
+	),
+
+	TP_printk("%s pool: bytes %d caller %pF",
+		  __entry->pool_name, __entry->bytes, (void *)__entry->IP)
+);
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
+	TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+	TP_ARGS(pool_name, bytes, IP)
+);
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
+	TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+	TP_ARGS(pool_name, bytes, IP)
+);
+
+TRACE_EVENT(credit_entropy_bits,
+	TP_PROTO(const char *pool_name, int bits, int entropy_count,
+		 int entropy_total, unsigned long IP),
+
+	TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
+
+	TP_STRUCT__entry(
+		__field( const char *,	pool_name	)
+		__field(	  int,	bits		)
+		__field(	  int,	entropy_count	)
+		__field(	  int,	entropy_total	)
+		__field(unsigned long,	IP		)
+	),
+
+	TP_fast_assign(
+		__entry->pool_name	= pool_name;
+		__entry->bits		= bits;
+		__entry->entropy_count	= entropy_count;
+		__entry->entropy_total	= entropy_total;
+		__entry->IP		= IP;
+	),
+
+	TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
+		  "caller %pF", __entry->pool_name, __entry->bits,
+		  __entry->entropy_count, __entry->entropy_total,
+		  (void *)__entry->IP)
+);
+
+TRACE_EVENT(get_random_bytes,
+	TP_PROTO(int nbytes, unsigned long IP),
+
+	TP_ARGS(nbytes, IP),
+
+	TP_STRUCT__entry(
+		__field(	  int,	nbytes		)
+		__field(unsigned long,	IP		)
+	),
+
+	TP_fast_assign(
+		__entry->nbytes		= nbytes;
+		__entry->IP		= IP;
+	),
+
+	TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+);
+
+DECLARE_EVENT_CLASS(random__extract_entropy,
+	TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+		 unsigned long IP),
+
+	TP_ARGS(pool_name, nbytes, entropy_count, IP),
+
+	TP_STRUCT__entry(
+		__field( const char *,	pool_name	)
+		__field(	  int,	nbytes		)
+		__field(	  int,	entropy_count	)
+		__field(unsigned long,	IP		)
+	),
+
+	TP_fast_assign(
+		__entry->pool_name	= pool_name;
+		__entry->nbytes		= nbytes;
+		__entry->entropy_count	= entropy_count;
+		__entry->IP		= IP;
+	),
+
+	TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+		  __entry->pool_name, __entry->nbytes, __entry->entropy_count,
+		  (void *)__entry->IP)
+);
+
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy,
+	TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+		 unsigned long IP),
+
+	TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
+	TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+		 unsigned long IP),
+
+	TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+
+
+#endif /* _TRACE_RANDOM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
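Each DEFINE_EVENT above generates a trace_<event>() stub the random driver can call, with _RET_IP_ feeding the %pF caller field; a rough call-site sketch (the pool structure and its fields are assumptions for illustration, paraphrased rather than quoted from drivers/char/random.c):

	static void credit_entropy_bits_example(struct entropy_store *r, int nbits)
	{
		/* entropy_total bookkeeping is driver-internal; 0 is a stand-in */
		trace_credit_entropy_bits(r->name, nbits, r->entropy_count,
					  0, _RET_IP_);
	}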
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index b2c33bd..edc4b3d 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -179,6 +179,83 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
 	TP_ARGS(nr_reclaimed)
 );
 
+TRACE_EVENT(mm_shrink_slab_start,
+	TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
+		long nr_objects_to_shrink, unsigned long pgs_scanned,
+		unsigned long lru_pgs, unsigned long cache_items,
+		unsigned long long delta, unsigned long total_scan),
+
+	TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs,
+		cache_items, delta, total_scan),
+
+	TP_STRUCT__entry(
+		__field(struct shrinker *, shr)
+		__field(void *, shrink)
+		__field(long, nr_objects_to_shrink)
+		__field(gfp_t, gfp_flags)
+		__field(unsigned long, pgs_scanned)
+		__field(unsigned long, lru_pgs)
+		__field(unsigned long, cache_items)
+		__field(unsigned long long, delta)
+		__field(unsigned long, total_scan)
+	),
+
+	TP_fast_assign(
+		__entry->shr = shr;
+		__entry->shrink = shr->shrink;
+		__entry->nr_objects_to_shrink = nr_objects_to_shrink;
+		__entry->gfp_flags = sc->gfp_mask;
+		__entry->pgs_scanned = pgs_scanned;
+		__entry->lru_pgs = lru_pgs;
+		__entry->cache_items = cache_items;
+		__entry->delta = delta;
+		__entry->total_scan = total_scan;
+	),
+
+	TP_printk("%pF %p: objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld",
+		__entry->shrink,
+		__entry->shr,
+		__entry->nr_objects_to_shrink,
+		show_gfp_flags(__entry->gfp_flags),
+		__entry->pgs_scanned,
+		__entry->lru_pgs,
+		__entry->cache_items,
+		__entry->delta,
+		__entry->total_scan)
+);
+
+TRACE_EVENT(mm_shrink_slab_end,
+	TP_PROTO(struct shrinker *shr, int shrinker_retval,
+		long unused_scan_cnt, long new_scan_cnt),
+
+	TP_ARGS(shr, shrinker_retval, unused_scan_cnt, new_scan_cnt),
+
+	TP_STRUCT__entry(
+		__field(struct shrinker *, shr)
+		__field(void *, shrink)
+		__field(long, unused_scan)
+		__field(long, new_scan)
+		__field(int, retval)
+		__field(long, total_scan)
+	),
+
+	TP_fast_assign(
+		__entry->shr = shr;
+		__entry->shrink = shr->shrink;
+		__entry->unused_scan = unused_scan_cnt;
+		__entry->new_scan = new_scan_cnt;
+		__entry->retval = shrinker_retval;
+		__entry->total_scan = new_scan_cnt - unused_scan_cnt;
+	),
+
+	TP_printk("%pF %p: unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
+		__entry->shrink,
+		__entry->shr,
+		__entry->unused_scan,
+		__entry->new_scan,
+		__entry->total_scan,
+		__entry->retval)
+);
+
 DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
 
@@ -189,7 +266,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
 		unsigned long nr_lumpy_taken,
 		unsigned long nr_lumpy_dirty,
 		unsigned long nr_lumpy_failed,
-		int isolate_mode),
+		isolate_mode_t isolate_mode),
 
 	TP_ARGS(order, nr_requested, nr_scanned, nr_taken,
 		nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode),
@@ -201,7 +278,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
 		__field(unsigned long, nr_lumpy_taken)
 		__field(unsigned long, nr_lumpy_dirty)
 		__field(unsigned long, nr_lumpy_failed)
-		__field(int, isolate_mode)
+		__field(isolate_mode_t, isolate_mode)
 	),
 
 	TP_fast_assign(
@@ -235,7 +312,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
 		unsigned long nr_lumpy_taken,
 		unsigned long nr_lumpy_dirty,
 		unsigned long nr_lumpy_failed,
-		int isolate_mode),
+		isolate_mode_t isolate_mode),
 
 	TP_ARGS(order, nr_requested, nr_scanned, nr_taken,
 		nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
@@ -250,7 +327,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
 		unsigned long nr_lumpy_taken,
 		unsigned long nr_lumpy_dirty,
 		unsigned long nr_lumpy_failed,
-		int isolate_mode),
+		isolate_mode_t isolate_mode),
 
 	TP_ARGS(order, nr_requested, nr_scanned, nr_taken,
 		nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
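The two shrink_slab events are meant to bracket a shrinker invocation in mm/vmscan.c so tooling can pair start and end records; a paraphrased sketch of that pairing (the bookkeeping values below are placeholders, not the real shrink_slab() computation):

	static void shrink_one_slab(struct shrinker *shrinker,
				    struct shrink_control *sc,
				    unsigned long nr_pages_scanned,
				    unsigned long lru_pages)
	{
		long nr = 0;			/* deferred work from last pass */
		unsigned long max_pass = 1024;	/* shrinker's object count */
		unsigned long long delta = 0;	/* work added this pass */
		unsigned long total_scan = 128;	/* placeholder */
		int shrink_ret = 0;

		trace_mm_shrink_slab_start(shrinker, sc, nr, nr_pages_scanned,
					   lru_pages, max_pass, delta, total_scan);

		/* ... call shrinker->shrink(shrinker, sc) in SHRINK_BATCH chunks ... */

		/* unused vs. new scan counts let tooling compute the work retired */
		trace_mm_shrink_slab_end(shrinker, shrink_ret, 0, total_scan);
	}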