Diffstat (limited to 'include')
-rw-r--r--  include/asm-generic/preempt.h             |  5
-rw-r--r--  include/asm-generic/qspinlock.h           |  4
-rw-r--r--  include/linux/cgroup-defs.h               | 27
-rw-r--r--  include/linux/init_task.h                 |  8
-rw-r--r--  include/linux/mm.h                        | 21
-rw-r--r--  include/linux/preempt.h                   | 19
-rw-r--r--  include/linux/sched.h                     | 18
-rw-r--r--  include/linux/security.h                  |  2
-rw-r--r--  include/net/netfilter/br_netfilter.h      |  2
-rw-r--r--  include/net/netfilter/nf_tables.h         |  2
-rw-r--r--  include/target/iscsi/iscsi_target_core.h  |  1
-rw-r--r--  include/xen/interface/sched.h             |  8
12 files changed, 73 insertions, 44 deletions
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index d0a7a4753..0bec580a4 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -71,9 +71,10 @@ static __always_inline bool __preempt_count_dec_and_test(void)
/*
* Returns true when we need to resched and can (barring IRQ state).
*/
-static __always_inline bool should_resched(void)
+static __always_inline bool should_resched(int preempt_offset)
{
- return unlikely(!preempt_count() && tif_need_resched());
+ return unlikely(preempt_count() == preempt_offset &&
+ tif_need_resched());
}
#ifdef CONFIG_PREEMPT
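
The new preempt_offset argument lets a caller state the preempt_count() it expects instead of assuming zero. A minimal caller-side sketch, assuming the PREEMPT_LOCK_OFFSET constant introduced in the linux/preempt.h hunk below (the helper name is illustrative, not part of this patch):

    /* Illustrative only: with a spinlock held, preempt_count() equals
     * PREEMPT_LOCK_OFFSET, so a reschedule check must compare against
     * that offset rather than against zero. */
    static inline bool example_resched_wanted_under_lock(void)
    {
            return should_resched(PREEMPT_LOCK_OFFSET);
    }
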
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
index 83bfb87f5..e2aadbc71 100644
--- a/include/asm-generic/qspinlock.h
+++ b/include/asm-generic/qspinlock.h
@@ -111,8 +111,8 @@ static inline void queued_spin_unlock_wait(struct qspinlock *lock)
cpu_relax();
}
-#ifndef virt_queued_spin_lock
-static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+#ifndef virt_spin_lock
+static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
return false;
}
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 93755a629..430c876ad 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -463,31 +463,8 @@ struct cgroup_subsys {
unsigned int depends_on;
};
-extern struct percpu_rw_semaphore cgroup_threadgroup_rwsem;
-
-/**
- * cgroup_threadgroup_change_begin - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_begin() and allows cgroup operations to
- * synchronize against threadgroup changes using a percpu_rw_semaphore.
- */
-static inline void cgroup_threadgroup_change_begin(struct task_struct *tsk)
-{
- percpu_down_read(&cgroup_threadgroup_rwsem);
-}
-
-/**
- * cgroup_threadgroup_change_end - threadgroup exclusion for cgroups
- * @tsk: target task
- *
- * Called from threadgroup_change_end(). Counterpart of
- * cgroup_threadcgroup_change_begin().
- */
-static inline void cgroup_threadgroup_change_end(struct task_struct *tsk)
-{
- percpu_up_read(&cgroup_threadgroup_rwsem);
-}
+void cgroup_threadgroup_change_begin(struct task_struct *tsk);
+void cgroup_threadgroup_change_end(struct task_struct *tsk);
#else /* CONFIG_CGROUPS */
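
With the static inlines removed, the definitions move out of line. A plausible sketch of what kernel/cgroup.c could provide after this change, using the per-signal_struct group_rwsem reintroduced in the linux/sched.h hunk below (a sketch under that assumption, not copied from the patch):

    /* fork/exit paths enter as readers; cgroup core takes the rwsem as
     * a writer to hold the threadgroup stable across migration. */
    void cgroup_threadgroup_change_begin(struct task_struct *tsk)
    {
            down_read(&tsk->signal->group_rwsem);
    }

    void cgroup_threadgroup_change_end(struct task_struct *tsk)
    {
            up_read(&tsk->signal->group_rwsem);
    }
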
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e8493fee8..bb9b075f0 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -25,6 +25,13 @@
extern struct files_struct init_files;
extern struct fs_struct init_fs;
+#ifdef CONFIG_CGROUPS
+#define INIT_GROUP_RWSEM(sig) \
+ .group_rwsem = __RWSEM_INITIALIZER(sig.group_rwsem),
+#else
+#define INIT_GROUP_RWSEM(sig)
+#endif
+
#ifdef CONFIG_CPUSETS
#define INIT_CPUSET_SEQ(tsk) \
.mems_allowed_seq = SEQCNT_ZERO(tsk.mems_allowed_seq),
@@ -48,6 +55,7 @@ extern struct fs_struct init_fs;
}, \
.cred_guard_mutex = \
__MUTEX_INITIALIZER(sig.cred_guard_mutex), \
+ INIT_GROUP_RWSEM(sig) \
}
extern struct nsproxy init_nsproxy;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8a6e1fc37..6293566d7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -916,6 +916,27 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
#endif
}
+#ifdef CONFIG_MEMCG
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return page->mem_cgroup;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+ page->mem_cgroup = memcg;
+}
+#else
+static inline struct mem_cgroup *page_memcg(struct page *page)
+{
+ return NULL;
+}
+
+static inline void set_page_memcg(struct page *page, struct mem_cgroup *memcg)
+{
+}
+#endif
+
/*
* Some inline functions in vmstat.h depend on page_zone()
*/
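
The new accessors hide the CONFIG_MEMCG dependency from callers. A hedged usage sketch (the function below is illustrative, not part of this patch):

    /* Works with or without CONFIG_MEMCG: page_memcg() returns NULL and
     * set_page_memcg() is a no-op when memcg support is compiled out. */
    static void example_transfer_memcg(struct page *from, struct page *to)
    {
            set_page_memcg(to, page_memcg(from));
            set_page_memcg(from, NULL);
    }
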
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 84991f185..bea8dd8ff 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -84,13 +84,21 @@
*/
#define in_nmi() (preempt_count() & NMI_MASK)
+/*
+ * The preempt_count offset after preempt_disable();
+ */
#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_DISABLE_OFFSET 1
+# define PREEMPT_DISABLE_OFFSET PREEMPT_OFFSET
#else
-# define PREEMPT_DISABLE_OFFSET 0
+# define PREEMPT_DISABLE_OFFSET 0
#endif
/*
+ * The preempt_count offset after spin_lock()
+ */
+#define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET
+
+/*
* The preempt_count offset needed for things like:
*
* spin_lock_bh()
@@ -103,7 +111,7 @@
*
* Work as expected.
*/
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
/*
* Are we running in atomic context? WARNING: this macro cannot
@@ -124,7 +132,8 @@
#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
-#define preempt_count_dec_and_test() ({ preempt_count_sub(1); should_resched(); })
+#define preempt_count_dec_and_test() \
+ ({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val) __preempt_count_add(val)
#define preempt_count_sub(val) __preempt_count_sub(val)
@@ -184,7 +193,7 @@ do { \
#define preempt_check_resched() \
do { \
- if (should_resched()) \
+ if (should_resched(0)) \
__preempt_schedule(); \
} while (0)
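
Together with the offsets above, should_resched() can now be used from a context that legitimately holds one spinlock. A rough sketch of such a resched point, with a hypothetical helper name (the real cond_resched_lock() machinery lives in kernel/sched/core.c):

    static int example_cond_resched_lock(spinlock_t *lock)
    {
            int ret = 0;

            /* With the lock held, preempt_count() == PREEMPT_LOCK_OFFSET. */
            if (should_resched(PREEMPT_LOCK_OFFSET)) {
                    spin_unlock(lock);
                    schedule();
                    spin_lock(lock);
                    ret = 1;
            }
            return ret;
    }
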
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 04b5ada46..bfca8aa21 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -754,6 +754,18 @@ struct signal_struct {
unsigned audit_tty_log_passwd;
struct tty_audit_buf *tty_audit_buf;
#endif
+#ifdef CONFIG_CGROUPS
+ /*
+ * group_rwsem prevents new tasks from entering the threadgroup and
+ * member tasks from exiting, more specifically, setting of
+ * PF_EXITING. fork and exit paths are protected with this rwsem
+ * using threadgroup_change_begin/end(). Users which require
+ * threadgroup to remain stable should use threadgroup_[un]lock()
+ * which also takes care of exec path. Currently, cgroup is the
+ * only user.
+ */
+ struct rw_semaphore group_rwsem;
+#endif
oom_flags_t oom_flags;
short oom_score_adj; /* OOM kill score adjustment */
@@ -2897,12 +2909,6 @@ extern int _cond_resched(void);
extern int __cond_resched_lock(spinlock_t *lock);
-#ifdef CONFIG_PREEMPT_COUNT
-#define PREEMPT_LOCK_OFFSET PREEMPT_OFFSET
-#else
-#define PREEMPT_LOCK_OFFSET 0
-#endif
-
#define cond_resched_lock(lock) ({ \
___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
__cond_resched_lock(lock); \
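
The group_rwsem comment above describes the fork/exit-side locking; condensed to a sketch, the pattern it refers to looks roughly like this (illustrative only, the real callers are in kernel/fork.c and kernel/exit.c):

    threadgroup_change_begin(current);
    /* ... link the new task into, or mark the exiting task in,
     *     current's thread group ... */
    threadgroup_change_end(current);
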
diff --git a/include/linux/security.h b/include/linux/security.h
index 79d85ddf8..2f4c1f7aa 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -946,7 +946,7 @@ static inline int security_task_prctl(int option, unsigned long arg2,
unsigned long arg4,
unsigned long arg5)
{
- return cap_task_prctl(option, arg2, arg3, arg3, arg5);
+ return cap_task_prctl(option, arg2, arg3, arg4, arg5);
}
static inline void security_task_to_inode(struct task_struct *p, struct inode *inode)
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index bab824bde..d4c6b5f30 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -59,7 +59,7 @@ static inline unsigned int
br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
const struct nf_hook_state *state)
{
- return NF_DROP;
+ return NF_ACCEPT;
}
#endif
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2a246680a..aa8bee72c 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -125,7 +125,7 @@ static inline enum nft_data_types nft_dreg_to_type(enum nft_registers reg)
static inline enum nft_registers nft_type_to_reg(enum nft_data_types type)
{
- return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1;
+ return type == NFT_DATA_VERDICT ? NFT_REG_VERDICT : NFT_REG_1 * NFT_REG_SIZE / NFT_REG32_SIZE;
}
unsigned int nft_parse_register(const struct nlattr *attr);
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 0aedbb2c1..7e7f8875a 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -776,7 +776,6 @@ struct iscsi_np {
enum iscsi_timer_flags_table np_login_timer_flags;
u32 np_exports;
enum np_flags_table np_flags;
- unsigned char np_ip[IPV6_ADDRESS_SPACE];
u16 np_port;
spinlock_t np_thread_lock;
struct completion np_restart_comp;
diff --git a/include/xen/interface/sched.h b/include/xen/interface/sched.h
index 9ce083960..f18490985 100644
--- a/include/xen/interface/sched.h
+++ b/include/xen/interface/sched.h
@@ -107,5 +107,13 @@ struct sched_watchdog {
#define SHUTDOWN_suspend 2 /* Clean up, save suspend info, kill. */
#define SHUTDOWN_crash 3 /* Tell controller we've crashed. */
#define SHUTDOWN_watchdog 4 /* Restart because watchdog time expired. */
+/*
+ * Domain was asked to perform a 'soft reset'. The expected behavior is to
+ * reset internal Xen state for the domain returning it to the point where it
+ * was created but leaving the domain's memory contents and vCPU contexts
+ * intact. This will allow the domain to start over and set up all Xen specific
+ * interfaces again.
+ */
+#define SHUTDOWN_soft_reset 5
#endif /* __XEN_PUBLIC_SCHED_H__ */
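
A guest requests the new reason through the ordinary shutdown hypercall. A hedged sketch, assuming the usual Linux wrapper name HYPERVISOR_sched_op() (struct sched_shutdown and SCHEDOP_shutdown are defined earlier in this header):

    struct sched_shutdown arg = { .reason = SHUTDOWN_soft_reset };

    /* Ask the hypervisor to soft-reset this domain. */
    HYPERVISOR_sched_op(SCHEDOP_shutdown, &arg);
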