Diffstat (limited to 'kernel')
-rw-r--r--   kernel/Makefile             |   4
-rw-r--r--   kernel/capability.c         | 113
-rw-r--r--   kernel/exit.c               |   7
-rw-r--r--   kernel/fork.c               |  24
-rw-r--r--   kernel/hrtimer.c            |   9
-rw-r--r--   kernel/kallsyms.c           |  11
-rw-r--r--   kernel/kprobes.c            |   9
-rw-r--r--   kernel/latency.c            | 280
-rw-r--r--   kernel/notifier.c           |   1
-rw-r--r--   kernel/params.c             |   2
-rw-r--r--   kernel/pm_qos_params.c      | 425
-rw-r--r--   kernel/posix-timers.c       |   9
-rw-r--r--   kernel/power/Kconfig        |   9
-rw-r--r--   kernel/power/disk.c         |   4
-rw-r--r--   kernel/power/snapshot.c     |   4
-rw-r--r--   kernel/printk.c             |  36
-rw-r--r--   kernel/ptrace.c             |  11
-rw-r--r--   kernel/relay.c              |  24
-rw-r--r--   kernel/signal.c             | 110
-rw-r--r--   kernel/srcu.c               |   3
-rw-r--r--   kernel/stop_machine.c       |   6
-rw-r--r--   kernel/sys.c                |  37
-rw-r--r--   kernel/sys_ni.c             |   7
-rw-r--r--   kernel/sysctl.c             |  61
-rw-r--r--   kernel/sysctl_check.c       |   7
-rw-r--r--   kernel/test_kprobes.c       |  16
-rw-r--r--   kernel/time.c               |  13
-rw-r--r--   kernel/time/clocksource.c   |  19
-rw-r--r--   kernel/timer.c              |  10
29 files changed, 755 insertions, 516 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index db9af707ff5b..135a1b943446 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -8,8 +8,8 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o extable.o params.o posix-timers.o \
kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
- hrtimer.o rwsem.o latency.o nsproxy.o srcu.o \
- utsname.o notifier.o ksysfs.o
+ hrtimer.o rwsem.o nsproxy.o srcu.o \
+ utsname.o notifier.o ksysfs.o pm_qos_params.o
obj-$(CONFIG_SYSCTL) += sysctl_check.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/kernel/capability.c b/kernel/capability.c
index efbd9cdce132..39e8193b41ea 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -22,6 +22,37 @@
static DEFINE_SPINLOCK(task_capability_lock);
/*
+ * Leveraged for setting/resetting capabilities
+ */
+
+const kernel_cap_t __cap_empty_set = CAP_EMPTY_SET;
+const kernel_cap_t __cap_full_set = CAP_FULL_SET;
+const kernel_cap_t __cap_init_eff_set = CAP_INIT_EFF_SET;
+
+EXPORT_SYMBOL(__cap_empty_set);
+EXPORT_SYMBOL(__cap_full_set);
+EXPORT_SYMBOL(__cap_init_eff_set);
+
+/*
+ * More recent versions of libcap are available from:
+ *
+ * http://www.kernel.org/pub/linux/libs/security/linux-privs/
+ */
+
+static void warn_legacy_capability_use(void)
+{
+ static int warned;
+ if (!warned) {
+ char name[sizeof(current->comm)];
+
+ printk(KERN_INFO "warning: `%s' uses 32-bit capabilities"
+ " (legacy support in use)\n",
+ get_task_comm(name, current));
+ warned = 1;
+ }
+}
+
+/*
* For sys_getproccap() and sys_setproccap(), any of the three
* capability set pointers may be NULL -- indicating that that set is
* uninteresting and/or not to be changed.
@@ -42,12 +73,21 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
pid_t pid;
__u32 version;
struct task_struct *target;
- struct __user_cap_data_struct data;
+ unsigned tocopy;
+ kernel_cap_t pE, pI, pP;
if (get_user(version, &header->version))
return -EFAULT;
- if (version != _LINUX_CAPABILITY_VERSION) {
+ switch (version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ warn_legacy_capability_use();
+ tocopy = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ tocopy = _LINUX_CAPABILITY_U32S_2;
+ break;
+ default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
@@ -71,14 +111,47 @@ asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
} else
target = current;
- ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);
+ ret = security_capget(target, &pE, &pI, &pP);
out:
read_unlock(&tasklist_lock);
spin_unlock(&task_capability_lock);
- if (!ret && copy_to_user(dataptr, &data, sizeof data))
- return -EFAULT;
+ if (!ret) {
+ struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+ unsigned i;
+
+ for (i = 0; i < tocopy; i++) {
+ kdata[i].effective = pE.cap[i];
+ kdata[i].permitted = pP.cap[i];
+ kdata[i].inheritable = pI.cap[i];
+ }
+
+ /*
+ * Note that in the case tocopy < _LINUX_CAPABILITY_U32S,
+ * we silently drop the upper capabilities here. This
+ * has the effect of making older libcap
+ * implementations implicitly drop upper capability
+ * bits when they perform a: capget/modify/capset
+ * sequence.
+ *
+ * This behavior is considered fail-safe
+ * behavior. Upgrading the application to a newer
+ * version of libcap will enable access to the newer
+ * capabilities.
+ *
+ * An alternative would be to return an error here
+ * (-ERANGE), but that causes legacy applications to
+ * unexpectedly fail; the capget/modify/capset aborts
+ * before modification is attempted and the application
+ * fails.
+ */
+
+ if (copy_to_user(dataptr, kdata, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
+ return -EFAULT;
+ }
+ }
return ret;
}
@@ -167,6 +240,8 @@ static inline int cap_set_all(kernel_cap_t *effective,
*/
asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
{
+ struct __user_cap_data_struct kdata[_LINUX_CAPABILITY_U32S];
+ unsigned i, tocopy;
kernel_cap_t inheritable, permitted, effective;
__u32 version;
struct task_struct *target;
@@ -176,7 +251,15 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (get_user(version, &header->version))
return -EFAULT;
- if (version != _LINUX_CAPABILITY_VERSION) {
+ switch (version) {
+ case _LINUX_CAPABILITY_VERSION_1:
+ warn_legacy_capability_use();
+ tocopy = _LINUX_CAPABILITY_U32S_1;
+ break;
+ case _LINUX_CAPABILITY_VERSION_2:
+ tocopy = _LINUX_CAPABILITY_U32S_2;
+ break;
+ default:
if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
return -EFAULT;
return -EINVAL;
@@ -188,10 +271,22 @@ asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
if (pid && pid != task_pid_vnr(current) && !capable(CAP_SETPCAP))
return -EPERM;
- if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
- copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
- copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
+ if (copy_from_user(&kdata, data, tocopy
+ * sizeof(struct __user_cap_data_struct))) {
return -EFAULT;
+ }
+
+ for (i = 0; i < tocopy; i++) {
+ effective.cap[i] = kdata[i].effective;
+ permitted.cap[i] = kdata[i].permitted;
+ inheritable.cap[i] = kdata[i].inheritable;
+ }
+ while (i < _LINUX_CAPABILITY_U32S) {
+ effective.cap[i] = 0;
+ permitted.cap[i] = 0;
+ inheritable.cap[i] = 0;
+ i++;
+ }
spin_lock(&task_capability_lock);
read_lock(&tasklist_lock);
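
The capget() path above marshals the widened capability sets through an array of __user_cap_data_struct entries, sized by whichever ABI version the caller negotiated. Because the default: branch writes the kernel's preferred version back into header->version before returning -EINVAL, userspace can probe the ABI without guessing. A minimal, hypothetical sketch of that probe in the raw-syscall style a libcap-like library might use (not part of this patch):

#include <errno.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/capability.h>

static unsigned int probe_cap_version(void)
{
	struct __user_cap_header_struct hdr = {
		.version = ~0U,	/* deliberately invalid */
		.pid = 0,	/* the calling task */
	};

	/* The kernel rejects the bogus version but writes back its own. */
	if (syscall(SYS_capget, &hdr, NULL) == -1 && errno == EINVAL)
		return hdr.version; /* _LINUX_CAPABILITY_VERSION_2 on this kernel */
	return 0;
}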
diff --git a/kernel/exit.c b/kernel/exit.c
index 9e459fefda77..eb9934a82fc1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1083,11 +1083,12 @@ do_group_exit(int exit_code)
struct signal_struct *const sig = current->signal;
struct sighand_struct *const sighand = current->sighand;
spin_lock_irq(&sighand->siglock);
- if (sig->flags & SIGNAL_GROUP_EXIT)
+ if (signal_group_exit(sig))
/* Another thread got here before we took the lock. */
exit_code = sig->group_exit_code;
else {
sig->group_exit_code = exit_code;
+ sig->flags = SIGNAL_GROUP_EXIT;
zap_other_threads(current);
}
spin_unlock_irq(&sighand->siglock);
@@ -1589,8 +1590,6 @@ repeat:
goto repeat;
if (retval != 0) /* He released the lock. */
goto end;
- } else if (p->exit_state == EXIT_DEAD) {
- continue;
} else if (p->exit_state == EXIT_ZOMBIE) {
/*
* Eligible but we cannot release it yet:
@@ -1605,7 +1604,7 @@ repeat:
/* He released the lock. */
if (retval != 0)
goto end;
- } else {
+ } else if (p->exit_state != EXIT_DEAD) {
check_continued:
/*
* It's running now, so it might later
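
The do_group_exit() hunk above relies on the signal_group_exit() helper introduced in the same series; unlike the raw SIGNAL_GROUP_EXIT test, it also reports a group exit while a core-dumping thread holds group_exit_task. Paraphrased from include/linux/sched.h:

static inline int signal_group_exit(const struct signal_struct *sig)
{
	return (sig->flags & SIGNAL_GROUP_EXIT) ||
	       (sig->group_exit_task != NULL);
}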
diff --git a/kernel/fork.c b/kernel/fork.c
index 05e0b6f4365b..3995297567a9 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -325,7 +325,7 @@ static inline int mm_alloc_pgd(struct mm_struct * mm)
static inline void mm_free_pgd(struct mm_struct * mm)
{
- pgd_free(mm->pgd);
+ pgd_free(mm, mm->pgd);
}
#else
#define dup_mmap(mm, oldmm) (0)
@@ -1118,6 +1118,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
#ifdef CONFIG_SECURITY
p->security = NULL;
#endif
+ p->cap_bset = current->cap_bset;
p->io_context = NULL;
p->audit_context = NULL;
cgroup_fork(p);
@@ -1398,7 +1399,7 @@ fork_out:
return ERR_PTR(retval);
}
-noinline struct pt_regs * __devinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
+noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_regs *regs)
{
memset(regs, 0, sizeof(struct pt_regs));
return regs;
@@ -1450,6 +1451,23 @@ long do_fork(unsigned long clone_flags,
int trace = 0;
long nr;
+ /*
+ * We hope to recycle these flags after 2.6.26
+ */
+ if (unlikely(clone_flags & CLONE_STOPPED)) {
+ static int __read_mostly count = 100;
+
+ if (count > 0 && printk_ratelimit()) {
+ char comm[TASK_COMM_LEN];
+
+ count--;
+ printk(KERN_INFO "fork(): process `%s' used deprecated "
+ "clone flags 0x%lx\n",
+ get_task_comm(comm, current),
+ clone_flags & CLONE_STOPPED);
+ }
+ }
+
if (unlikely(current->ptrace)) {
trace = fork_traceflag (clone_flags);
if (trace)
@@ -1492,7 +1510,7 @@ long do_fork(unsigned long clone_flags,
if (!(clone_flags & CLONE_STOPPED))
wake_up_new_task(p, clone_flags);
else
- p->state = TASK_STOPPED;
+ __set_task_state(p, TASK_STOPPED);
if (unlikely (trace)) {
current->ptrace_message = nr;
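
The switch from a bare p->state assignment to __set_task_state() (made in kernel/ptrace.c below as well) is safe precisely because the task being stopped cannot be running concurrently: the double-underscore form is a plain assignment that skips the memory barrier set_task_state() provides. For reference, the two macros as they read in include/linux/sched.h of this era:

#define __set_task_state(tsk, state_value)		\
	do { (tsk)->state = (state_value); } while (0)
#define set_task_state(tsk, state_value)		\
	set_mb((tsk)->state, (state_value))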
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index 1069998fe25f..668f3967eb39 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -306,7 +306,7 @@ EXPORT_SYMBOL_GPL(ktime_sub_ns);
/*
* Divide a ktime value by a nanosecond value
*/
-unsigned long ktime_divns(const ktime_t kt, s64 div)
+u64 ktime_divns(const ktime_t kt, s64 div)
{
u64 dclc, inc, dns;
int sft = 0;
@@ -321,7 +321,7 @@ unsigned long ktime_divns(const ktime_t kt, s64 div)
dclc >>= sft;
do_div(dclc, (unsigned long) div);
- return (unsigned long) dclc;
+ return dclc;
}
#endif /* BITS_PER_LONG >= 64 */
@@ -656,10 +656,9 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
* Forward the timer expiry so it will expire in the future.
* Returns the number of overruns.
*/
-unsigned long
-hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
+u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
{
- unsigned long orun = 1;
+ u64 orun = 1;
ktime_t delta;
delta = ktime_sub(now, timer->expires);
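
Widening hrtimer_forward() to u64 matters on 32-bit machines, where an unsigned long overrun count wraps after 2^32 missed intervals; with a 1 ms period that is only about 49.7 days of accumulated delay. A sketch of a periodic callback under the new prototype (my_timer_fn and the 1 ms period are illustrative):

static enum hrtimer_restart my_timer_fn(struct hrtimer *t)
{
	u64 overruns;

	/* Re-arm relative to the base clock; returns intervals skipped. */
	overruns = hrtimer_forward(t, t->base->get_time(),
				   ktime_set(0, NSEC_PER_MSEC));
	if (overruns > 1)
		pr_debug("missed %llu periods\n",
			 (unsigned long long)(overruns - 1));
	return HRTIMER_RESTART;
}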
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 7dadc71ce516..f091d13def00 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -53,14 +53,6 @@ static inline int is_kernel_inittext(unsigned long addr)
return 0;
}
-static inline int is_kernel_extratext(unsigned long addr)
-{
- if (addr >= (unsigned long)_sextratext
- && addr <= (unsigned long)_eextratext)
- return 1;
- return 0;
-}
-
static inline int is_kernel_text(unsigned long addr)
{
if (addr >= (unsigned long)_stext && addr <= (unsigned long)_etext)
@@ -80,8 +72,7 @@ static int is_ksym_addr(unsigned long addr)
if (all_var)
return is_kernel(addr);
- return is_kernel_text(addr) || is_kernel_inittext(addr) ||
- is_kernel_extratext(addr);
+ return is_kernel_text(addr) || is_kernel_inittext(addr);
}
/* expand a compressed symbol data into the resulting uncompressed string,
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index d0493eafea3e..7a86e6432338 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -699,6 +699,12 @@ static int __kprobes pre_handler_kretprobe(struct kprobe *p,
struct kretprobe_instance, uflist);
ri->rp = rp;
ri->task = current;
+
+ if (rp->entry_handler && rp->entry_handler(ri, regs)) {
+ spin_unlock_irqrestore(&kretprobe_lock, flags);
+ return 0;
+ }
+
arch_prepare_kretprobe(ri, regs);
/* XXX(hch): why is there no hlist_move_head? */
@@ -745,7 +751,8 @@ int __kprobes register_kretprobe(struct kretprobe *rp)
INIT_HLIST_HEAD(&rp->used_instances);
INIT_HLIST_HEAD(&rp->free_instances);
for (i = 0; i < rp->maxactive; i++) {
- inst = kmalloc(sizeof(struct kretprobe_instance), GFP_KERNEL);
+ inst = kmalloc(sizeof(struct kretprobe_instance) +
+ rp->data_size, GFP_KERNEL);
if (inst == NULL) {
free_rp_inst(rp);
return -ENOMEM;
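
Together these two hunks let a kretprobe carry per-instance scratch space (data_size bytes) and run an entry_handler when the probed function is entered; a nonzero return from the entry_handler tells the core to skip the return probe for that call, as the early-unlock path above shows. A sketch in the style of the Documentation/kprobes.txt example, timing a call (the names and the probed symbol are illustrative):

struct my_data {
	ktime_t entry_stamp;
};

static int my_entry(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct my_data *data = (struct my_data *)ri->data;

	data->entry_stamp = ktime_get();
	return 0;	/* nonzero would skip this instance */
}

static int my_ret(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	struct my_data *data = (struct my_data *)ri->data;
	s64 delta = ktime_to_ns(ktime_sub(ktime_get(), data->entry_stamp));

	printk(KERN_INFO "returned 0x%lx after %lld ns\n",
	       regs_return_value(regs), delta);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler	= my_ret,
	.entry_handler	= my_entry,
	.data_size	= sizeof(struct my_data),
	.kp.symbol_name	= "do_fork",	/* illustrative target */
};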
diff --git a/kernel/latency.c b/kernel/latency.c
deleted file mode 100644
index e63fcacb61a7..000000000000
--- a/kernel/latency.c
+++ /dev/null
@@ -1,280 +0,0 @@
-/*
- * latency.c: Explicit system-wide latency-expectation infrastructure
- *
- * The purpose of this infrastructure is to allow device drivers to set
- * latency constraint they have and to collect and summarize these
- * expectations globally. The cummulated result can then be used by
- * power management and similar users to make decisions that have
- * tradoffs with a latency component.
- *
- * An example user of this are the x86 C-states; each higher C state saves
- * more power, but has a higher exit latency. For the idle loop power
- * code to make a good decision which C-state to use, information about
- * acceptable latencies is required.
- *
- * An example announcer of latency is an audio driver that knowns it
- * will get an interrupt when the hardware has 200 usec of samples
- * left in the DMA buffer; in that case the driver can set a latency
- * constraint of, say, 150 usec.
- *
- * Multiple drivers can each announce their maximum accepted latency,
- * to keep these appart, a string based identifier is used.
- *
- *
- * (C) Copyright 2006 Intel Corporation
- * Author: Arjan van de Ven <arjan@linux.intel.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; version 2
- * of the License.
- */
-
-#include <linux/latency.h>
-#include <linux/list.h>
-#include <linux/spinlock.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/notifier.h>
-#include <linux/jiffies.h>
-#include <asm/atomic.h>
-
-struct latency_info {
- struct list_head list;
- int usecs;
- char *identifier;
-};
-
-/*
- * locking rule: all modifications to current_max_latency and
- * latency_list need to be done while holding the latency_lock.
- * latency_lock needs to be taken _irqsave.
- */
-static atomic_t current_max_latency;
-static DEFINE_SPINLOCK(latency_lock);
-
-static LIST_HEAD(latency_list);
-static BLOCKING_NOTIFIER_HEAD(latency_notifier);
-
-/*
- * This function returns the maximum latency allowed, which
- * happens to be the minimum of all maximum latencies on the
- * list.
- */
-static int __find_max_latency(void)
-{
- int min = INFINITE_LATENCY;
- struct latency_info *info;
-
- list_for_each_entry(info, &latency_list, list) {
- if (info->usecs < min)
- min = info->usecs;
- }
- return min;
-}
-
-/**
- * set_acceptable_latency - sets the maximum latency acceptable
- * @identifier: string that identifies this driver
- * @usecs: maximum acceptable latency for this driver
- *
- * This function informs the kernel that this device(driver)
- * can accept at most usecs latency. This setting is used for
- * power management and similar tradeoffs.
- *
- * This function sleeps and can only be called from process
- * context.
- * Calling this function with an existing identifier is valid
- * and will cause the existing latency setting to be changed.
- */
-void set_acceptable_latency(char *identifier, int usecs)
-{
- struct latency_info *info, *iter;
- unsigned long flags;
- int found_old = 0;
-
- info = kzalloc(sizeof(struct latency_info), GFP_KERNEL);
- if (!info)
- return;
- info->usecs = usecs;
- info->identifier = kstrdup(identifier, GFP_KERNEL);
- if (!info->identifier)
- goto free_info;
-
- spin_lock_irqsave(&latency_lock, flags);
- list_for_each_entry(iter, &latency_list, list) {
- if (strcmp(iter->identifier, identifier)==0) {
- found_old = 1;
- iter->usecs = usecs;
- break;
- }
- }
- if (!found_old)
- list_add(&info->list, &latency_list);
-
- if (usecs < atomic_read(&current_max_latency))
- atomic_set(&current_max_latency, usecs);
-
- spin_unlock_irqrestore(&latency_lock, flags);
-
- blocking_notifier_call_chain(&latency_notifier,
- atomic_read(&current_max_latency), NULL);
-
- /*
- * if we inserted the new one, we're done; otherwise there was
- * an existing one so we need to free the redundant data
- */
- if (!found_old)
- return;
-
- kfree(info->identifier);
-free_info:
- kfree(info);
-}
-EXPORT_SYMBOL_GPL(set_acceptable_latency);
-
-/**
- * modify_acceptable_latency - changes the maximum latency acceptable
- * @identifier: string that identifies this driver
- * @usecs: maximum acceptable latency for this driver
- *
- * This function informs the kernel that this device(driver)
- * can accept at most usecs latency. This setting is used for
- * power management and similar tradeoffs.
- *
- * This function does not sleep and can be called in any context.
- * Trying to use a non-existing identifier silently gets ignored.
- *
- * Due to the atomic nature of this function, the modified latency
- * value will only be used for future decisions; past decisions
- * can still lead to longer latencies in the near future.
- */
-void modify_acceptable_latency(char *identifier, int usecs)
-{
- struct latency_info *iter;
- unsigned long flags;
-
- spin_lock_irqsave(&latency_lock, flags);
- list_for_each_entry(iter, &latency_list, list) {
- if (strcmp(iter->identifier, identifier) == 0) {
- iter->usecs = usecs;
- break;
- }
- }
- if (usecs < atomic_read(&current_max_latency))
- atomic_set(&current_max_latency, usecs);
- spin_unlock_irqrestore(&latency_lock, flags);
-}
-EXPORT_SYMBOL_GPL(modify_acceptable_latency);
-
-/**
- * remove_acceptable_latency - removes the maximum latency acceptable
- * @identifier: string that identifies this driver
- *
- * This function removes a previously set maximum latency setting
- * for the driver and frees up any resources associated with the
- * bookkeeping needed for this.
- *
- * This function does not sleep and can be called in any context.
- * Trying to use a non-existing identifier silently gets ignored.
- */
-void remove_acceptable_latency(char *identifier)
-{
- unsigned long flags;
- int newmax = 0;
- struct latency_info *iter, *temp;
-
- spin_lock_irqsave(&latency_lock, flags);
-
- list_for_each_entry_safe(iter, temp, &latency_list, list) {
- if (strcmp(iter->identifier, identifier) == 0) {
- list_del(&iter->list);
- newmax = iter->usecs;
- kfree(iter->identifier);
- kfree(iter);
- break;
- }
- }
-
- /* If we just deleted the system wide value, we need to
- * recalculate with a full search
- */
- if (newmax == atomic_read(&current_max_latency)) {
- newmax = __find_max_latency();
- atomic_set(&current_max_latency, newmax);
- }
- spin_unlock_irqrestore(&latency_lock, flags);
-}
-EXPORT_SYMBOL_GPL(remove_acceptable_latency);
-
-/**
- * system_latency_constraint - queries the system wide latency maximum
- *
- * This function returns the system wide maximum latency in
- * microseconds.
- *
- * This function does not sleep and can be called in any context.
- */
-int system_latency_constraint(void)
-{
- return atomic_read(&current_max_latency);
-}
-EXPORT_SYMBOL_GPL(system_latency_constraint);
-
-/**
- * synchronize_acceptable_latency - recalculates all latency decisions
- *
- * This function will cause a callback to various kernel pieces that
- * will make those pieces rethink their latency decisions. This implies
- * that if there are overlong latencies in hardware state already, those
- * latencies get taken right now. When this call completes no overlong
- * latency decisions should be active anymore.
- *
- * Typical usecase of this is after a modify_acceptable_latency() call,
- * which in itself is non-blocking and non-synchronizing.
- *
- * This function blocks and should not be called with locks held.
- */
-
-void synchronize_acceptable_latency(void)
-{
- blocking_notifier_call_chain(&latency_notifier,
- atomic_read(&current_max_latency), NULL);
-}
-EXPORT_SYMBOL_GPL(synchronize_acceptable_latency);
-
-/*
- * Latency notifier: this notifier gets called when a non-atomic new
- * latency value gets set. The expectation nof the caller of the
- * non-atomic set is that when the call returns, future latencies
- * are within bounds, so the functions on the notifier list are
- * expected to take the overlong latencies immediately, inside the
- * callback, and not make a overlong latency decision anymore.
- *
- * The callback gets called when the new latency value is made
- * active so system_latency_constraint() returns the new latency.
- */
-int register_latency_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_register(&latency_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(register_latency_notifier);
-
-int unregister_latency_notifier(struct notifier_block * nb)
-{
- return blocking_notifier_chain_unregister(&latency_notifier, nb);
-}
-EXPORT_SYMBOL_GPL(unregister_latency_notifier);
-
-static __init int latency_init(void)
-{
- atomic_set(&current_max_latency, INFINITE_LATENCY);
- /*
- * we don't want by default to have longer latencies than 2 ticks,
- * since that would cause lost ticks
- */
- set_acceptable_latency("kernel", 2*1000000/HZ);
- return 0;
-}
-
-module_init(latency_init);
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 4253f472f060..643360d1bb14 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -4,6 +4,7 @@
#include <linux/notifier.h>
#include <linux/rcupdate.h>
#include <linux/vmalloc.h>
+#include <linux/reboot.h>
/*
* Notifier list for kernel code which wants to be called
diff --git a/kernel/params.c b/kernel/params.c
index 42fe5e6126c0..e28c70628bb7 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -272,7 +272,7 @@ static int param_array(const char *name,
unsigned int min, unsigned int max,
void *elem, int elemsize,
int (*set)(const char *, struct kernel_param *kp),
- int *num)
+ unsigned int *num)
{
int ret;
struct kernel_param kp;
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
new file mode 100644
index 000000000000..0afe32be4c85
--- /dev/null
+++ b/kernel/pm_qos_params.c
@@ -0,0 +1,425 @@
+/*
+ * This module exposes the interface to kernel space for specifying
+ * QoS dependencies. It provides infrastructure for registration of:
+ *
+ * Dependents on a QoS value : register requirements
+ * Watchers of QoS value : get notified when target QoS value changes
+ *
+ * This QoS design is best effort based. Dependents register their QoS needs.
+ * Watchers register to keep track of the current QoS needs of the system.
+ *
+ * There are 3 basic classes of QoS parameter: latency, timeout, throughput
+ * each have defined units:
+ * latency: usec
+ * timeout: usec <-- currently not used.
+ * throughput: kbs (kilo byte / sec)
+ *
+ * There is a list of pm_qos_objects, each one wrapping requirements and notifiers.
+ *
+ * User mode requirements on a QOS parameter register themselves to the
+ * subsystem by opening the device node /dev/... and writing their request to
+ * the node. As long as the process holds a file handle open to the node the
+ * client continues to be accounted for. Upon file release the usermode
+ * requirement is removed and a new qos target is computed. This way the
+ * requirement an application holds is cleaned up when it closes the file
+ * pointer or exits, and the pm_qos_object gets an opportunity to clean up.
+ *
+ * mark gross mgross@linux.intel.com
+ */
+
+#include <linux/pm_qos_params.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/miscdevice.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/init.h>
+
+#include <linux/uaccess.h>
+
+/*
+ * locking rule: all changes to target_value or requirements or notifiers lists
+ * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
+ * held, taken with _irqsave. One lock to rule them all
+ */
+struct requirement_list {
+ struct list_head list;
+ union {
+ s32 value;
+ s32 usec;
+ s32 kbps;
+ };
+ char *name;
+};
+
+static s32 max_compare(s32 v1, s32 v2);
+static s32 min_compare(s32 v1, s32 v2);
+
+struct pm_qos_object {
+ struct requirement_list requirements;
+ struct blocking_notifier_head *notifiers;
+ struct miscdevice pm_qos_power_miscdev;
+ char *name;
+ s32 default_value;
+ s32 target_value;
+ s32 (*comparitor)(s32, s32);
+};
+
+static struct pm_qos_object null_pm_qos;
+static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
+static struct pm_qos_object cpu_dma_pm_qos = {
+ .requirements = {LIST_HEAD_INIT(cpu_dma_pm_qos.requirements.list)},
+ .notifiers = &cpu_dma_lat_notifier,
+ .name = "cpu_dma_latency",
+ .default_value = 2000 * USEC_PER_SEC,
+ .target_value = 2000 * USEC_PER_SEC,
+ .comparitor = min_compare
+};
+
+static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
+static struct pm_qos_object network_lat_pm_qos = {
+ .requirements = {LIST_HEAD_INIT(network_lat_pm_qos.requirements.list)},
+ .notifiers = &network_lat_notifier,
+ .name = "network_latency",
+ .default_value = 2000 * USEC_PER_SEC,
+ .target_value = 2000 * USEC_PER_SEC,
+ .comparitor = min_compare
+};
+
+
+static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
+static struct pm_qos_object network_throughput_pm_qos = {
+ .requirements =
+ {LIST_HEAD_INIT(network_throughput_pm_qos.requirements.list)},
+ .notifiers = &network_throughput_notifier,
+ .name = "network_throughput",
+ .default_value = 0,
+ .target_value = 0,
+ .comparitor = max_compare
+};
+
+
+static struct pm_qos_object *pm_qos_array[] = {
+ &null_pm_qos,
+ &cpu_dma_pm_qos,
+ &network_lat_pm_qos,
+ &network_throughput_pm_qos
+};
+
+static DEFINE_SPINLOCK(pm_qos_lock);
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos);
+static int pm_qos_power_open(struct inode *inode, struct file *filp);
+static int pm_qos_power_release(struct inode *inode, struct file *filp);
+
+static const struct file_operations pm_qos_power_fops = {
+ .write = pm_qos_power_write,
+ .open = pm_qos_power_open,
+ .release = pm_qos_power_release,
+};
+
+/* static helper functions */
+static s32 max_compare(s32 v1, s32 v2)
+{
+ return max(v1, v2);
+}
+
+static s32 min_compare(s32 v1, s32 v2)
+{
+ return min(v1, v2);
+}
+
+
+static void update_target(int target)
+{
+ s32 extreme_value;
+ struct requirement_list *node;
+ unsigned long flags;
+ int call_notifier = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ extreme_value = pm_qos_array[target]->default_value;
+ list_for_each_entry(node,
+ &pm_qos_array[target]->requirements.list, list) {
+ extreme_value = pm_qos_array[target]->comparitor(
+ extreme_value, node->value);
+ }
+ if (pm_qos_array[target]->target_value != extreme_value) {
+ call_notifier = 1;
+ pm_qos_array[target]->target_value = extreme_value;
+ pr_debug(KERN_ERR "new target for qos %d is %d\n", target,
+ pm_qos_array[target]->target_value);
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ if (call_notifier)
+ blocking_notifier_call_chain(pm_qos_array[target]->notifiers,
+ (unsigned long) extreme_value, NULL);
+}
+
+static int register_pm_qos_misc(struct pm_qos_object *qos)
+{
+ qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
+ qos->pm_qos_power_miscdev.name = qos->name;
+ qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
+
+ return misc_register(&qos->pm_qos_power_miscdev);
+}
+
+static int find_pm_qos_object_by_minor(int minor)
+{
+ int pm_qos_class;
+
+ for (pm_qos_class = 0;
+ pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
+ if (minor ==
+ pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
+ return pm_qos_class;
+ }
+ return -1;
+}
+
+/**
+ * pm_qos_requirement - returns current system wide qos expectation
+ * @pm_qos_class: identification of which qos value is requested
+ *
+ * This function returns the current target value in an atomic manner.
+ */
+int pm_qos_requirement(int pm_qos_class)
+{
+ int ret_val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ ret_val = pm_qos_array[pm_qos_class]->target_value;
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+
+ return ret_val;
+}
+EXPORT_SYMBOL_GPL(pm_qos_requirement);
+
+/**
+ * pm_qos_add_requirement - inserts new qos request into the list
+ * @pm_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * This function inserts a new entry in the pm_qos_class list of requested qos
+ * performance characteristics. It recomputes the aggregate QoS expectations
+ * for the pm_qos_class of parameters.
+ */
+int pm_qos_add_requirement(int pm_qos_class, char *name, s32 value)
+{
+ struct requirement_list *dep;
+ unsigned long flags;
+
+ dep = kzalloc(sizeof(struct requirement_list), GFP_KERNEL);
+ if (dep) {
+ if (value == PM_QOS_DEFAULT_VALUE)
+ dep->value = pm_qos_array[pm_qos_class]->default_value;
+ else
+ dep->value = value;
+ dep->name = kstrdup(name, GFP_KERNEL);
+ if (!dep->name)
+ goto cleanup;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_add(&dep->list,
+ &pm_qos_array[pm_qos_class]->requirements.list);
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ update_target(pm_qos_class);
+
+ return 0;
+ }
+
+cleanup:
+ kfree(dep);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_requirement);
+
+/**
+ * pm_qos_update_requirement - modifies an existing qos request
+ * @pm_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ * @value: defines the qos request
+ *
+ * Updates an existing qos requirement for the pm_qos_class of parameters along
+ * with updating the target pm_qos_class value.
+ *
+ * If the named request isn't in the list then no change is made.
+ */
+int pm_qos_update_requirement(int pm_qos_class, char *name, s32 new_value)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_for_each_entry(node,
+ &pm_qos_array[pm_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ if (new_value == PM_QOS_DEFAULT_VALUE)
+ node->value =
+ pm_qos_array[pm_qos_class]->default_value;
+ else
+ node->value = new_value;
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ if (pending_update)
+ update_target(pm_qos_class);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(pm_qos_update_requirement);
+
+/**
+ * pm_qos_remove_requirement - modifies an existing qos request
+ * @pm_qos_class: identifies which list of qos requests to use
+ * @name: identifies the request
+ *
+ * Will remove the named qos request from the pm_qos_class list of parameters and
+ * recompute the current target value for the pm_qos_class.
+ */
+void pm_qos_remove_requirement(int pm_qos_class, char *name)
+{
+ unsigned long flags;
+ struct requirement_list *node;
+ int pending_update = 0;
+
+ spin_lock_irqsave(&pm_qos_lock, flags);
+ list_for_each_entry(node,
+ &pm_qos_array[pm_qos_class]->requirements.list, list) {
+ if (strcmp(node->name, name) == 0) {
+ kfree(node->name);
+ list_del(&node->list);
+ kfree(node);
+ pending_update = 1;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&pm_qos_lock, flags);
+ if (pending_update)
+ update_target(pm_qos_class);
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_requirement);
+
+/**
+ * pm_qos_add_notifier - sets notification entry for changes to target value
+ * @pm_qos_class: identifies which qos target changes should be notified.
+ * @notifier: notifier block managed by caller.
+ *
+ * will register the notifier into a notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+ int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_register(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
+
+/**
+ * pm_qos_remove_notifier - deletes notification entry from chain.
+ * @pm_qos_class: identifies which qos target changes are notified.
+ * @notifier: notifier block to be removed.
+ *
+ * will remove the notifier from the notification chain that gets called
+ * upon changes to the pm_qos_class target value.
+ */
+int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
+{
+ int retval;
+
+ retval = blocking_notifier_chain_unregister(
+ pm_qos_array[pm_qos_class]->notifiers, notifier);
+
+ return retval;
+}
+EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
+
+#define PID_NAME_LEN sizeof("process_1234567890")
+static char name[PID_NAME_LEN];
+
+static int pm_qos_power_open(struct inode *inode, struct file *filp)
+{
+ int ret;
+ long pm_qos_class;
+
+ pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
+ if (pm_qos_class >= 0) {
+ filp->private_data = (void *)pm_qos_class;
+ sprintf(name, "process_%d", current->pid);
+ ret = pm_qos_add_requirement(pm_qos_class, name,
+ PM_QOS_DEFAULT_VALUE);
+ if (ret >= 0)
+ return 0;
+ }
+
+ return -EPERM;
+}
+
+static int pm_qos_power_release(struct inode *inode, struct file *filp)
+{
+ int pm_qos_class;
+
+ pm_qos_class = (long)filp->private_data;
+ sprintf(name, "process_%d", current->pid);
+ pm_qos_remove_requirement(pm_qos_class, name);
+
+ return 0;
+}
+
+static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ s32 value;
+ int pm_qos_class;
+
+ pm_qos_class = (long)filp->private_data;
+ if (count != sizeof(s32))
+ return -EINVAL;
+ if (copy_from_user(&value, buf, sizeof(s32)))
+ return -EFAULT;
+ sprintf(name, "process_%d", current->pid);
+ pm_qos_update_requirement(pm_qos_class, name, value);
+
+ return sizeof(s32);
+}
+
+
+static int __init pm_qos_power_init(void)
+{
+ int ret = 0;
+
+ ret = register_pm_qos_misc(&cpu_dma_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: cpu_dma_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_lat_pm_qos);
+ if (ret < 0) {
+ printk(KERN_ERR "pm_qos_param: network_latency setup failed\n");
+ return ret;
+ }
+ ret = register_pm_qos_misc(&network_throughput_pm_qos);
+ if (ret < 0)
+ printk(KERN_ERR
+ "pm_qos_param: network_throughput setup failed\n");
+
+ return ret;
+}
+
+late_initcall(pm_qos_power_init);
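
A driver consumes this interface through the string-keyed calls exported above; the class index selects an entry in pm_qos_array[], so PM_QOS_CPU_DMA_LATENCY (from linux/pm_qos_params.h) corresponds to cpu_dma_pm_qos. A hedged sketch of both sides, with "mydriver" as an illustrative name:

/* Kernel side: announce, retune, and finally drop a latency requirement. */
pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver", 150);
pm_qos_update_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver", 300);
pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "mydriver");

/* Userspace side: the requirement lives as long as the fd stays open. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

static void hold_latency_requirement(void)
{
	int fd = open("/dev/cpu_dma_latency", O_WRONLY);
	int32_t usec = 150;

	write(fd, &usec, sizeof(usec)); /* must be exactly sizeof(s32) */
	/* ... latency-sensitive work ... */
	close(fd);			/* release removes the requirement */
}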
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 36d563fd9e3b..122d5c787fe2 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -256,8 +256,9 @@ static void schedule_next_timer(struct k_itimer *timr)
if (timr->it.real.interval.tv64 == 0)
return;
- timr->it_overrun += hrtimer_forward(timer, timer->base->get_time(),
- timr->it.real.interval);
+ timr->it_overrun += (unsigned int) hrtimer_forward(timer,
+ timer->base->get_time(),
+ timr->it.real.interval);
timr->it_overrun_last = timr->it_overrun;
timr->it_overrun = -1;
@@ -386,7 +387,7 @@ static enum hrtimer_restart posix_timer_fn(struct hrtimer *timer)
now = ktime_add(now, kj);
}
#endif
- timr->it_overrun +=
+ timr->it_overrun += (unsigned int)
hrtimer_forward(timer, now,
timr->it.real.interval);
ret = HRTIMER_RESTART;
@@ -662,7 +663,7 @@ common_timer_get(struct k_itimer *timr, struct itimerspec *cur_setting)
*/
if (iv.tv64 && (timr->it_requeue_pending & REQUEUE_PENDING ||
(timr->it_sigev_notify & ~SIGEV_THREAD_ID) == SIGEV_NONE))
- timr->it_overrun += hrtimer_forward(timer, now, iv);
+ timr->it_overrun += (unsigned int) hrtimer_forward(timer, now, iv);
remaining = ktime_sub(timer->expires, now);
/* Return 0 only, when the timer is expired and not pending */
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index ef9b802738a5..79833170bb9c 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -74,8 +74,8 @@ config PM_TRACE_RTC
RTC across reboots, so that you can debug a machine that just hangs
during suspend (or more commonly, during resume).
- To use this debugging feature you should attempt to suspend the machine,
- then reboot it, then run
+ To use this debugging feature you should attempt to suspend the
+ machine, reboot it and then run
dmesg -s 1000000 | grep 'hash matches'
@@ -123,7 +123,10 @@ config HIBERNATION
called "hibernation" in user interfaces. STD checkpoints the
system and powers it off; and restores that checkpoint on reboot.
- You can suspend your machine with 'echo disk > /sys/power/state'.
+ You can suspend your machine with 'echo disk > /sys/power/state'
+ after placing resume=/dev/swappartition on the kernel command line
+ in your bootloader's configuration file.
+
Alternatively, you can use the additional userland tools available
from <http://suspend.sf.net>.
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index d09da0895174..859a8e59773a 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -26,7 +26,7 @@
static int noresume = 0;
-char resume_file[256] = CONFIG_PM_STD_PARTITION;
+static char resume_file[256] = CONFIG_PM_STD_PARTITION;
dev_t swsusp_resume_device;
sector_t swsusp_resume_block;
@@ -185,7 +185,7 @@ static void platform_restore_cleanup(int platform_mode)
* reappears in this routine after a restore.
*/
-int create_image(int platform_mode)
+static int create_image(int platform_mode)
{
int error;
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index f6a5df934f8d..95250d7c8d91 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1203,7 +1203,7 @@ asmlinkage int swsusp_save(void)
printk(KERN_INFO "PM: Creating hibernation image: \n");
- drain_local_pages();
+ drain_local_pages(NULL);
nr_pages = count_data_pages();
nr_highmem = count_highmem_pages();
printk(KERN_INFO "PM: Need to copy %u pages\n", nr_pages + nr_highmem);
@@ -1221,7 +1221,7 @@ asmlinkage int swsusp_save(void)
/* During allocating of suspend pagedir, new cold pages may appear.
* Kill them.
*/
- drain_local_pages();
+ drain_local_pages(NULL);
copy_data_pages(&copy_bm, &orig_bm);
/*
diff --git a/kernel/printk.c b/kernel/printk.c
index 29ae1e99cde0..4a090621f379 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -93,16 +93,16 @@ static int console_locked, console_suspended;
*/
static DEFINE_SPINLOCK(logbuf_lock);
-#define LOG_BUF_MASK (log_buf_len-1)
+#define LOG_BUF_MASK (log_buf_len-1)
#define LOG_BUF(idx) (log_buf[(idx) & LOG_BUF_MASK])
/*
* The indices into log_buf are not constrained to log_buf_len - they
* must be masked before subscripting
*/
-static unsigned long log_start; /* Index into log_buf: next char to be read by syslog() */
-static unsigned long con_start; /* Index into log_buf: next char to be sent to consoles */
-static unsigned long log_end; /* Index into log_buf: most-recently-written-char + 1 */
+static unsigned log_start; /* Index into log_buf: next char to be read by syslog() */
+static unsigned con_start; /* Index into log_buf: next char to be sent to consoles */
+static unsigned log_end; /* Index into log_buf: most-recently-written-char + 1 */
/*
* Array of consoles built from command line options (console=)
@@ -128,17 +128,17 @@ static int console_may_schedule;
static char __log_buf[__LOG_BUF_LEN];
static char *log_buf = __log_buf;
static int log_buf_len = __LOG_BUF_LEN;
-static unsigned long logged_chars; /* Number of chars produced since last read+clear operation */
+static unsigned logged_chars; /* Number of chars produced since last read+clear operation */
static int __init log_buf_len_setup(char *str)
{
- unsigned long size = memparse(str, &str);
+ unsigned size = memparse(str, &str);
unsigned long flags;
if (size)
size = roundup_pow_of_two(size);
if (size > log_buf_len) {
- unsigned long start, dest_idx, offset;
+ unsigned start, dest_idx, offset;
char *new_log_buf;
new_log_buf = alloc_bootmem(size);
@@ -295,7 +295,7 @@ int log_buf_read(int idx)
*/
int do_syslog(int type, char __user *buf, int len)
{
- unsigned long i, j, limit, count;
+ unsigned i, j, limit, count;
int do_clear = 0;
char c;
int error = 0;
@@ -436,7 +436,7 @@ asmlinkage long sys_syslog(int type, char __user *buf, int len)
/*
* Call the console drivers on a range of log_buf
*/
-static void __call_console_drivers(unsigned long start, unsigned long end)
+static void __call_console_drivers(unsigned start, unsigned end)
{
struct console *con;
@@ -463,8 +463,8 @@ early_param("ignore_loglevel", ignore_loglevel_setup);
/*
* Write out chars from start to end - 1 inclusive
*/
-static void _call_console_drivers(unsigned long start,
- unsigned long end, int msg_log_level)
+static void _call_console_drivers(unsigned start,
+ unsigned end, int msg_log_level)
{
if ((msg_log_level < console_loglevel || ignore_loglevel) &&
console_drivers && start != end) {
@@ -484,12 +484,12 @@ static void _call_console_drivers(unsigned long start,
* log_buf[start] to log_buf[end - 1].
* The console_sem must be held.
*/
-static void call_console_drivers(unsigned long start, unsigned long end)
+static void call_console_drivers(unsigned start, unsigned end)
{
- unsigned long cur_index, start_print;
+ unsigned cur_index, start_print;
static int msg_level = -1;
- BUG_ON(((long)(start - end)) > 0);
+ BUG_ON(((int)(start - end)) > 0);
cur_index = start;
start_print = start;
@@ -790,7 +790,7 @@ asmlinkage long sys_syslog(int type, char __user *buf, int len)
return -ENOSYS;
}
-static void call_console_drivers(unsigned long start, unsigned long end)
+static void call_console_drivers(unsigned start, unsigned end)
{
}
@@ -983,8 +983,8 @@ void wake_up_klogd(void)
void release_console_sem(void)
{
unsigned long flags;
- unsigned long _con_start, _log_end;
- unsigned long wake_klogd = 0;
+ unsigned _con_start, _log_end;
+ unsigned wake_klogd = 0;
if (console_suspended) {
up(&secondary_console_sem);
@@ -1275,7 +1275,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
int __printk_ratelimit(int ratelimit_jiffies, int ratelimit_burst)
{
static DEFINE_SPINLOCK(ratelimit_lock);
- static unsigned long toks = 10 * 5 * HZ;
+ static unsigned toks = 10 * 5 * HZ;
static unsigned long last_msg;
static int missed;
unsigned long flags;
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index b0d4ab4dfd3d..628b03ab88a5 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -20,6 +20,7 @@
#include <linux/signal.h>
#include <linux/audit.h>
#include <linux/pid_namespace.h>
+#include <linux/syscalls.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
@@ -53,7 +54,7 @@ void ptrace_untrace(struct task_struct *child)
spin_lock(&child->sighand->siglock);
if (task_is_traced(child)) {
if (child->signal->flags & SIGNAL_STOP_STOPPED) {
- child->state = TASK_STOPPED;
+ __set_task_state(child, TASK_STOPPED);
} else {
signal_wake_up(child, 1);
}
@@ -103,18 +104,16 @@ int ptrace_check_attach(struct task_struct *child, int kill)
&& child->signal != NULL) {
ret = 0;
spin_lock_irq(&child->sighand->siglock);
- if (task_is_stopped(child)) {
+ if (task_is_stopped(child))
child->state = TASK_TRACED;
- } else if (!task_is_traced(child) && !kill) {
+ else if (!task_is_traced(child) && !kill)
ret = -ESRCH;
- }
spin_unlock_irq(&child->sighand->siglock);
}
read_unlock(&tasklist_lock);
- if (!ret && !kill) {
+ if (!ret && !kill)
wait_task_inactive(child);
- }
/* All systems go.. */
return ret;
diff --git a/kernel/relay.c b/kernel/relay.c
index 7c0373322f18..d080b9d161a7 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -37,37 +37,31 @@ static void relay_file_mmap_close(struct vm_area_struct *vma)
}
/*
- * nopage() vm_op implementation for relay file mapping.
+ * fault() vm_op implementation for relay file mapping.
*/
-static struct page *relay_buf_nopage(struct vm_area_struct *vma,
- unsigned long address,
- int *type)
+static int relay_buf_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct page *page;
struct rchan_buf *buf = vma->vm_private_data;
- unsigned long offset = address - vma->vm_start;
+ pgoff_t pgoff = vmf->pgoff;
- if (address > vma->vm_end)
- return NOPAGE_SIGBUS; /* Disallow mremap */
if (!buf)
- return NOPAGE_OOM;
+ return VM_FAULT_OOM;
- page = vmalloc_to_page(buf->start + offset);
+ page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
if (!page)
- return NOPAGE_OOM;
+ return VM_FAULT_SIGBUS;
get_page(page);
+ vmf->page = page;
- if (type)
- *type = VM_FAULT_MINOR;
-
- return page;
+ return 0;
}
/*
* vm_ops for relay file mappings.
*/
static struct vm_operations_struct relay_file_mmap_ops = {
- .nopage = relay_buf_nopage,
+ .fault = relay_buf_fault,
.close = relay_file_mmap_close,
};
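
For reference, the vm_fault descriptor the new handler receives, paraphrased from include/linux/mm.h of this era: the handler fills in ->page and returns a VM_FAULT_* code instead of a page pointer, which is why the NOPAGE_* returns disappear above.

struct vm_fault {
	unsigned int flags;		/* FAULT_FLAG_xxx flags */
	pgoff_t pgoff;			/* logical page offset in the mapping */
	void __user *virtual_address;	/* faulting virtual address */
	struct page *page;		/* set by the handler on success */
};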
diff --git a/kernel/signal.c b/kernel/signal.c
index 4333b6dbb424..5d30ff561847 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -911,27 +911,6 @@ __group_complete_signal(int sig, struct task_struct *p)
} while_each_thread(p, t);
return;
}
-
- /*
- * There will be a core dump. We make all threads other
- * than the chosen one go into a group stop so that nothing
- * happens until it gets scheduled, takes the signal off
- * the shared queue, and does the core dump. This is a
- * little more complicated than strictly necessary, but it
- * keeps the signal state that winds up in the core dump
- * unchanged from the death state, e.g. which thread had
- * the core-dump signal unblocked.
- */
- rm_from_queue(SIG_KERNEL_STOP_MASK, &t->pending);
- rm_from_queue(SIG_KERNEL_STOP_MASK, &p->signal->shared_pending);
- p->signal->group_stop_count = 0;
- p->signal->group_exit_task = t;
- p = t;
- do {
- p->signal->group_stop_count++;
- signal_wake_up(t, t == p);
- } while_each_thread(p, t);
- return;
}
/*
@@ -978,7 +957,6 @@ void zap_other_threads(struct task_struct *p)
{
struct task_struct *t;
- p->signal->flags = SIGNAL_GROUP_EXIT;
p->signal->group_stop_count = 0;
for (t = next_thread(p); t != p; t = next_thread(t)) {
@@ -1600,6 +1578,17 @@ static inline int may_ptrace_stop(void)
}
/*
+ * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Called with the siglock held.
+ */
+static int sigkill_pending(struct task_struct *tsk)
+{
+ return ((sigismember(&tsk->pending.signal, SIGKILL) ||
+ sigismember(&tsk->signal->shared_pending.signal, SIGKILL)) &&
+ !unlikely(sigismember(&tsk->blocked, SIGKILL)));
+}
+
+/*
* This must be called with current->sighand->siglock held.
*
* This should be the path for all ptrace stops.
@@ -1612,6 +1601,26 @@ static inline int may_ptrace_stop(void)
*/
static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
{
+ int killed = 0;
+
+ if (arch_ptrace_stop_needed(exit_code, info)) {
+ /*
+ * The arch code has something special to do before a
+ * ptrace stop. This is allowed to block, e.g. for faults
+ * on user stack pages. We can't keep the siglock while
+ * calling arch_ptrace_stop, so we must release it now.
+ * To preserve proper semantics, we must do this before
+ * any signal bookkeeping like checking group_stop_count.
+ * Meanwhile, a SIGKILL could come in before we retake the
+ * siglock. That must prevent us from sleeping in TASK_TRACED.
+ * So after regaining the lock, we must check for SIGKILL.
+ */
+ spin_unlock_irq(&current->sighand->siglock);
+ arch_ptrace_stop(exit_code, info);
+ spin_lock_irq(&current->sighand->siglock);
+ killed = sigkill_pending(current);
+ }
+
/*
* If there is a group stop in progress,
* we must participate in the bookkeeping.
@@ -1623,11 +1632,11 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
current->exit_code = exit_code;
/* Let the debugger run. */
- set_current_state(TASK_TRACED);
+ __set_current_state(TASK_TRACED);
spin_unlock_irq(&current->sighand->siglock);
try_to_freeze();
read_lock(&tasklist_lock);
- if (may_ptrace_stop()) {
+ if (!unlikely(killed) && may_ptrace_stop()) {
do_notify_parent_cldstop(current, CLD_TRAPPED);
read_unlock(&tasklist_lock);
schedule();
@@ -1709,9 +1718,6 @@ static int do_signal_stop(int signr)
struct signal_struct *sig = current->signal;
int stop_count;
- if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED))
- return 0;
-
if (sig->group_stop_count > 0) {
/*
* There is a group stop in progress. We don't need to
@@ -1719,12 +1725,15 @@ static int do_signal_stop(int signr)
*/
stop_count = --sig->group_stop_count;
} else {
+ struct task_struct *t;
+
+ if (!likely(sig->flags & SIGNAL_STOP_DEQUEUED) ||
+ unlikely(sig->group_exit_task))
+ return 0;
/*
* There is no group stop already in progress.
* We must initiate one now.
*/
- struct task_struct *t;
-
sig->group_exit_code = signr;
stop_count = 0;
@@ -1752,47 +1761,6 @@ static int do_signal_stop(int signr)
return 1;
}
-/*
- * Do appropriate magic when group_stop_count > 0.
- * We return nonzero if we stopped, after releasing the siglock.
- * We return zero if we still hold the siglock and should look
- * for another signal without checking group_stop_count again.
- */
-static int handle_group_stop(void)
-{
- int stop_count;
-
- if (current->signal->group_exit_task == current) {
- /*
- * Group stop is so we can do a core dump,
- * We are the initiating thread, so get on with it.
- */
- current->signal->group_exit_task = NULL;
- return 0;
- }
-
- if (current->signal->flags & SIGNAL_GROUP_EXIT)
- /*
- * Group stop is so another thread can do a core dump,
- * or else we are racing against a death signal.
- * Just punt the stop so we can get the next signal.
- */
- return 0;
-
- /*
- * There is a group stop in progress. We stop
- * without any associated signal being in our queue.
- */
- stop_count = --current->signal->group_stop_count;
- if (stop_count == 0)
- current->signal->flags = SIGNAL_STOP_STOPPED;
- current->exit_code = current->signal->group_exit_code;
- set_current_state(TASK_STOPPED);
- spin_unlock_irq(&current->sighand->siglock);
- finish_stop(stop_count);
- return 1;
-}
-
int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka,
struct pt_regs *regs, void *cookie)
{
@@ -1807,7 +1775,7 @@ relock:
struct k_sigaction *ka;
if (unlikely(current->signal->group_stop_count > 0) &&
- handle_group_stop())
+ do_signal_stop(0))
goto relock;
signr = dequeue_signal(current, mask, info);
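
The arch_ptrace_stop_needed()/arch_ptrace_stop() pair used in ptrace_stop() defaults to a no-op; only architectures that must touch user memory before stopping (e.g. for faults on user stack pages) override it. The generic fallbacks, paraphrased from include/linux/ptrace.h:

#ifndef arch_ptrace_stop_needed
#define arch_ptrace_stop_needed(code, info)	(0)
#endif

#ifndef arch_ptrace_stop
#define arch_ptrace_stop(code, info)		do { } while (0)
#endif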
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 3507cabe963b..b0aeeaf22ce4 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -74,7 +74,7 @@ static int srcu_readers_active_idx(struct srcu_struct *sp, int idx)
* severe errors when invoked on an active srcu_struct. That said, it
* can be useful as an error check at cleanup time.
*/
-int srcu_readers_active(struct srcu_struct *sp)
+static int srcu_readers_active(struct srcu_struct *sp)
{
return srcu_readers_active_idx(sp, 0) + srcu_readers_active_idx(sp, 1);
}
@@ -255,4 +255,3 @@ EXPORT_SYMBOL_GPL(srcu_read_lock);
EXPORT_SYMBOL_GPL(srcu_read_unlock);
EXPORT_SYMBOL_GPL(synchronize_srcu);
EXPORT_SYMBOL_GPL(srcu_batches_completed);
-EXPORT_SYMBOL_GPL(srcu_readers_active);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 51b5ee53571a..6f4e0e13f70c 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -29,7 +29,6 @@ enum stopmachine_state {
static enum stopmachine_state stopmachine_state;
static unsigned int stopmachine_num_threads;
static atomic_t stopmachine_thread_ack;
-static DECLARE_MUTEX(stopmachine_mutex);
static int stopmachine(void *cpu)
{
@@ -170,6 +169,7 @@ static int do_stop(void *_smdata)
struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
unsigned int cpu)
{
+ static DEFINE_MUTEX(stopmachine_mutex);
struct stop_machine_data smdata;
struct task_struct *p;
@@ -177,7 +177,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
smdata.data = data;
init_completion(&smdata.done);
- down(&stopmachine_mutex);
+ mutex_lock(&stopmachine_mutex);
/* If they don't care which CPU fn runs on, bind to any online one. */
if (cpu == NR_CPUS)
@@ -193,7 +193,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
wake_up_process(p);
wait_for_completion(&smdata.done);
}
- up(&stopmachine_mutex);
+ mutex_unlock(&stopmachine_mutex);
return p;
}
diff --git a/kernel/sys.c b/kernel/sys.c
index d1fe71eb4546..e3c08d4324de 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -315,7 +315,7 @@ static void kernel_kexec(void)
#endif
}
-void kernel_shutdown_prepare(enum system_states state)
+static void kernel_shutdown_prepare(enum system_states state)
{
blocking_notifier_call_chain(&reboot_notifier_list,
(state == SYSTEM_HALT)?SYS_HALT:SYS_POWER_OFF, NULL);
@@ -1145,16 +1145,16 @@ static int groups_to_user(gid_t __user *grouplist,
struct group_info *group_info)
{
int i;
- int count = group_info->ngroups;
+ unsigned int count = group_info->ngroups;
for (i = 0; i < group_info->nblocks; i++) {
- int cp_count = min(NGROUPS_PER_BLOCK, count);
- int off = i * NGROUPS_PER_BLOCK;
- int len = cp_count * sizeof(*grouplist);
+ unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+ unsigned int len = cp_count * sizeof(*grouplist);
- if (copy_to_user(grouplist+off, group_info->blocks[i], len))
+ if (copy_to_user(grouplist, group_info->blocks[i], len))
return -EFAULT;
+ grouplist += NGROUPS_PER_BLOCK;
count -= cp_count;
}
return 0;
@@ -1165,16 +1165,16 @@ static int groups_from_user(struct group_info *group_info,
gid_t __user *grouplist)
{
int i;
- int count = group_info->ngroups;
+ unsigned int count = group_info->ngroups;
for (i = 0; i < group_info->nblocks; i++) {
- int cp_count = min(NGROUPS_PER_BLOCK, count);
- int off = i * NGROUPS_PER_BLOCK;
- int len = cp_count * sizeof(*grouplist);
+ unsigned int cp_count = min(NGROUPS_PER_BLOCK, count);
+ unsigned int len = cp_count * sizeof(*grouplist);
- if (copy_from_user(group_info->blocks[i], grouplist+off, len))
+ if (copy_from_user(group_info->blocks[i], grouplist, len))
return -EFAULT;
+ grouplist += NGROUPS_PER_BLOCK;
count -= cp_count;
}
return 0;
@@ -1472,7 +1472,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
!capable(CAP_SYS_RESOURCE))
return -EPERM;
- if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > NR_OPEN)
+ if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open)
return -EPERM;
retval = security_task_setrlimit(resource, &new_rlim);
@@ -1637,7 +1637,7 @@ asmlinkage long sys_umask(int mask)
mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
return mask;
}
-
+
asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
@@ -1742,6 +1742,17 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
error = prctl_set_seccomp(arg2);
break;
+ case PR_CAPBSET_READ:
+ if (!cap_valid(arg2))
+ return -EINVAL;
+ return !!cap_raised(current->cap_bset, arg2);
+ case PR_CAPBSET_DROP:
+#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+ return cap_prctl_drop(arg2);
+#else
+ return -EINVAL;
+#endif
+
default:
error = -EINVAL;
break;
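
The two new prctl() options give userspace direct access to the per-task bounding set that kernel/fork.c above now copies at clone time. A hypothetical caller that checks whether a capability is still in its bounding set and then drops it (dropping requires CAP_SETPCAP and a kernel built with CONFIG_SECURITY_FILE_CAPABILITIES):

#include <sys/prctl.h>
#include <linux/capability.h>

if (prctl(PR_CAPBSET_READ, CAP_MKNOD, 0, 0, 0) == 1)	/* still in the set? */
	(void) prctl(PR_CAPBSET_DROP, CAP_MKNOD, 0, 0, 0);	/* one-way drop */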
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index beee5b3b68a2..5b9b467de070 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -154,7 +154,10 @@ cond_syscall(sys_ioprio_get);
/* New file descriptors */
cond_syscall(sys_signalfd);
-cond_syscall(sys_timerfd);
cond_syscall(compat_sys_signalfd);
-cond_syscall(compat_sys_timerfd);
+cond_syscall(sys_timerfd_create);
+cond_syscall(sys_timerfd_settime);
+cond_syscall(sys_timerfd_gettime);
+cond_syscall(compat_sys_timerfd_settime);
+cond_syscall(compat_sys_timerfd_gettime);
cond_syscall(sys_eventfd);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7cb1ac3e6fff..86daaa26d120 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -84,8 +84,11 @@ extern int sysctl_stat_interval;
extern int latencytop_enabled;
/* Constants used for minimum and maximum */
-#ifdef CONFIG_DETECT_SOFTLOCKUP
+#if defined(CONFIG_DETECT_SOFTLOCKUP) || defined(CONFIG_HIGHMEM)
static int one = 1;
+#endif
+
+#ifdef CONFIG_DETECT_SOFTLOCKUP
static int sixty = 60;
#endif
@@ -416,15 +419,6 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
-#ifdef CONFIG_SECURITY_CAPABILITIES
- {
- .procname = "cap-bound",
- .data = &cap_bset,
- .maxlen = sizeof(kernel_cap_t),
- .mode = 0600,
- .proc_handler = &proc_dointvec_bset,
- },
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
#ifdef CONFIG_BLK_DEV_INITRD
{
.ctl_name = KERN_REALROOTDEV,
@@ -1150,6 +1144,19 @@ static struct ctl_table vm_table[] = {
.extra1 = &zero,
},
#endif
+#ifdef CONFIG_HIGHMEM
+ {
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "highmem_is_dirtyable",
+ .data = &vm_highmem_is_dirtyable,
+ .maxlen = sizeof(vm_highmem_is_dirtyable),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
@@ -1196,6 +1203,14 @@ static struct ctl_table fs_table[] = {
.proc_handler = &proc_dointvec,
},
{
+ .ctl_name = CTL_UNNUMBERED,
+ .procname = "nr_open",
+ .data = &sysctl_nr_open,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
.ctl_name = FS_DENTRY,
.procname = "dentry-state",
.data = &dentry_stat,
@@ -2080,26 +2095,6 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
return 0;
}
-#ifdef CONFIG_SECURITY_CAPABILITIES
-/*
- * init may raise the set.
- */
-
-int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- int op;
-
- if (write && !capable(CAP_SYS_MODULE)) {
- return -EPERM;
- }
-
- op = is_global_init(current) ? OP_SET : OP_AND;
- return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
- do_proc_dointvec_bset_conv,&op);
-}
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
-
/*
* Taint values can only be increased
*/
@@ -2513,12 +2508,6 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
return -ENOSYS;
}
-int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
- void __user *buffer, size_t *lenp, loff_t *ppos)
-{
- return -ENOSYS;
-}
-
int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index c3206fa50048..006365b69eaf 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -37,10 +37,6 @@ static struct trans_ctl_table trans_kern_table[] = {
{ KERN_NODENAME, "hostname" },
{ KERN_DOMAINNAME, "domainname" },
-#ifdef CONFIG_SECURITY_CAPABILITIES
- { KERN_CAP_BSET, "cap-bound" },
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
-
{ KERN_PANIC, "panic" },
{ KERN_REALROOTDEV, "real-root-dev" },
@@ -1498,9 +1494,6 @@ int sysctl_check_table(struct nsproxy *namespaces, struct ctl_table *table)
(table->strategy == sysctl_ms_jiffies) ||
(table->proc_handler == proc_dostring) ||
(table->proc_handler == proc_dointvec) ||
-#ifdef CONFIG_SECURITY_CAPABILITIES
- (table->proc_handler == proc_dointvec_bset) ||
-#endif /* def CONFIG_SECURITY_CAPABILITIES */
(table->proc_handler == proc_dointvec_minmax) ||
(table->proc_handler == proc_dointvec_jiffies) ||
(table->proc_handler == proc_dointvec_userhz_jiffies) ||
diff --git a/kernel/test_kprobes.c b/kernel/test_kprobes.c
index 88cdb109e13c..06b6395b45b2 100644
--- a/kernel/test_kprobes.c
+++ b/kernel/test_kprobes.c
@@ -135,6 +135,12 @@ static int test_jprobe(void)
#ifdef CONFIG_KRETPROBES
static u32 krph_val;
+static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
+{
+ krph_val = (rand1 / div_factor);
+ return 0;
+}
+
static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
unsigned long ret = regs_return_value(regs);
@@ -144,13 +150,19 @@ static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
printk(KERN_ERR "Kprobe smoke test failed: "
"incorrect value in kretprobe handler\n");
}
+ if (krph_val == 0) {
+ handler_errors++;
+ printk(KERN_ERR "Kprobe smoke test failed: "
+ "call to kretprobe entry handler failed\n");
+ }
- krph_val = (rand1 / div_factor);
+ krph_val = rand1;
return 0;
}
static struct kretprobe rp = {
.handler = return_handler,
+ .entry_handler = entry_handler,
.kp.symbol_name = "kprobe_target"
};
@@ -167,7 +179,7 @@ static int test_kretprobe(void)
ret = kprobe_target(rand1);
unregister_kretprobe(&rp);
- if (krph_val == 0) {
+ if (krph_val != rand1) {
printk(KERN_ERR "Kprobe smoke test failed: "
"kretprobe handler not called\n");
handler_errors++;
diff --git a/kernel/time.c b/kernel/time.c
index 4064c0566e77..33af3e55570d 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -566,7 +566,11 @@ EXPORT_SYMBOL(jiffies_to_timeval);
clock_t jiffies_to_clock_t(long x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+ return x * (USER_HZ / HZ);
+# else
return x / (HZ / USER_HZ);
+# endif
#else
u64 tmp = (u64)x * TICK_NSEC;
do_div(tmp, (NSEC_PER_SEC / USER_HZ));
@@ -599,7 +603,14 @@ EXPORT_SYMBOL(clock_t_to_jiffies);
u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
+# if HZ < USER_HZ
+ x *= USER_HZ;
+ do_div(x, HZ);
+# elif HZ > USER_HZ
do_div(x, HZ / USER_HZ);
+# else
+ /* Nothing to do */
+# endif
#else
/*
* There are better ways that don't overflow early,
@@ -611,7 +622,6 @@ u64 jiffies_64_to_clock_t(u64 x)
#endif
return x;
}
-
EXPORT_SYMBOL(jiffies_64_to_clock_t);
u64 nsec_to_clock_t(u64 x)
@@ -646,7 +656,6 @@ u64 get_jiffies_64(void)
} while (read_seqretry(&xtime_lock, seq));
return ret;
}
-
EXPORT_SYMBOL(get_jiffies_64);
#endif
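
The new HZ < USER_HZ branches fix an integer-division trap rather than a rounding detail: when HZ is smaller than USER_HZ, the old divisor HZ / USER_HZ truncates to zero. A worked example with hypothetical but legal values HZ = 50, USER_HZ = 100 (the TICK_NSEC precondition holds, since TICK_NSEC = 20000000 is a multiple of NSEC_PER_SEC / USER_HZ = 10000000):

	old: x / (HZ / USER_HZ)  ==  x / (50 / 100)  ==  x / 0	(divide by zero)
	new: x * (USER_HZ / HZ)  ==  x * 2			(correct)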
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 6e9259a5d501..81afb3927ecc 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -363,15 +363,13 @@ void clocksource_unregister(struct clocksource *cs)
static ssize_t
sysfs_show_current_clocksources(struct sys_device *dev, char *buf)
{
- char *curr = buf;
+ ssize_t count = 0;
spin_lock_irq(&clocksource_lock);
- curr += sprintf(curr, "%s ", curr_clocksource->name);
+ count = snprintf(buf, PAGE_SIZE, "%s\n", curr_clocksource->name);
spin_unlock_irq(&clocksource_lock);
- curr += sprintf(curr, "\n");
-
- return curr - buf;
+ return count;
}
/**
@@ -439,17 +437,20 @@ static ssize_t
sysfs_show_available_clocksources(struct sys_device *dev, char *buf)
{
struct clocksource *src;
- char *curr = buf;
+ ssize_t count = 0;
spin_lock_irq(&clocksource_lock);
list_for_each_entry(src, &clocksource_list, list) {
- curr += sprintf(curr, "%s ", src->name);
+ count += snprintf(buf + count,
+ max((ssize_t)PAGE_SIZE - count, (ssize_t)0),
+ "%s ", src->name);
}
spin_unlock_irq(&clocksource_lock);
- curr += sprintf(curr, "\n");
+ count += snprintf(buf + count,
+ max((ssize_t)PAGE_SIZE - count, (ssize_t)0), "\n");
- return curr - buf;
+ return count;
}
/*
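
Note that snprintf() returns the length that would have been written, so count can legitimately run past PAGE_SIZE on a long clocksource list; the max() clamp keeps later calls from being handed a negative size. A sketch of the same loop using scnprintf() (which returns the number of characters actually stored, so no clamp is needed):

list_for_each_entry(src, &clocksource_list, list)
	count += scnprintf(buf + count, PAGE_SIZE - count,
			   "%s ", src->name);
count += scnprintf(buf + count, PAGE_SIZE - count, "\n");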
diff --git a/kernel/timer.c b/kernel/timer.c
index 9fbb472b8cf0..70b29b59343f 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -818,12 +818,14 @@ unsigned long next_timer_interrupt(void)
#ifndef CONFIG_VIRT_CPU_ACCOUNTING
void account_process_tick(struct task_struct *p, int user_tick)
{
+ cputime_t one_jiffy = jiffies_to_cputime(1);
+
if (user_tick) {
- account_user_time(p, jiffies_to_cputime(1));
- account_user_time_scaled(p, jiffies_to_cputime(1));
+ account_user_time(p, one_jiffy);
+ account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
} else {
- account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
- account_system_time_scaled(p, jiffies_to_cputime(1));
+ account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
+ account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
}
}
#endif
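
cputime_to_scaled() exists so architectures with frequency-scaled accounting (powerpc's SPURR, for instance) can report scaled time distinct from raw cputime, and hoisting jiffies_to_cputime(1) into one_jiffy avoids computing it twice per tick. On generic architectures the conversion is, roughly, the identity:

#define cputime_to_scaled(__ct)	(__ct)	/* asm-generic/cputime.h, paraphrased */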