genetic-zaphod-cpu-scheduler/zaphod-v6.2.patch

Index: 2.6.11/fs/proc/array.c
===================================================================
--- 2.6.11.orig/fs/proc/array.c 2005-04-14 19:29:15.616973568 -0500
+++ 2.6.11/fs/proc/array.c 2005-04-15 10:46:00.818575480 -0500
@@ -170,7 +170,7 @@
"Uid:\t%d\t%d\t%d\t%d\n"
"Gid:\t%d\t%d\t%d\t%d\n",
get_task_state(p),
- (p->sleep_avg/1024)*100/(1020000000/1024),
+ map_proportion_rnd(avg_sleep_rate(p), 100),
p->tgid,
p->pid, pid_alive(p) ? p->group_leader->real_parent->tgid : 0,
pid_alive(p) && p->ptrace ? p->parent->pid : 0,
Index: 2.6.11/fs/proc/base.c
===================================================================
--- 2.6.11.orig/fs/proc/base.c 2005-04-14 19:29:15.617973940 -0500
+++ 2.6.11/fs/proc/base.c 2005-04-15 10:46:00.728542000 -0500
@@ -32,6 +32,7 @@
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/ptrace.h>
+#include <linux/sched_cpustats.h>
#include "internal.h"
/*
@@ -95,6 +96,9 @@
#ifdef CONFIG_SCHEDSTATS
PROC_TID_SCHEDSTAT,
#endif
+ PROC_TID_CPU_STATS,
+ PROC_TID_CPU_RATE_CAP,
+ PROC_TID_CPU_RATE_HARD_CAP,
#ifdef CONFIG_SECURITY
PROC_TID_ATTR,
PROC_TID_ATTR_CURRENT,
@@ -178,6 +182,9 @@
#ifdef CONFIG_AUDITSYSCALL
E(PROC_TID_LOGINUID, "loginuid", S_IFREG|S_IWUSR|S_IRUGO),
#endif
+ E(PROC_TID_CPU_STATS, "cpustats", S_IFREG|S_IRUGO),
+ E(PROC_TID_CPU_RATE_CAP, "cpu_rate_cap", S_IFREG|S_IRUGO|S_IWUSR),
+ E(PROC_TID_CPU_RATE_HARD_CAP, "cpu_rate_hard_cap", S_IFREG|S_IRUGO|S_IWUSR),
{0,0,NULL,0}
};
@@ -808,6 +815,98 @@
};
#endif
+static ssize_t cpu_rate_cap_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+ char buffer[64];
+ size_t len;
+ unsigned long hcppt = proportion_to_ppt(task->cpu_rate_cap);
+
+ if (*ppos)
+ return 0;
+ *ppos = len = sprintf(buffer, "%lu\n", hcppt);
+ if (copy_to_user(buf, buffer, len))
+ return -EFAULT;
+
+ return len;
+}
+
+static ssize_t cpu_rate_cap_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+ char buffer[128] = "";
+ char *endptr = NULL;
+ unsigned long hcppt;
+ int res;
+
+
+ if ((count > 63) || *ppos)
+ return -EFBIG;
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+ hcppt = simple_strtoul(buffer, &endptr, 0);
+ if ((endptr == buffer) || (hcppt == ULONG_MAX))
+ return -EINVAL;
+
+ if ((res = set_cpu_rate_cap(task, hcppt)) != 0)
+ return res;
+
+ return count;
+}
+
+struct file_operations proc_cpu_rate_cap_operations = {
+ read: cpu_rate_cap_read,
+ write: cpu_rate_cap_write,
+};
+
+ssize_t cpu_rate_hard_cap_read(struct file * file, char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+ char buffer[64];
+ size_t len;
+ unsigned long long hcppt = proportion_to_ppt(task->cpu_rate_hard_cap);
+
+ if (*ppos)
+ return 0;
+ *ppos = len = sprintf(buffer, "%llu\n", hcppt);
+ if (copy_to_user(buf, buffer, len))
+ return -EFAULT;
+
+ return len;
+}
+
+ssize_t cpu_rate_hard_cap_write(struct file * file, const char * buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_struct *task = PROC_I(file->f_dentry->d_inode)->task;
+ char buffer[128] = "";
+ char *endptr = NULL;
+ unsigned long long hcppt;
+ int res;
+
+
+ if ((count > 63) || *ppos)
+ return -EFBIG;
+ if (copy_from_user(buffer, buf, count))
+ return -EFAULT;
+ hcppt = simple_strtoul(buffer, &endptr, 0);
+ if ((endptr == buffer) || (hcppt == ULONG_MAX))
+ return -EINVAL;
+
+ if ((res = set_cpu_rate_hard_cap(task, hcppt)) != 0)
+ return res;
+
+ return count;
+}
+
+struct file_operations proc_cpu_rate_hard_cap_operations = {
+ read: cpu_rate_hard_cap_read,
+ write: cpu_rate_hard_cap_write,
+};
+
static int proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
{
struct inode *inode = dentry->d_inode;
@@ -1498,6 +1597,16 @@
inode->i_fop = &proc_loginuid_operations;
break;
#endif
+ case PROC_TID_CPU_STATS:
+ inode->i_fop = &proc_info_file_operations;
+ ei->op.proc_read = task_sched_cpustats;
+ break;
+ case PROC_TID_CPU_RATE_CAP:
+ inode->i_fop = &proc_cpu_rate_cap_operations;
+ break;
+ case PROC_TID_CPU_RATE_HARD_CAP:
+ inode->i_fop = &proc_cpu_rate_hard_cap_operations;
+ break;
default:
printk("procfs: impossible type (%d)",p->type);
iput(inode);
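
The two new writable files take a cap in parts per thousand, as the read/write handlers above show. Below is a minimal userspace sketch of driving them, assuming they appear under /proc/<tgid>/task/<tid>/ as the PROC_TID_* entries suggest; the path layout and helper name are illustrative only, not part of the patch.

#include <stdio.h>
#include <sys/types.h>

/*
 * Sketch only: write a soft CPU rate cap (in parts per thousand) for one
 * thread. Error handling is minimal; the proc path layout is assumed from
 * the PROC_TID_* entries added above.
 */
static int set_cpu_rate_cap_ppt(pid_t tgid, pid_t tid, unsigned long ppt)
{
	char path[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/task/%d/cpu_rate_cap",
		 (int)tgid, (int)tid);
	f = fopen(path, "w");
	if (!f)
		return -1;
	/* e.g. ppt == 250 asks for at most ~25% of one CPU */
	fprintf(f, "%lu\n", ppt);
	return fclose(f);
}

Reading the same file back reports the current cap, again in parts per thousand; cpu_rate_hard_cap works the same way but is enforced via the sinbin mechanism added in kernel/sched.c.
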
Index: 2.6.11/fs/proc/proc_misc.c
===================================================================
--- 2.6.11.orig/fs/proc/proc_misc.c 2005-04-14 19:29:15.617973940 -0500
+++ 2.6.11/fs/proc/proc_misc.c 2005-04-15 10:46:00.541472436 -0500
@@ -44,6 +44,7 @@
#include <linux/jiffies.h>
#include <linux/sysrq.h>
#include <linux/vmalloc.h>
+#include <linux/sched_cpustats.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/io.h>
@@ -566,6 +567,7 @@
{"cmdline", cmdline_read_proc},
{"locks", locks_read_proc},
{"execdomains", execdomains_read_proc},
+ {"cpustats", cpustats_read_proc},
{NULL,}
};
for (p = simple_ones; p->name; p++)
Index: 2.6.11/include/linux/init_task.h
===================================================================
--- 2.6.11.orig/include/linux/init_task.h 2005-04-14 19:29:15.617973940 -0500
+++ 2.6.11/include/linux/init_task.h 2005-04-15 10:46:00.870568836 -0500
@@ -92,6 +92,9 @@
.real_timer = { \
.function = it_real_fn \
}, \
+ .sinbin_timer = { \
+ .function = sinbin_release_fn \
+ }, \
.group_info = &init_groups, \
.cap_effective = CAP_INIT_EFF_SET, \
.cap_inheritable = CAP_INIT_INH_SET, \
@@ -112,6 +115,11 @@
.proc_lock = SPIN_LOCK_UNLOCKED, \
.switch_lock = SPIN_LOCK_UNLOCKED, \
.journal_info = NULL, \
+ INIT_CPUSTATS, \
+ .cpu_rate_cap = PROPORTION_ONE, \
+ .cpu_rate_hard_cap = PROPORTION_ONE, \
+ .rq = NULL, \
+ SCHED_ZAPHOD_INIT, \
}
Index: 2.6.11/include/linux/sched.h
===================================================================
--- 2.6.11.orig/include/linux/sched.h 2005-04-14 19:29:15.618974312 -0500
+++ 2.6.11/include/linux/sched.h 2005-04-15 10:50:05.428013764 -0500
@@ -32,6 +32,8 @@
#include <linux/pid.h>
#include <linux/percpu.h>
#include <linux/topology.h>
+#include <linux/sched_cpustats.h>
+#include <linux/sched_zaphod.h>
struct exec_domain;
@@ -203,7 +205,6 @@
extern void arch_unmap_area(struct vm_area_struct *area);
extern void arch_unmap_area_topdown(struct vm_area_struct *area);
-
struct mm_struct {
struct vm_area_struct * mmap; /* list of VMAs */
struct rb_root mm_rb;
@@ -343,8 +344,9 @@
/*
* Priority of a process goes from 0..MAX_PRIO-1, valid RT
* priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL tasks are
- * in the range MAX_RT_PRIO..MAX_PRIO-1. Priority values
+ * in the range MIN_NORMAL_PRIO..MAX_PRIO-1 (plus bonuses). Priority values
* are inverted: lower p->prio value means higher priority.
+ * MAX_RT_PRIO is reserved for well-behaved PF_UNPRIV_RT tasks.
*
* The MAX_USER_RT_PRIO value allows the actual maximum
* RT priority to be separate from the value exported to
@@ -356,7 +358,13 @@
#define MAX_USER_RT_PRIO 100
#define MAX_RT_PRIO MAX_USER_RT_PRIO
-#define MAX_PRIO (MAX_RT_PRIO + 40)
+#define MIN_NORMAL_PRIO (MAX_RT_PRIO + 1)
+#define MAX_PRIO (MIN_NORMAL_PRIO + 40)
+/*
+ * Making IDLE_PRIO bigger than 159 would require modification of bitmaps
+ */
+#define IDLE_PRIO 159
+#define BGND_PRIO (IDLE_PRIO - 1)
#define rt_task(p) (unlikely((p)->prio < MAX_RT_PRIO))
@@ -387,7 +395,7 @@
extern struct user_struct root_user;
#define INIT_USER (&root_user)
-typedef struct prio_array prio_array_t;
+typedef struct runqueue runqueue_t;
struct backing_dev_info;
struct reclaim_state;
@@ -521,7 +529,6 @@
#define GROUP_AT(gi, i) \
((gi)->blocks[(i)/NGROUPS_PER_BLOCK][(i)%NGROUPS_PER_BLOCK])
-
struct audit_context; /* See audit.c */
struct mempolicy;
@@ -536,15 +543,20 @@
int prio, static_prio;
struct list_head run_list;
- prio_array_t *array;
-
- unsigned long sleep_avg;
- unsigned long long timestamp, last_ran;
- int activated;
+ runqueue_t *rq;
+ struct runq_cpustats *csrq;
+ struct sched_zaphod_runq_data *zrq;
+
+ struct sched_zaphod zaphod;
+ struct task_cpustats cpustats;
+ unsigned long cpu_rate_cap, min_cpu_rate_cap;
+ unsigned long cpu_rate_hard_cap;
+ struct timer_list sinbin_timer;
+ unsigned long long last_ran;
unsigned long policy;
cpumask_t cpus_allowed;
- unsigned int time_slice, first_time_slice;
+ unsigned long time_slice;
#ifdef CONFIG_SCHEDSTATS
struct sched_info sched_info;
@@ -735,6 +747,13 @@
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
#define PF_BORROWED_MM 0x00400000 /* I am a kthread doing use_mm */
+#define PF_SINBINNED 0x00800000 /* I am sinbinned */
+#define PF_UNPRIV_RT 0x01000000 /* I wanted to be RT but had insufficient privilege*/
+#define PF_UISLEEP 0x02000000 /* Uninterruptible sleep */
+
+#define task_is_sinbinned(p) (unlikely(((p)->flags & PF_SINBINNED) != 0))
+#define task_is_unpriv_rt(p) (unlikely(((p)->flags & PF_UNPRIV_RT) != 0))
+#define task_is_bgnd(p) (unlikely((p)->cpu_rate_cap == 0))
/*
* Only the _current_ task can read/write to tsk->flags, but other
@@ -761,6 +780,13 @@
#define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
#define used_math() tsk_used_math(current)
+extern void put_task_in_sinbin(struct task_struct *p, unsigned long durn);
+extern void sinbin_release_fn(unsigned long arg);
+
+/* set cpu rate caps in parts per thousand */
+extern int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap);
+extern int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap);
+
#ifdef CONFIG_SMP
extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
#else
@@ -1148,10 +1174,7 @@
return p->thread_info->cpu;
}
-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu)
-{
- p->thread_info->cpu = cpu;
-}
+extern FASTCALL(void set_task_cpu(struct task_struct *p, unsigned int cpu));
#else
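
For orientation, the new priority layout works out as follows with the stock MAX_USER_RT_PRIO of 100. The tiny standalone program below simply restates the arithmetic of the definitions above and of NICE_TO_PRIO()/effective_prio() in the kernel/sched.c changes further down; it is a worked example of the values, not kernel code.

#include <stdio.h>

/* Local mirror of the patch's constants, for illustration only. */
#define MAX_RT_PRIO     100
#define MIN_NORMAL_PRIO (MAX_RT_PRIO + 1)	/* 101 */
#define MAX_PRIO        (MIN_NORMAL_PRIO + 40)	/* 141 */
#define IDLE_PRIO       159
#define BGND_PRIO       (IDLE_PRIO - 1)		/* 158 */
#define NICE_TO_PRIO(n) (MIN_NORMAL_PRIO + (n) + 20)

int main(void)
{
	printf("nice -20 -> prio %d\n", NICE_TO_PRIO(-20));		/* 101 */
	printf("nice   0 -> prio %d\n", NICE_TO_PRIO(0));		/* 121 */
	printf("nice  19 -> prio %d\n", NICE_TO_PRIO(19));		/* 140 */
	printf("soft cap exceeded  -> prio %d\n", BGND_PRIO - 1);	/* 157 */
	printf("background (cap 0) -> prio %d\n", BGND_PRIO);		/* 158 */
	printf("idle               -> prio %d\n", IDLE_PRIO);		/* 159 */
	return 0;
}

Priorities 141..156 are left free between the nice range and BGND_PRIO, and prio 100 (MAX_RT_PRIO) is handed out by effective_prio() to well-behaved PF_UNPRIV_RT tasks.
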
Index: 2.6.11/include/linux/sched_cpustats.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ 2.6.11/include/linux/sched_cpustats.h 2005-04-15 10:46:29.045185928 -0500
@@ -0,0 +1,160 @@
+#ifndef _LINUX_SCHED_CPUSTATS_H
+#define _LINUX_SCHED_CPUSTATS_H
+
+#include <linux/sysctl.h>
+
+/*
+ * Fixed denominator rational numbers for use by the CPU scheduler
+ */
+#define SCHED_AVG_OFFSET 4
+/*
+ * Get the rounded integer value of a scheduling statistic average field
+ * i.e. those fields whose names begin with avg_
+ */
+#define SCHED_AVG_RND(x) \
+ (((x) + (1 << (SCHED_AVG_OFFSET - 1))) >> (SCHED_AVG_OFFSET))
+#define SCHED_AVG_REAL(a) ((a) << SCHED_AVG_OFFSET)
+
+#define INITIAL_CPUSTATS_TIMESTAMP \
+ ((unsigned long long)INITIAL_JIFFIES * (1000000000ULL / HZ))
+
+struct runq_cpustats {
+#ifdef CONFIG_SMP
+ unsigned long long timestamp_last_tick;
+#endif
+ unsigned long long total_delay;
+ unsigned long long total_rt_delay;
+ unsigned long long total_intr_delay;
+ unsigned long long total_rt_intr_delay;
+ unsigned long long total_fork_delay;
+ unsigned long long total_sinbin;
+};
+
+/*
+ * Scheduling statistics for a task/thread
+ */
+struct task_cpustats {
+ unsigned long long timestamp;
+ unsigned long long total_wake_ups;
+ unsigned long long intr_wake_ups;
+ unsigned long long total_sleep;
+ unsigned long long avg_sleep_per_cycle;
+ unsigned long long total_cpu;
+ unsigned long long avg_cpu_per_cycle;
+ unsigned long long total_delay;
+ unsigned long long avg_delay_per_cycle;
+ unsigned long long total_sinbin;
+ unsigned long long avg_cycle_length;
+ unsigned long cpu_usage_rate;
+ unsigned int flags;
+};
+
+#define CPUSTATS_WOKEN_FOR_INTR_FL (1 << 0)
+#define CPUSTATS_JUST_FORKED_FL (1 << 1)
+
+#define INIT_CPUSTATS \
+ .cpustats = { .timestamp = INITIAL_CPUSTATS_TIMESTAMP, 0, }, \
+ .csrq = NULL
+
+
+struct task_struct;
+
+extern void init_runq_cpustats(unsigned int cpu);
+extern struct runq_cpustats *cpu_runq_cpustats(unsigned int cpu);
+#ifdef CONFIG_SMP
+extern unsigned long long adjusted_sched_clock(const struct task_struct *p);
+extern void set_task_runq_cpustats(struct task_struct *p, unsigned int cpu);
+static inline void tick_runq_cpustats(struct runq_cpustats *rcsp, unsigned long long now)
+{
+ rcsp->timestamp_last_tick = now;
+}
+#else
+#define adjusted_sched_clock(p) sched_clock()
+static inline void set_task_runq_cpustats(struct task_struct *p, unsigned int cpu) {}
+static inline void tick_runq_cpustats(struct runq_cpustats *rcsp, unsigned long long now) {}
+#endif
+
+extern void initialize_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_sleep_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_cpu_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_delay_cpustats(struct task_struct *p, unsigned long long now);
+extern void delta_rq_delay_cpustats(struct task_struct *p, unsigned long long delta);
+extern void update_cpustats_at_wake_up(struct task_struct *p, unsigned long long now);
+extern void update_cpustats_at_end_of_ts(struct task_struct *p, unsigned long long now);
+
+/*
+ * Get "up to date" scheduling statistics for the given task
+ * This function should be used if reliable scheduling statistics are required
+ * outside the scheduler itself, as the relevant fields in the task structure
+ * are not kept "up to date". NB the possible difference between those in the
+ * task structure and the correct values could be quite large for sleeping tasks.
+ */
+extern int get_task_cpustats(struct task_struct*, struct task_cpustats*);
+
+/*
+ * Scheduling statistics for a CPU
+ */
+struct cpu_cpustats {
+ unsigned long long timestamp;
+ unsigned long long total_idle;
+ unsigned long long total_busy;
+ unsigned long long total_delay;
+ unsigned long long total_rt_delay;
+ unsigned long long total_intr_delay;
+ unsigned long long total_rt_intr_delay;
+ unsigned long long total_fork_delay;
+ unsigned long long total_sinbin;
+ unsigned long long nr_switches;
+};
+
+/*
+ * Get scheduling statistics for the nominated CPU
+ */
+extern int get_cpu_cpustats(unsigned int, struct cpu_cpustats*);
+
+/*
+ * Make scheduling statistics available via /proc
+ */
+extern int task_sched_cpustats(struct task_struct *p, char *buffer);
+extern int cpustats_read_proc(char *page, char **start, off_t off, int count,
+ int *eof, void *data);
+
+
+/*
+ * CPU rate statistics are estimated as proportions (i.e. real numbers in the
+ * range 0 to 1 inclusive) using fixed denominator rational numbers.
+ * The denominator (PROPORTION_ONE) must be less than 2^24.
+ */
+#define PROPORTION_OFFSET 23
+#define PROPORTION_ONE (1ULL << PROPORTION_OFFSET)
+#define PROP_FM_PPT(a) (((unsigned long long)(a) * PROPORTION_ONE) / 1000)
+
+/* Require: a <= b */
+extern unsigned long calc_proportion(unsigned long long a, unsigned long long b);
+extern unsigned long map_proportion(unsigned long prop, unsigned long range);
+#define map_proportion_rnd(p, r) map_proportion((p) >> 1, ((r) << 1) + 1)
+extern unsigned long proportion_to_ppt(unsigned long proportion);
+extern unsigned long ppt_to_proportion(unsigned long ppt);
+
+extern unsigned long avg_cpu_usage_rate(const struct task_struct*);
+extern unsigned long avg_sleep_rate(const struct task_struct*);
+extern unsigned long avg_cpu_delay_rate(const struct task_struct*);
+extern unsigned long delay_in_jiffies_for_usage(const struct task_struct*, unsigned long);
+
+extern int do_proc_proportion(ctl_table *ctp, int write, struct file *fp,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+#define TASK_CPUSTATS(p) (p)->u.zaphod.cpustats
+#define RUNQ_CPUSTATS(p) (p)->u.zaphod.csrq
+/* set/get cpu rate caps in parts per thousand */
+extern int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap);
+extern int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap);
+extern unsigned int get_cpu_rate_cap(struct task_struct *p);
+extern unsigned int get_cpu_rate_hard_cap(struct task_struct *p);
+#else
+#define TASK_CPUSTATS(p) (p)->cpustats
+#define RUNQ_CPUSTATS(p) (p)->csrq
+#endif
+
+#endif
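
The proportion helpers above are fixed-point values with denominator PROPORTION_ONE (2^23). map_proportion() is only declared in this header, so the body in the sketch below is an assumed multiply-and-shift, used purely to show the scale of the numbers and how the /proc/<pid>/status change in fs/proc/array.c turns avg_sleep_rate() into a percentage; it is not the patch's implementation.

#include <stdio.h>

#define PROPORTION_OFFSET 23
#define PROPORTION_ONE    (1ULL << PROPORTION_OFFSET)
#define PROP_FM_PPT(a)    (((unsigned long long)(a) * PROPORTION_ONE) / 1000)

/* Assumed body, for illustration only; the real implementation lives in
 * kernel/sched_cpustats.c. */
static unsigned long map_proportion(unsigned long prop, unsigned long range)
{
	return ((unsigned long long)prop * range) >> PROPORTION_OFFSET;
}

#define map_proportion_rnd(p, r) map_proportion((p) >> 1, ((r) << 1) + 1)

int main(void)
{
	unsigned long third = PROP_FM_PPT(333);	/* 333 parts per thousand */
	unsigned long half  = PROP_FM_PPT(500);
	unsigned long one   = PROPORTION_ONE;

	printf("333 ppt as a raw proportion: %lu\n", third);		/* 2793406 */
	printf("... as a percentage: %lu\n", map_proportion_rnd(third, 100));	/* 33 */
	printf("500 ppt -> %lu%%\n", map_proportion_rnd(half, 100));		/* 50 */
	printf("a full CPU -> %lu%%\n", map_proportion_rnd(one, 100));		/* 100 */
	return 0;
}
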
Index: 2.6.11/include/linux/sched_zaphod.h
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ 2.6.11/include/linux/sched_zaphod.h 2005-04-15 10:46:00.865566976 -0500
@@ -0,0 +1,160 @@
+#ifndef _LINUX_SCHED_ZAPHOD_H
+#define _LINUX_SCHED_ZAPHOD_H
+
+#include <linux/sysctl.h>
+#include <linux/timer.h>
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+/*
+ * Making IDLE_PRIO bigger than 159 would require modification of bitmaps
+ */
+#define ZAPHOD_IDLE_PRIO 159
+#define ZAPHOD_BGND_PRIO (ZAPHOD_IDLE_PRIO - 1)
+#define ZAPHOD_MIN_NORMAL_PRIO (MAX_RT_PRIO + 1)
+#define ZAPHOD_MAX_PRIO (ZAPHOD_MIN_NORMAL_PRIO + 40)
+#else
+#define ZAPHOD_BGND_PRIO BGND_PRIO
+/* defined in sched.c */
+extern unsigned long time_slice;
+#endif
+
+/*
+ * For entitlement based scheduling a task's shares are determined from
+ * its "nice"ness.
+ */
+#define EB_SHARES_PER_NICE 5
+#define DEFAULT_EB_SHARES (20 * EB_SHARES_PER_NICE)
+#define MAX_EB_SHARES (DEFAULT_EB_SHARES * DEFAULT_EB_SHARES)
+
+struct sched_zaphod_runq_data {
+ unsigned long avg_nr_running;
+ atomic_t eb_yardstick;
+ atomic_t eb_ticks_to_decay;
+};
+
+extern void zaphod_init_cpu_runq_data(unsigned int cpu);
+extern struct sched_zaphod_runq_data *zaphod_cpu_runq_data(unsigned int cpu);
+extern void zaphod_runq_data_tick(struct sched_zaphod_runq_data *zrq, unsigned long numr);
+
+struct sched_zaphod {
+ unsigned int pre_bonus_priority;
+ unsigned int interactive_bonus;
+ unsigned int throughput_bonus;
+ unsigned int eb_shares;
+};
+
+#define ZAPHOD_TASK_DATA_INIT() \
+ { .pre_bonus_priority = (ZAPHOD_BGND_PRIO - 20), \
+ .eb_shares = DEFAULT_EB_SHARES, \
+ .interactive_bonus = 0, \
+ .throughput_bonus = 0, \
+ }
+
+#define SCHED_ZAPHOD_INIT \
+ .zrq = NULL, \
+ .zaphod = ZAPHOD_TASK_DATA_INIT()
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+static inline struct sched_zaphod zaphod_task_data_init(void) {
+ struct sched_zaphod ret = ZAPHOD_TASK_DATA_INIT();
+
+ return ret;
+}
+#endif
+
+struct task_struct;
+
+extern void zaphod_fork(struct task_struct *p);
+extern unsigned int zaphod_effective_prio(struct task_struct *p);
+extern void zaphod_reassess_at_activation(struct task_struct *p);
+extern void zaphod_reassess_at_end_of_ts(struct task_struct *p);
+extern void zaphod_reassess_at_sinbin_release(struct task_struct *p);
+extern void zaphod_reassess_at_renice(struct task_struct *p);
+extern void zaphod_reassess_at_new_cap(struct task_struct *p);
+
+#ifdef CONFIG_SYSCTL
+#ifndef CONFIG_CPUSCHED_ZAPHOD
+extern struct ctl_table zaphod_ctl_table[];
+#else
+extern unsigned int max_ia_bonus;
+extern unsigned int max_max_ia_bonus;
+extern unsigned int initial_ia_bonus;
+extern unsigned int max_tpt_bonus;
+extern unsigned int max_max_tpt_bonus;
+extern unsigned long ia_threshold;
+extern unsigned long cpu_hog_threshold;
+#define ZAPHOD_MODE_BUFFER_LEN 16
+extern char current_zaphod_mode[ZAPHOD_MODE_BUFFER_LEN];
+int proc_zaphod_mode(ctl_table *ctp, int write, struct file *fp,
+ void __user *buffer, size_t *lenp, loff_t *ppos);
+#endif
+
+#define ZAPHOD_SYSCTL_FNS() \
+ CPU_SCHED_ZAPHOD_MAX_IA_BONUS, \
+ CPU_SCHED_ZAPHOD_INITIAL_IA_BONUS, \
+ CPU_SCHED_ZAPHOD_IA_THRESHOLD, \
+ CPU_SCHED_ZAPHOD_CPU_HOG_THRESHOLD, \
+ CPU_SCHED_ZAPHOD_MAX_TPT_BONUS, \
+ CPU_SCHED_ZAPHOD_MODE
+
+#define ZAPHOD_CTL_TABLE_INIT() \
+{ \
+ .ctl_name = CPU_SCHED_ZAPHOD_MAX_IA_BONUS, \
+ .procname = "max_ia_bonus", \
+ .data = &max_ia_bonus, \
+ .maxlen = sizeof (unsigned int), \
+ .mode = 0644, \
+ .proc_handler = &proc_dointvec_minmax, \
+ .extra1 = NULL, \
+ .extra2 = (void *)&max_max_ia_bonus \
+}, \
+{ \
+ .ctl_name = CPU_SCHED_ZAPHOD_INITIAL_IA_BONUS, \
+ .procname = "initial_ia_bonus", \
+ .data = &initial_ia_bonus, \
+ .maxlen = sizeof (unsigned int), \
+ .mode = 0644, \
+ .proc_handler = &proc_dointvec_minmax, \
+ .extra1 = (void *)&zero, \
+ .extra2 = (void *)&max_max_ia_bonus \
+}, \
+{ \
+ .ctl_name = CPU_SCHED_ZAPHOD_IA_THRESHOLD, \
+ .procname = "ia_threshold", \
+ .data = &ia_threshold, \
+ .maxlen = sizeof (unsigned long), \
+ .mode = 0644, \
+ .proc_handler = &do_proc_proportion, \
+ .extra1 = NULL, \
+ .extra2 = NULL \
+}, \
+{ \
+ .ctl_name = CPU_SCHED_ZAPHOD_CPU_HOG_THRESHOLD, \
+ .procname = "cpu_hog_threshold", \
+ .data = &cpu_hog_threshold, \
+ .maxlen = sizeof (unsigned long), \
+ .mode = 0644, \
+ .proc_handler = &do_proc_proportion, \
+ .extra1 = NULL, \
+ .extra2 = NULL \
+}, \
+{ \
+ .ctl_name = CPU_SCHED_ZAPHOD_MAX_TPT_BONUS, \
+ .procname = "max_tpt_bonus", \
+ .data = &max_tpt_bonus, \
+ .maxlen = sizeof (unsigned int), \
+ .mode = 0644, \
+ .proc_handler = &proc_dointvec_minmax, \
+ .extra1 = (void *)&zero, \
+ .extra2 = (void *)&max_max_tpt_bonus \
+}, \
+{ \
+ .ctl_name = CPU_SCHED_ZAPHOD_MODE, \
+ .procname = "mode", \
+ .data = &current_zaphod_mode, \
+ .maxlen = ZAPHOD_MODE_BUFFER_LEN, \
+ .mode = 0644, \
+ .proc_handler = &proc_zaphod_mode, \
+}
+#endif
+#endif
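
The entitlement-based shares scale is defined above, but the nice-to-shares mapping itself lives in kernel/sched_zaphod.c and is not in this excerpt. The sketch below is one plausible reading of the constants, shown only to give a feel for the magnitudes; treat nice_to_shares() as an assumption, not the scheduler's actual formula.

#include <stdio.h>

#define EB_SHARES_PER_NICE 5
#define DEFAULT_EB_SHARES  (20 * EB_SHARES_PER_NICE)			/* 100 */
#define MAX_EB_SHARES      (DEFAULT_EB_SHARES * DEFAULT_EB_SHARES)	/* 10000 */

/* Hypothetical mapping: each step of nice is worth EB_SHARES_PER_NICE
 * shares, so nice 0 lands exactly on DEFAULT_EB_SHARES. */
static unsigned int nice_to_shares(int nice)
{
	return (20 - nice) * EB_SHARES_PER_NICE;
}

int main(void)
{
	printf("nice -20 -> %u shares\n", nice_to_shares(-20));	/* 200 */
	printf("nice   0 -> %u shares\n", nice_to_shares(0));	/* 100 */
	printf("nice  19 -> %u shares\n", nice_to_shares(19));	/*   5 */
	printf("MAX_EB_SHARES = %u\n", MAX_EB_SHARES);		/* 10000 */
	return 0;
}
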
Index: 2.6.11/include/linux/sysctl.h
===================================================================
--- 2.6.11.orig/include/linux/sysctl.h 2005-04-14 19:29:15.618974312 -0500
+++ 2.6.11/include/linux/sysctl.h 2005-04-15 10:46:00.542472808 -0500
@@ -61,7 +61,8 @@
CTL_DEV=7, /* Devices */
CTL_BUS=8, /* Busses */
CTL_ABI=9, /* Binary emulation */
- CTL_CPU=10 /* CPU stuff (speed scaling, etc) */
+ CTL_CPU=10, /* CPU stuff (speed scaling, etc) */
+ CTL_CPU_SCHED=11, /* CPU scheduler stuff */
};
/* CTL_BUS names: */
@@ -803,6 +804,10 @@
void __user *, size_t *, loff_t *);
extern int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int,
struct file *, void __user *, size_t *, loff_t *);
+typedef int (*sysctl_ul_convf_t)(unsigned long *val, void *data, int write);
+extern int do_proc_doulongvec_convf_minmax(ctl_table *, int,
+ struct file *, void __user *, size_t *,
+ loff_t *s, sysctl_ul_convf_t, void __user*);
extern int do_sysctl (int __user *name, int nlen,
void __user *oldval, size_t __user *oldlenp,
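
The new do_proc_doulongvec_convf_minmax() takes a sysctl_ul_convf_t hook so callers such as do_proc_proportion() can convert between the user-visible representation and the stored one. Its exact calling convention is in kernel/sysctl.c, so the callback below is only a sketch of the intended shape (milliseconds on the user side, jiffies in storage); the direction convention and helper name are assumptions.

#include <stdio.h>

#define HZ 1000	/* stand-in for the kernel tick rate, illustration only */

typedef int (*sysctl_ul_convf_t)(unsigned long *val, void *data, int write);

/* Hypothetical converter matching the typedef above. */
static int ul_msecs_jiffies_convf(unsigned long *val, void *data, int write)
{
	(void)data;
	if (write)
		*val = (*val * HZ) / 1000;	/* msecs written by user -> jiffies */
	else
		*val = (*val * 1000) / HZ;	/* stored jiffies -> msecs for user */
	return 0;
}

int main(void)
{
	unsigned long v = 250;			/* user writes "250" */
	sysctl_ul_convf_t convf = ul_msecs_jiffies_convf;

	convf(&v, NULL, 1);
	printf("stored as %lu jiffies\n", v);	/* 250 when HZ == 1000 */
	return 0;
}
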
Index: 2.6.11/kernel/Makefile
===================================================================
--- 2.6.11.orig/kernel/Makefile 2005-04-14 19:29:15.618974312 -0500
+++ 2.6.11/kernel/Makefile 2005-04-15 10:46:00.870568836 -0500
@@ -7,7 +7,7 @@
sysctl.o capability.o ptrace.o timer.o user.o \
signal.o sys.o kmod.o workqueue.o pid.o \
rcupdate.o intermodule.o extable.o params.o posix-timers.o \
- kthread.o wait.o kfifo.o sys_ni.o
+ kthread.o wait.o kfifo.o sys_ni.o sched_cpustats.o sched_zaphod.o
obj-$(CONFIG_FUTEX) += futex.o
obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
Index: 2.6.11/kernel/sched.c
===================================================================
--- 2.6.11.orig/kernel/sched.c 2005-04-14 19:29:15.619974684 -0500
+++ 2.6.11/kernel/sched.c 2005-04-15 10:46:29.048187044 -0500
@@ -51,125 +51,57 @@
/*
* Convert user-nice values [ -20 ... 0 ... 19 ]
- * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
+ * to static priority [ MIN_NORMAL_PRIO..MAX_PRIO-1 ],
* and back.
*/
-#define NICE_TO_PRIO(nice) (MAX_RT_PRIO + (nice) + 20)
-#define PRIO_TO_NICE(prio) ((prio) - MAX_RT_PRIO - 20)
+#define NICE_TO_PRIO(nice) (MIN_NORMAL_PRIO + (nice) + 20)
+#define PRIO_TO_NICE(prio) ((prio) - MIN_NORMAL_PRIO - 20)
#define TASK_NICE(p) PRIO_TO_NICE((p)->static_prio)
/*
- * 'User priority' is the nice value converted to something we
- * can work with better when scaling various scheduler parameters,
- * it's a [ 0 ... 39 ] range.
- */
-#define USER_PRIO(p) ((p)-MAX_RT_PRIO)
-#define TASK_USER_PRIO(p) USER_PRIO((p)->static_prio)
-#define MAX_USER_PRIO (USER_PRIO(MAX_PRIO))
-
-/*
- * Some helpers for converting nanosecond timing to jiffy resolution
- */
-#define NS_TO_JIFFIES(TIME) ((TIME) / (1000000000 / HZ))
-#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ))
-
-/*
* These are the 'tuning knobs' of the scheduler:
*
- * Minimum timeslice is 5 msecs (or 1 jiffy, whichever is larger),
- * default timeslice is 100 msecs, maximum timeslice is 800 msecs.
- * Timeslices get refilled after they expire.
+ * Default configurable timeslice is 100 msecs, maximum configurable
+ * timeslice is 1000 msecs and minimum configurable timeslice is 1 jiffy.
+ * Timeslices get renewed on task creation, on wake up and after they expire.
*/
-#define MIN_TIMESLICE max(5 * HZ / 1000, 1)
+#define MIN_TIMESLICE 1
#define DEF_TIMESLICE (100 * HZ / 1000)
-#define ON_RUNQUEUE_WEIGHT 30
-#define CHILD_PENALTY 95
-#define PARENT_PENALTY 100
-#define EXIT_WEIGHT 3
-#define PRIO_BONUS_RATIO 25
-#define MAX_BONUS (MAX_USER_PRIO * PRIO_BONUS_RATIO / 100)
-#define INTERACTIVE_DELTA 2
-#define MAX_SLEEP_AVG (DEF_TIMESLICE * MAX_BONUS)
-#define STARVATION_LIMIT (MAX_SLEEP_AVG)
-#define NS_MAX_SLEEP_AVG (JIFFIES_TO_NS(MAX_SLEEP_AVG))
-
-/*
- * If a task is 'interactive' then we reinsert it in the active
- * array after it has expired its current timeslice. (it will not
- * continue to run immediately, it will still roundrobin with
- * other interactive tasks.)
- *
- * This part scales the interactivity limit depending on niceness.
- *
- * We scale it linearly, offset by the INTERACTIVE_DELTA delta.
- * Here are a few examples of different nice levels:
- *
- * TASK_INTERACTIVE(-20): [1,1,1,1,1,1,1,1,1,0,0]
- * TASK_INTERACTIVE(-10): [1,1,1,1,1,1,1,0,0,0,0]
- * TASK_INTERACTIVE( 0): [1,1,1,1,0,0,0,0,0,0,0]
- * TASK_INTERACTIVE( 10): [1,1,0,0,0,0,0,0,0,0,0]
- * TASK_INTERACTIVE( 19): [0,0,0,0,0,0,0,0,0,0,0]
- *
- * (the X axis represents the possible -5 ... 0 ... +5 dynamic
- * priority range a task can explore, a value of '1' means the
- * task is rated interactive.)
- *
- * Ie. nice +19 tasks can never get 'interactive' enough to be
- * reinserted into the active array. And only heavily CPU-hog nice -20
- * tasks will be expired. Default nice 0 tasks are somewhere between,
- * it takes some effort for them to get interactive, but it's not
- * too hard.
- */
-
-#define CURRENT_BONUS(p) \
- (NS_TO_JIFFIES((p)->sleep_avg) * MAX_BONUS / \
- MAX_SLEEP_AVG)
-
-#define GRANULARITY (10 * HZ / 1000 ? : 1)
-
-#ifdef CONFIG_SMP
-#define TIMESLICE_GRANULARITY(p) (GRANULARITY * \
- (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)) * \
- num_online_cpus())
-#else
-#define TIMESLICE_GRANULARITY(p) (GRANULARITY * \
- (1 << (((MAX_BONUS - CURRENT_BONUS(p)) ? : 1) - 1)))
-#endif
-
-#define SCALE(v1,v1_max,v2_max) \
- (v1) * (v2_max) / (v1_max)
-
-#define DELTA(p) \
- (SCALE(TASK_NICE(p), 40, MAX_BONUS) + INTERACTIVE_DELTA)
-
-#define TASK_INTERACTIVE(p) \
- ((p)->prio <= (p)->static_prio - DELTA(p))
+#define MAX_TIMESLICE (1000 * HZ / 1000)
-#define INTERACTIVE_SLEEP(p) \
- (JIFFIES_TO_NS(MAX_SLEEP_AVG * \
- (MAX_BONUS / 2 + DELTA((p)) + 1) / MAX_BONUS - 1))
+/*
+ * UNPRIV_RT tasks that have a CPU usage rate less than this threshold
+ * (in parts per thousand) are treated as pseudo RT tasks
+ */
+#define DEFAULT_UNPRIV_RT_THRESHOLD 10
+unsigned long unpriv_rt_threshold = PROP_FM_PPT(DEFAULT_UNPRIV_RT_THRESHOLD);
-#define TASK_PREEMPTS_CURR(p, rq) \
- ((p)->prio < (rq)->curr->prio)
+unsigned long time_slice = DEF_TIMESLICE;
+static unsigned long sched_rr_time_slice = (100 * HZ / 1000);
/*
- * task_timeslice() scales user-nice values [ -20 ... 0 ... 19 ]
- * to time slice values: [800ms ... 100ms ... 5ms]
- *
- * The higher a thread's priority, the bigger timeslices
- * it gets during one round of execution. But even the lowest
- * priority thread gets MIN_TIMESLICE worth of execution time.
+ * Background tasks may have longer time slices as compensation
*/
+unsigned int bgnd_time_slice_multiplier = 1;
+unsigned int max_bgnd_time_slice_multiplier = 100;
-#define SCALE_PRIO(x, prio) \
- max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO/2), MIN_TIMESLICE)
-
-static unsigned int task_timeslice(task_t *p)
+static unsigned long task_timeslice(const task_t *p)
{
- if (p->static_prio < NICE_TO_PRIO(0))
- return SCALE_PRIO(DEF_TIMESLICE*4, p->static_prio);
- else
- return SCALE_PRIO(DEF_TIMESLICE, p->static_prio);
+ if (rt_task(p))
+ return sched_rr_time_slice;
+
+ /* hard capped tasks that never use their full time slice evade
+ * the sinbin so we need to reduce the size of their time slice
+ * to reduce the size of the hole that they slip through.
+ * It would be unwise to close it completely.
+ */
+ if (unlikely(p->cpustats.cpu_usage_rate > p->cpu_rate_hard_cap))
+ return 1;
+
+ if (unlikely(p->prio == BGND_PRIO))
+ return time_slice * bgnd_time_slice_multiplier;
+
+ return time_slice;
}
#define task_hot(p, now, sd) ((long long) ((now) - (p)->last_ran) \
< (long long) (sd)->cache_hot_time)
@@ -177,15 +109,11 @@
/*
* These are the runqueue data structures:
*/
+#define NUM_PRIO_SLOTS (IDLE_PRIO + 1)
-#define BITMAP_SIZE ((((MAX_PRIO+1+7)/8)+sizeof(long)-1)/sizeof(long))
-
-typedef struct runqueue runqueue_t;
-
-struct prio_array {
- unsigned int nr_active;
- unsigned long bitmap[BITMAP_SIZE];
- struct list_head queue[MAX_PRIO];
+struct prio_slot {
+ unsigned int prio;
+ struct list_head queue;
};
/*
@@ -215,13 +143,13 @@
* it on another CPU. Always updated under the runqueue lock:
*/
unsigned long nr_uninterruptible;
-
- unsigned long expired_timestamp;
unsigned long long timestamp_last_tick;
task_t *curr, *idle;
struct mm_struct *prev_mm;
- prio_array_t *active, *expired, arrays[2];
- int best_expired_prio;
+ DECLARE_BITMAP(bitmap, NUM_PRIO_SLOTS);
+ struct prio_slot queues[NUM_PRIO_SLOTS - 1];
+ unsigned long next_prom_due;
+ unsigned long pcount;
atomic_t nr_iowait;
#ifdef CONFIG_SMP
@@ -285,16 +213,30 @@
#define cpu_rq(cpu) (&per_cpu(runqueues, (cpu)))
#define this_rq() (&__get_cpu_var(runqueues))
-#define task_rq(p) cpu_rq(task_cpu(p))
+#define task_rq(p) ((p)->rq)
#define cpu_curr(cpu) (cpu_rq(cpu)->curr)
+#ifdef CONFIG_SMP
+void fastcall set_task_cpu(struct task_struct *p, unsigned int cpu)
+{
+ BUG_ON(!list_empty(&p->run_list));
+
+ p->thread_info->cpu = cpu;
+ p->rq = cpu_rq(cpu);
+ set_task_runq_cpustats(p, cpu);
+ p->zrq = zaphod_cpu_runq_data(cpu);
+}
+#endif
+
/*
* Default context-switch locking:
*/
#ifndef prepare_arch_switch
# define prepare_arch_switch(rq, next) do { } while (0)
# define finish_arch_switch(rq, next) spin_unlock_irq(&(rq)->lock)
-# define task_running(rq, p) ((rq)->curr == (p))
+# define task_is_running(p) (task_rq(p)->curr == (p))
+#else
+# define task_is_running(p) task_running(task_rq(p), p)
#endif
/*
@@ -560,150 +502,157 @@
#define sched_info_switch(t, next) do { } while (0)
#endif /* CONFIG_SCHEDSTATS */
+static inline int task_queued(const task_t *task)
+{
+ return !list_empty(&task->run_list);
+}
+
/*
* Adding/removing a task to/from a priority array:
*/
-static void dequeue_task(struct task_struct *p, prio_array_t *array)
+static void dequeue_task(struct task_struct *p)
{
- array->nr_active--;
- list_del(&p->run_list);
- if (list_empty(array->queue + p->prio))
- __clear_bit(p->prio, array->bitmap);
+ /*
+ * Initialize after removal from the list so that list_empty() works
+ * as a means of testing whether the task is runnable.
+ * If p is the last task in this priority slot then slotp will be
+ * a pointer to the head of the list in the runqueue structure.
+ * NB we can't use p->prio for the bitmap as the task may have been
+ * promoted.
+ */
+ struct list_head *slotp = p->run_list.next;
+
+ list_del_init(&p->run_list);
+ if (list_empty(slotp)) {
+ /* Take the opportunity to update p->prio */
+ p->prio = list_entry(slotp, struct prio_slot, queue)->prio;
+ __clear_bit(p->prio, p->rq->bitmap);
+ }
}
-static void enqueue_task(struct task_struct *p, prio_array_t *array)
+static void enqueue_task(struct task_struct *p)
{
+ runqueue_t *rq = task_rq(p);
+
sched_info_queued(p);
- list_add_tail(&p->run_list, array->queue + p->prio);
- __set_bit(p->prio, array->bitmap);
- array->nr_active++;
- p->array = array;
+ list_add_tail(&p->run_list, &rq->queues[p->prio].queue);
+ __set_bit(p->prio, rq->bitmap);
}
-/*
- * Put task to the end of the run list without the overhead of dequeue
- * followed by enqueue.
- */
-static void requeue_task(struct task_struct *p, prio_array_t *array)
+static inline void enqueue_task_head(struct task_struct *p)
{
- list_move_tail(&p->run_list, array->queue + p->prio);
-}
+ runqueue_t *rq = task_rq(p);
-static inline void enqueue_task_head(struct task_struct *p, prio_array_t *array)
-{
- list_add(&p->run_list, array->queue + p->prio);
- __set_bit(p->prio, array->bitmap);
- array->nr_active++;
- p->array = array;
+ list_add(&p->run_list, &rq->queues[p->prio].queue);
+ __set_bit(p->prio, rq->bitmap);
}
/*
* effective_prio - return the priority that is based on the static
* priority but is modified by bonuses/penalties.
- *
- * We scale the actual sleep average [0 .... MAX_SLEEP_AVG]
- * into the -5 ... 0 ... +5 bonus/penalty range.
- *
- * We use 25% of the full 0...39 priority range so that:
- *
- * 1) nice +19 interactive tasks do not preempt nice 0 CPU hogs.
- * 2) nice -20 CPU hogs do not get preempted by nice 0 tasks.
- *
- * Both properties are important to certain workloads.
*/
static int effective_prio(task_t *p)
{
- int bonus, prio;
-
if (rt_task(p))
return p->prio;
- bonus = CURRENT_BONUS(p) - MAX_BONUS / 2;
+ if (task_is_unpriv_rt(p) && avg_cpu_usage_rate(p) < unpriv_rt_threshold)
+ return MAX_RT_PRIO;
+
+ if (task_is_bgnd(p) && !(p->flags & PF_UISLEEP))
+ return BGND_PRIO;
- prio = p->static_prio - bonus;
- if (prio < MAX_RT_PRIO)
- prio = MAX_RT_PRIO;
- if (prio > MAX_PRIO-1)
- prio = MAX_PRIO-1;
- return prio;
+ /* using the minimum of the hard and soft caps makes things smoother */
+ if (unlikely(avg_cpu_usage_rate(p) > p->min_cpu_rate_cap))
+ return BGND_PRIO - 1;
+
+ return zaphod_effective_prio(p);
}
/*
- * __activate_task - move a task to the runqueue.
+ * Control value for the promotion mechanism. NB this controls the severity of "nice"
*/
-static inline void __activate_task(task_t *p, runqueue_t *rq)
+unsigned long base_prom_interval = ((DEF_TIMESLICE * 15) / 10);
+
+static inline void restart_promotions(struct runqueue *rq)
+{
+ rq->next_prom_due = jiffies + base_prom_interval;
+ rq->pcount = 1;
+}
+
+/* make it (relatively) easy to switch to using a timer */
+static inline void stop_promotions(struct runqueue *rq)
{
- enqueue_task(p, rq->active);
- rq->nr_running++;
}
/*
- * __activate_idle_task - move idle task to the _front_ of runqueue.
+ * Are promotions due?
*/
-static inline void __activate_idle_task(task_t *p, runqueue_t *rq)
+static inline int promotions_due(const runqueue_t *rq)
{
- enqueue_task_head(p, rq->active);
- rq->nr_running++;
+ return unlikely(time_after_eq(jiffies, rq->next_prom_due)) && (rq->nr_running > 1);
}
-static void recalc_task_prio(task_t *p, unsigned long long now)
+/*
+ * Assume runqueue lock is NOT already held.
+ */
+static void do_promotions(runqueue_t *rq)
{
- unsigned long long __sleep_time = now - p->timestamp;
- unsigned long sleep_time;
+ int idx = MIN_NORMAL_PRIO;
- if (__sleep_time > NS_MAX_SLEEP_AVG)
- sleep_time = NS_MAX_SLEEP_AVG;
- else
- sleep_time = (unsigned long)__sleep_time;
+ spin_lock(&rq->lock);
+ rq->pcount++;
+ if (rq->nr_running < rq->pcount) {
+ rq->next_prom_due = jiffies + base_prom_interval;
+ goto out_unlock;
+ }
+ for (;;) {
+ int new_prio;
+ idx = find_next_bit(rq->bitmap, BGND_PRIO, idx + 1);
+ if (idx > (BGND_PRIO - 1))
+ break;
- if (likely(sleep_time > 0)) {
- /*
- * User tasks that sleep a long time are categorised as
- * idle and will get just interactive status to stay active &
- * prevent them suddenly becoming cpu hogs and starving
- * other processes.
- */
- if (p->mm && p->activated != -1 &&
- sleep_time > INTERACTIVE_SLEEP(p)) {
- p->sleep_avg = JIFFIES_TO_NS(MAX_SLEEP_AVG -
- DEF_TIMESLICE);
- } else {
- /*
- * The lower the sleep avg a task has the more
- * rapidly it will rise with sleep time.
- */
- sleep_time *= (MAX_BONUS - CURRENT_BONUS(p)) ? : 1;
+ new_prio = idx - 1;
+ __list_splice(&rq->queues[idx].queue, rq->queues[new_prio].queue.prev);
+ INIT_LIST_HEAD(&rq->queues[idx].queue);
+ __clear_bit(idx, rq->bitmap);
+ __set_bit(new_prio, rq->bitmap);
+ }
+ /* The only prio field that needs update is the current task's */
+ if (likely(rq->curr->prio > MIN_NORMAL_PRIO && rq->curr->prio < BGND_PRIO))
+ rq->curr->prio--;
+ restart_promotions(rq);
+out_unlock:
+ spin_unlock(&rq->lock);
+}
- /*
- * Tasks waking from uninterruptible sleep are
- * limited in their sleep_avg rise as they
- * are likely to be waiting on I/O
- */
- if (p->activated == -1 && p->mm) {
- if (p->sleep_avg >= INTERACTIVE_SLEEP(p))
- sleep_time = 0;
- else if (p->sleep_avg + sleep_time >=
- INTERACTIVE_SLEEP(p)) {
- p->sleep_avg = INTERACTIVE_SLEEP(p);
- sleep_time = 0;
- }
- }
+/*
+ * __activate_task - move a task to the runqueue.
+ */
+static inline void __activate_task(task_t *p)
+{
+ runqueue_t *rq = task_rq(p);
- /*
- * This code gives a bonus to interactive tasks.
- *
- * The boost works by updating the 'average sleep time'
- * value here, based on ->timestamp. The more time a
- * task spends sleeping, the higher the average gets -
- * and the higher the priority boost gets as well.
- */
- p->sleep_avg += sleep_time;
+ enqueue_task(p);
+ rq->nr_running++;
+ if (rq->nr_running == 2)
+ restart_promotions(rq);
+}
- if (p->sleep_avg > NS_MAX_SLEEP_AVG)
- p->sleep_avg = NS_MAX_SLEEP_AVG;
- }
- }
+/*
+ * __activate_idle_task - move idle task to the _front_ of runqueue.
+ */
+static inline void __activate_idle_task(runqueue_t *rq)
+{
+ enqueue_task_head(rq->idle);
+ rq->nr_running++;
+ if (rq->nr_running == 2)
+ restart_promotions(rq);
+}
+static void recalc_task_prio(task_t *p, unsigned long long now)
+{
+ zaphod_reassess_at_activation(p);
p->prio = effective_prio(p);
}
@@ -713,57 +662,80 @@
* Update all the scheduling statistics stuff. (sleep average
* calculation, priority modifiers, etc.)
*/
-static void activate_task(task_t *p, runqueue_t *rq, int local)
+static void activate_task(task_t *p)
{
- unsigned long long now;
+ unsigned long long now = adjusted_sched_clock(p);
- now = sched_clock();
-#ifdef CONFIG_SMP
- if (!local) {
- /* Compensate for drifting sched_clock */
- runqueue_t *this_rq = this_rq();
- now = (now - this_rq->timestamp_last_tick)
- + rq->timestamp_last_tick;
- }
-#endif
+ if (!rt_task(p))
+ recalc_task_prio(p, now);
+
+ p->time_slice = task_timeslice(p);
+ p->flags &= ~PF_UISLEEP;
+ __activate_task(p);
+}
+
+/*
+ * deactivate_task - remove a task from the runqueue.
+ */
+static void deactivate_task(struct task_struct *p)
+{
+ runqueue_t *rq = task_rq(p);
- recalc_task_prio(p, now);
+ rq->nr_running--;
+ if (p->state == TASK_UNINTERRUPTIBLE)
+ p->flags |= PF_UISLEEP;
+ dequeue_task(p);
+ if (rq->nr_running == 1)
+ stop_promotions(rq);
+}
+
+/*
+ * Take an active task off the runqueue for a short while
+ * Assumes that the task's runqueue is already locked
+ */
+void put_task_in_sinbin(struct task_struct *p, unsigned long durn)
+{
+ if (durn == 0)
+ return;
+ deactivate_task(p);
+ p->flags |= PF_SINBINNED;
+ p->sinbin_timer.expires = jiffies + durn;
+ add_timer(&p->sinbin_timer);
+}
+
+/*
+ * Release a task from the sinbin
+ */
+void sinbin_release_fn(unsigned long arg)
+{
+ unsigned long flags;
+ struct task_struct *p = (struct task_struct*)arg;
+ runqueue_t *rq = task_rq_lock(p, &flags);
/*
- * This checks to make sure it's not an uninterruptible task
- * that is now waking up.
+ * Sinbin time is included in delay time
*/
- if (!p->activated) {
- /*
- * Tasks which were woken up by interrupts (ie. hw events)
- * are most likely of interactive nature. So we give them
- * the credit of extending their sleep time to the period
- * of time they spend on the runqueue, waiting for execution
- * on a CPU, first time around:
- */
- if (in_interrupt())
- p->activated = 2;
- else {
- /*
- * Normal first-time wakeups get a credit too for
- * on-runqueue time, but it will be weighted down:
- */
- p->activated = 1;
- }
+ delta_delay_cpustats(p, adjusted_sched_clock(p));
+ p->flags &= ~PF_SINBINNED;
+ if (!rt_task(p)) {
+ zaphod_reassess_at_sinbin_release(p);
+ p->prio = effective_prio(p);
}
- p->timestamp = now;
+ __activate_task(p);
- __activate_task(p, rq);
+ task_rq_unlock(rq, &flags);
}
-/*
- * deactivate_task - remove a task from the runqueue.
- */
-static void deactivate_task(struct task_struct *p, runqueue_t *rq)
+static inline int task_needs_sinbinning(const struct task_struct *p)
{
- rq->nr_running--;
- dequeue_task(p, p->array);
- p->array = NULL;
+ return unlikely(avg_cpu_usage_rate(p) > p->cpu_rate_hard_cap) &&
+ (p->state == TASK_RUNNING) && !rt_task(p) &&
+ ((p->flags & PF_EXITING) == 0);
+}
+
+static inline unsigned long required_sinbin_durn(const struct task_struct *p)
+{
+ return delay_in_jiffies_for_usage(p, p->cpu_rate_hard_cap);
}
/*
@@ -795,6 +767,20 @@
}
#endif
+/*
+ * preempt_curr_if_warranted - preempt the current task on this task's CPU
+ * if the circumstances warrant
+ */
+static inline void preempt_curr_if_warranted(task_t *p)
+{
+ /*
+ * Note that idle threads have a prio of IDLE_PRIO, so this test is
+ * always true when they are the current task.
+ */
+ if (p->prio < p->rq->curr->prio)
+ resched_task(p->rq->curr);
+}
+
/**
* task_curr - is this task currently executing on a CPU?
* @p: the task in question.
@@ -830,13 +816,12 @@
*/
static int migrate_task(task_t *p, int dest_cpu, migration_req_t *req)
{
- runqueue_t *rq = task_rq(p);
-
/*
* If the task is not on a runqueue (and not running), then
* it is sufficient to simply update the task's cpu field.
*/
- if (!p->array && !task_running(rq, p)) {
+ if (!task_queued(p) && !task_is_running(p)) {
+ delta_sleep_cpustats(p, adjusted_sched_clock(p));
set_task_cpu(p, dest_cpu);
return 0;
}
@@ -845,7 +830,7 @@
req->type = REQ_MOVE_TASK;
req->task = p;
req->dest_cpu = dest_cpu;
- list_add(&req->list, &rq->migration_queue);
+ list_add(&req->list, &task_rq(p)->migration_queue);
return 1;
}
@@ -867,9 +852,9 @@
repeat:
rq = task_rq_lock(p, &flags);
/* Must be off runqueue entirely, not preempted. */
- if (unlikely(p->array || task_running(rq, p))) {
+ if (unlikely(task_queued(p) || task_is_running(p))) {
/* If it's preempted, we yield. It could be a while. */
- preempted = !task_running(rq, p);
+ preempted = !task_is_running(p);
task_rq_unlock(rq, &flags);
cpu_relax();
if (preempted)
@@ -1000,14 +985,19 @@
if (!(old_state & state))
goto out;
- if (p->array)
+ if (task_queued(p))
goto out_running;
+ /*
+ * This is the end of one scheduling cycle and the start of the next
+ */
+ update_cpustats_at_wake_up(p, adjusted_sched_clock(p));
+
cpu = task_cpu(p);
this_cpu = smp_processor_id();
#ifdef CONFIG_SMP
- if (unlikely(task_running(rq, p)))
+ if (unlikely(task_is_running(p)))
goto out_activate;
new_cpu = cpu;
@@ -1079,7 +1069,7 @@
old_state = p->state;
if (!(old_state & state))
goto out;
- if (p->array)
+ if (task_queued(p))
goto out_running;
this_cpu = smp_processor_id();
@@ -1088,14 +1078,8 @@
out_activate:
#endif /* CONFIG_SMP */
- if (old_state == TASK_UNINTERRUPTIBLE) {
+ if (old_state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible--;
- /*
- * Tasks on involuntary sleep don't earn
- * sleep_avg beyond just interactive state.
- */
- p->activated = -1;
- }
/*
* Sync wakeups (i.e. those types of wakeups where the waker
@@ -1105,11 +1089,9 @@
* the waker guarantees that the freshly woken up task is going
* to be considered on this CPU.)
*/
- activate_task(p, rq, cpu == this_cpu);
- if (!sync || cpu != this_cpu) {
- if (TASK_PREEMPTS_CURR(p, rq))
- resched_task(rq->curr);
- }
+ activate_task(p);
+ if (!sync || cpu != this_cpu)
+ preempt_curr_if_warranted(p);
success = 1;
out_running:
@@ -1144,6 +1126,8 @@
*/
void fastcall sched_fork(task_t *p)
{
+ unsigned long long now;
+
/*
* We mark the process as running here, but have not actually
* inserted it onto the runqueue yet. This guarantees that
@@ -1152,8 +1136,9 @@
*/
p->state = TASK_RUNNING;
INIT_LIST_HEAD(&p->run_list);
- p->array = NULL;
spin_lock_init(&p->switch_lock);
+ init_timer(&p->sinbin_timer);
+ p->sinbin_timer.data = (unsigned long) p;
#ifdef CONFIG_SCHEDSTATS
memset(&p->sched_info, 0, sizeof(p->sched_info));
#endif
@@ -1166,33 +1151,18 @@
*/
p->thread_info->preempt_count = 1;
#endif
+ local_irq_disable();
/*
- * Share the timeslice between parent and child, thus the
- * total amount of pending timeslices in the system doesn't change,
- * resulting in more scheduling fairness.
+ * Give the task a new timeslice.
*/
- local_irq_disable();
- p->time_slice = (current->time_slice + 1) >> 1;
+ p->time_slice = task_timeslice(p);
+ now = sched_clock();
+ local_irq_enable();
/*
- * The remainder of the first timeslice might be recovered by
- * the parent if the child exits early enough.
+ * Initialize the scheduling statistics
*/
- p->first_time_slice = 1;
- current->time_slice >>= 1;
- p->timestamp = sched_clock();
- if (unlikely(!current->time_slice)) {
- /*
- * This case is rare, it happens when the parent has only
- * a single jiffy left from its timeslice. Taking the
- * runqueue lock is not a problem.
- */
- current->time_slice = 1;
- preempt_disable();
- scheduler_tick();
- local_irq_enable();
- preempt_enable();
- } else
- local_irq_enable();
+ initialize_cpustats(p, now);
+ zaphod_fork(p);
}
/*
@@ -1202,112 +1172,70 @@
* that must be done for every newly created context, then puts the task
* on the runqueue and wakes it.
*/
+#ifdef CONFIG_SMP
+#define assigned_to_this_rq(p) (likely((p)->rq == this_rq()))
+#else
+#define assigned_to_this_rq(p) 1
+#endif
void fastcall wake_up_new_task(task_t * p, unsigned long clone_flags)
{
unsigned long flags;
- int this_cpu, cpu;
- runqueue_t *rq, *this_rq;
+ runqueue_t *rq;
rq = task_rq_lock(p, &flags);
- cpu = task_cpu(p);
- this_cpu = smp_processor_id();
BUG_ON(p->state != TASK_RUNNING);
schedstat_inc(rq, wunt_cnt);
- /*
- * We decrease the sleep average of forking parents
- * and children as well, to keep max-interactive tasks
- * from forking tasks that are max-interactive. The parent
- * (current) is done further down, under its lock.
- */
- p->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(p) *
- CHILD_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
-
- p->prio = effective_prio(p);
- if (likely(cpu == this_cpu)) {
+ if (assigned_to_this_rq(p)) {
if (!(clone_flags & CLONE_VM)) {
/*
* The VM isn't cloned, so we're in a good position to
* do child-runs-first in anticipation of an exec. This
* usually avoids a lot of COW overhead.
*/
- if (unlikely(!current->array))
- __activate_task(p, rq);
- else {
+ if (unlikely(!task_queued(current))) {
+ p->prio = effective_prio(p);
+ __activate_task(p);
+ } else {
p->prio = current->prio;
list_add_tail(&p->run_list, &current->run_list);
- p->array = current->array;
- p->array->nr_active++;
rq->nr_running++;
}
set_need_resched();
- } else
+ } else {
/* Run child last */
- __activate_task(p, rq);
- /*
- * We skip the following code due to cpu == this_cpu
- *
- * task_rq_unlock(rq, &flags);
- * this_rq = task_rq_lock(current, &flags);
- */
- this_rq = rq;
+ p->prio = effective_prio(p);
+ __activate_task(p);
+ }
} else {
- this_rq = cpu_rq(this_cpu);
-
- /*
- * Not the local CPU - must adjust timestamp. This should
- * get optimised away in the !CONFIG_SMP case.
- */
- p->timestamp = (p->timestamp - this_rq->timestamp_last_tick)
- + rq->timestamp_last_tick;
- __activate_task(p, rq);
- if (TASK_PREEMPTS_CURR(p, rq))
- resched_task(rq->curr);
-
+ p->prio = effective_prio(p);
+ __activate_task(p);
+ preempt_curr_if_warranted(p);
schedstat_inc(rq, wunt_moved);
- /*
- * Parent and child are on different CPUs, now get the
- * parent runqueue to update the parent's ->sleep_avg:
- */
- task_rq_unlock(rq, &flags);
- this_rq = task_rq_lock(current, &flags);
}
- current->sleep_avg = JIFFIES_TO_NS(CURRENT_BONUS(current) *
- PARENT_PENALTY / 100 * MAX_SLEEP_AVG / MAX_BONUS);
- task_rq_unlock(this_rq, &flags);
+ task_rq_unlock(rq, &flags);
}
-/*
- * Potentially available exiting-child timeslices are
- * retrieved here - this way the parent does not get
- * penalized for creating too many threads.
- *
- * (this cannot be used to 'generate' timeslices
- * artificially, because any timeslice recovered here
- * was given away by the parent in the first place.)
+/**
+ * No more timeslice fiddling on exit
+ * (Optionally) log scheduler statistics at exit.
*/
+static int log_at_exit = 0;
void fastcall sched_exit(task_t * p)
{
- unsigned long flags;
- runqueue_t *rq;
+ struct task_cpustats stats;
- /*
- * If the child was a (relative-) CPU hog then decrease
- * the sleep_avg of the parent as well.
- */
- rq = task_rq_lock(p->parent, &flags);
- if (p->first_time_slice) {
- p->parent->time_slice += p->time_slice;
- if (unlikely(p->parent->time_slice > task_timeslice(p)))
- p->parent->time_slice = task_timeslice(p);
- }
- if (p->sleep_avg < p->parent->sleep_avg)
- p->parent->sleep_avg = p->parent->sleep_avg /
- (EXIT_WEIGHT + 1) * EXIT_WEIGHT + p->sleep_avg /
- (EXIT_WEIGHT + 1);
- task_rq_unlock(rq, &flags);
+ if (!log_at_exit)
+ return;
+
+ get_task_cpustats(p, &stats);
+ printk("SCHED_EXIT[%d] (%s) %llu %llu %llu %llu %llu %llu %lu %lu\n",
+ p->pid, p->comm,
+ stats.total_sleep, stats.total_cpu, stats.total_delay,
+ stats.total_sinbin, stats.total_wake_ups, stats.intr_wake_ups,
+ p->nvcsw, p->nivcsw);
}
/**
@@ -1626,30 +1554,25 @@
* Both runqueues must be locked.
*/
static inline
-void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p,
- runqueue_t *this_rq, prio_array_t *this_array, int this_cpu)
+void pull_task(task_t *p, int this_cpu)
{
- dequeue_task(p, src_array);
+ runqueue_t *src_rq = task_rq(p), *this_rq;
+
+ dequeue_task(p);
src_rq->nr_running--;
+ delta_delay_cpustats(p, adjusted_sched_clock(p));
set_task_cpu(p, this_cpu);
+ this_rq = task_rq(p);
this_rq->nr_running++;
- enqueue_task(p, this_array);
- p->timestamp = (p->timestamp - src_rq->timestamp_last_tick)
- + this_rq->timestamp_last_tick;
- /*
- * Note that idle threads have a prio of MAX_PRIO, for this test
- * to be always true for them.
- */
- if (TASK_PREEMPTS_CURR(p, this_rq))
- resched_task(this_rq->curr);
+ enqueue_task(p);
+ preempt_curr_if_warranted(p);
}
/*
* can_migrate_task - may task p from runqueue rq be migrated to this_cpu?
*/
static inline
-int can_migrate_task(task_t *p, runqueue_t *rq, int this_cpu,
- struct sched_domain *sd, enum idle_type idle)
+int can_migrate_task(task_t *p, int this_cpu, struct sched_domain *sd, enum idle_type idle)
{
/*
* We do not migrate tasks that are:
@@ -1657,7 +1580,7 @@
* 2) cannot be migrated to this CPU due to cpus_allowed, or
* 3) are cache-hot on their current CPU.
*/
- if (task_running(rq, p))
+ if (task_is_running(p))
return 0;
if (!cpu_isset(this_cpu, p->cpus_allowed))
return 0;
@@ -1672,7 +1595,7 @@
sd->nr_balance_failed > sd->cache_nice_tries)
return 1;
- if (task_hot(p, rq->timestamp_last_tick, sd))
+ if (task_hot(p, p->rq->timestamp_last_tick, sd))
return 0;
return 1;
}
@@ -1688,7 +1611,6 @@
unsigned long max_nr_move, struct sched_domain *sd,
enum idle_type idle)
{
- prio_array_t *array, *dst_array;
struct list_head *head, *curr;
int idx, pulled = 0;
task_t *tmp;
@@ -1696,45 +1618,26 @@
if (max_nr_move <= 0 || busiest->nr_running <= 1)
goto out;
- /*
- * We first consider expired tasks. Those will likely not be
- * executed in the near future, and they are most likely to
- * be cache-cold, thus switching CPUs has the least effect
- * on them.
- */
- if (busiest->expired->nr_active) {
- array = busiest->expired;
- dst_array = this_rq->expired;
- } else {
- array = busiest->active;
- dst_array = this_rq->active;
- }
-
-new_array:
/* Start searching at priority 0: */
idx = 0;
skip_bitmap:
if (!idx)
- idx = sched_find_first_bit(array->bitmap);
+ idx = sched_find_first_bit(busiest->bitmap);
else
- idx = find_next_bit(array->bitmap, MAX_PRIO, idx);
- if (idx >= MAX_PRIO) {
- if (array == busiest->expired && busiest->active->nr_active) {
- array = busiest->active;
- dst_array = this_rq->active;
- goto new_array;
- }
+ idx = find_next_bit(busiest->bitmap, IDLE_PRIO, idx);
+ if (idx >= IDLE_PRIO)
goto out;
- }
- head = array->queue + idx;
+ head = &busiest->queues[idx].queue;
curr = head->prev;
skip_queue:
tmp = list_entry(curr, task_t, run_list);
+ /* take the opportunity to update tmp's prio field */
+ tmp->prio = idx;
curr = curr->prev;
- if (!can_migrate_task(tmp, busiest, this_cpu, sd, idle)) {
+ if (!can_migrate_task(tmp, this_cpu, sd, idle)) {
if (curr != head)
goto skip_queue;
idx++;
@@ -1749,7 +1652,7 @@
schedstat_inc(this_rq, pt_gained[idle]);
schedstat_inc(busiest, pt_lost[idle]);
- pull_task(busiest, array, tmp, this_rq, dst_array, this_cpu);
+ pull_task(tmp, this_cpu);
pulled++;
/* We only want to steal up to the prescribed number of tasks. */
@@ -2220,8 +2123,9 @@
static inline int wake_priority_sleeper(runqueue_t *rq)
{
- int ret = 0;
#ifdef CONFIG_SCHED_SMT
+ int ret = 0;
+
spin_lock(&rq->lock);
/*
* If an SMT sibling task has been put to sleep for priority
@@ -2232,8 +2136,11 @@
ret = 1;
}
spin_unlock(&rq->lock);
-#endif
+
return ret;
+#else
+ return 0;
+#endif
}
DEFINE_PER_CPU(struct kernel_stat, kstat);
@@ -2241,22 +2148,6 @@
EXPORT_PER_CPU_SYMBOL(kstat);
/*
- * We place interactive tasks back into the active array, if possible.
- *
- * To guarantee that this does not starve expired tasks we ignore the
- * interactivity of a task if the first expired task had to wait more
- * than a 'reasonable' amount of time. This deadline timeout is
- * load-dependent, as the frequency of array switched decreases with
- * increasing number of running tasks. We also ignore the interactivity
- * if a better static_prio task has expired:
- */
-#define EXPIRED_STARVING(rq) \
- ((STARVATION_LIMIT && ((rq)->expired_timestamp && \
- (jiffies - (rq)->expired_timestamp >= \
- STARVATION_LIMIT * ((rq)->nr_running) + 1))) || \
- ((rq)->curr->static_prio > (rq)->best_expired_prio))
-
-/*
* Do the virtual cpu time signal calculations.
* @p: the process that the cpu time gets accounted to
* @cputime: the cpu time spent in user space since the last update
@@ -2416,6 +2307,8 @@
task_t *p = current;
rq->timestamp_last_tick = sched_clock();
+ tick_runq_cpustats(p->csrq, rq->timestamp_last_tick);
+ zaphod_runq_data_tick(p->zrq, rq->nr_running);
if (p == rq->idle) {
if (wake_priority_sleeper(rq))
@@ -2424,79 +2317,34 @@
return;
}
- /* Task might have expired already, but not scheduled off yet */
- if (p->array != rq->active) {
- set_tsk_need_resched(p);
+ /*
+ * FIFO tasks have no timeslices.
+ */
+ if (unlikely(p->policy == SCHED_FIFO))
goto out;
- }
+
spin_lock(&rq->lock);
/*
- * The task was running during this tick - update the
- * time slice counter. Note: we do not update a thread's
- * priority until it either goes to sleep or uses up its
- * timeslice. This makes it possible for interactive tasks
- * to use up their timeslices at their highest priority levels.
+ * The task was running during this tick
*/
- if (rt_task(p)) {
- /*
- * RR tasks need a special form of timeslice management.
- * FIFO tasks have no timeslices.
- */
- if ((p->policy == SCHED_RR) && !--p->time_slice) {
- p->time_slice = task_timeslice(p);
- p->first_time_slice = 0;
- set_tsk_need_resched(p);
-
- /* put it at the end of the queue: */
- requeue_task(p, rq->active);
- }
- goto out_unlock;
- }
if (!--p->time_slice) {
- dequeue_task(p, rq->active);
+ dequeue_task(p);
set_tsk_need_resched(p);
- p->prio = effective_prio(p);
- p->time_slice = task_timeslice(p);
- p->first_time_slice = 0;
-
- if (!rq->expired_timestamp)
- rq->expired_timestamp = jiffies;
- if (!TASK_INTERACTIVE(p) || EXPIRED_STARVING(rq)) {
- enqueue_task(p, rq->expired);
- if (p->static_prio < rq->best_expired_prio)
- rq->best_expired_prio = p->static_prio;
- } else
- enqueue_task(p, rq->active);
- } else {
- /*
- * Prevent a too long timeslice allowing a task to monopolize
- * the CPU. We do this by splitting up the timeslice into
- * smaller pieces.
- *
- * Note: this does not mean the task's timeslices expire or
- * get lost in any way, they just might be preempted by
- * another task of equal priority. (one with higher
- * priority would have preempted this task already.) We
- * requeue this task to the end of the list on this priority
- * level, which is in essence a round-robin of tasks with
- * equal priority.
- *
- * This only applies to tasks in the interactive
- * delta range with at least TIMESLICE_GRANULARITY to requeue.
- */
- if (TASK_INTERACTIVE(p) && !((task_timeslice(p) -
- p->time_slice) % TIMESLICE_GRANULARITY(p)) &&
- (p->time_slice >= TIMESLICE_GRANULARITY(p)) &&
- (p->array == rq->active)) {
-
- requeue_task(p, rq->active);
- set_tsk_need_resched(p);
- }
+ update_cpustats_at_end_of_ts(p, rq->timestamp_last_tick);
+ if (unlikely(p->policy == SCHED_RR))
+ p->time_slice = sched_rr_time_slice;
+ else {
+ zaphod_reassess_at_end_of_ts(p);
+ p->prio = effective_prio(p);
+ p->time_slice = task_timeslice(p);
+ }
+ enqueue_task(p);
}
-out_unlock:
spin_unlock(&rq->lock);
out:
rebalance_tick(cpu, rq, NOT_IDLE);
+ if (unlikely(promotions_due(rq)))
+ do_promotions(rq);
}
#ifdef CONFIG_SCHED_SMT
@@ -2545,12 +2393,33 @@
*/
}
+/* maximum expected priority difference for SCHED_NORMAL tasks */
+#define MAX_SN_PD (IDLE_PRIO - MIN_NORMAL_PRIO)
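+/*
+ * Decide whether p1 warrants a share of the physical CPU's resources at
+ * p2's expense.  Roughly: when p1 has the better priority it trumps p2
+ * if p1 is a real time task, or if p1's remaining timeslice exceeds a
+ * threshold scaled by the domain's per_cpu_gain that shrinks as p1's
+ * priority advantage over p2 grows.
+ */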
+static inline int dependent_sleeper_trumps(const task_t *p1,
+ const task_t * p2, unsigned int rq_percent_ts_rm)
+{
+ int dp = p2->prio - p1->prio;
+
+ if ((dp > 0) && (rq_percent_ts_rm < 100) && p2->mm && !rt_task(p2)) {
+ unsigned long rq_ts_rm;
+
+ if (rt_task(p1))
+ return 1;
+
+ rq_ts_rm = ((MAX_SN_PD - dp) * time_slice * rq_percent_ts_rm) /
+ (100 * MAX_SN_PD);
+
+ return p1->time_slice > rq_ts_rm;
+ }
+
+ return 0;
+}
+
static inline int dependent_sleeper(int this_cpu, runqueue_t *this_rq)
{
struct sched_domain *sd = this_rq->sd;
cpumask_t sibling_map;
- prio_array_t *array;
- int ret = 0, i;
+ int ret = 0, i, idx;
task_t *p;
if (!(sd->flags & SD_SHARE_CPUPOWER))
@@ -2572,13 +2441,11 @@
*/
if (!this_rq->nr_running)
goto out_unlock;
- array = this_rq->active;
- if (!array->nr_active)
- array = this_rq->expired;
- BUG_ON(!array->nr_active);
- p = list_entry(array->queue[sched_find_first_bit(array->bitmap)].next,
- task_t, run_list);
+ idx = sched_find_first_bit(this_rq->bitmap);
+ p = list_entry(this_rq->queues[idx].queue.next, task_t, run_list);
+ /* take the opportunity to update p's prio field */
+ p->prio = idx;
for_each_cpu_mask(i, sibling_map) {
runqueue_t *smt_rq = cpu_rq(i);
@@ -2592,9 +2459,7 @@
* task from using an unfair proportion of the
* physical cpu's resources. -ck
*/
- if (((smt_curr->time_slice * (100 - sd->per_cpu_gain) / 100) >
- task_timeslice(p) || rt_task(smt_curr)) &&
- p->mm && smt_curr->mm && !rt_task(p))
+ if (dependent_sleeper_trumps(smt_curr, p, sd->per_cpu_gain))
ret = 1;
/*
@@ -2602,9 +2467,7 @@
* or wake it up if it has been put to sleep for priority
* reasons.
*/
- if ((((p->time_slice * (100 - sd->per_cpu_gain) / 100) >
- task_timeslice(smt_curr) || rt_task(p)) &&
- smt_curr->mm && p->mm && !rt_task(smt_curr)) ||
+ if (dependent_sleeper_trumps(p, smt_curr, sd->per_cpu_gain) ||
(smt_curr == smt_rq->idle && smt_rq->nr_running))
resched_task(smt_curr);
}
@@ -2664,10 +2527,8 @@
long *switch_count;
task_t *prev, *next;
runqueue_t *rq;
- prio_array_t *array;
struct list_head *queue;
unsigned long long now;
- unsigned long run_time;
int cpu, idx;
/*
@@ -2703,16 +2564,6 @@
schedstat_inc(rq, sched_cnt);
now = sched_clock();
- if (likely(now - prev->timestamp < NS_MAX_SLEEP_AVG))
- run_time = now - prev->timestamp;
- else
- run_time = NS_MAX_SLEEP_AVG;
-
- /*
- * Tasks charged proportionately less run_time at high sleep_avg to
- * delay them losing their interactive status
- */
- run_time /= (CURRENT_BONUS(prev) ? : 1);
spin_lock_irq(&rq->lock);
@@ -2728,17 +2579,20 @@
else {
if (prev->state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible++;
- deactivate_task(prev, rq);
+ deactivate_task(prev);
}
}
+ delta_cpu_cpustats(prev, now);
+ if (task_needs_sinbinning(prev) && !signal_pending(prev))
+ put_task_in_sinbin(prev, required_sinbin_durn(prev));
+
cpu = smp_processor_id();
if (unlikely(!rq->nr_running)) {
go_idle:
idle_balance(cpu, rq);
if (!rq->nr_running) {
next = rq->idle;
- rq->expired_timestamp = 0;
wake_sleeping_dependent(cpu, rq);
/*
* wake_sleeping_dependent() might have released
@@ -2762,36 +2616,13 @@
goto go_idle;
}
- array = rq->active;
- if (unlikely(!array->nr_active)) {
- /*
- * Switch the active and expired arrays.
- */
- schedstat_inc(rq, sched_switch);
- rq->active = rq->expired;
- rq->expired = array;
- array = rq->active;
- rq->expired_timestamp = 0;
- rq->best_expired_prio = MAX_PRIO;
- } else
- schedstat_inc(rq, sched_noswitch);
+ schedstat_inc(rq, sched_noswitch);
- idx = sched_find_first_bit(array->bitmap);
- queue = array->queue + idx;
+ idx = sched_find_first_bit(rq->bitmap);
+ queue = &rq->queues[idx].queue;
next = list_entry(queue->next, task_t, run_list);
-
- if (!rt_task(next) && next->activated > 0) {
- unsigned long long delta = now - next->timestamp;
-
- if (next->activated == 1)
- delta = delta * (ON_RUNQUEUE_WEIGHT * 128 / 100) / 128;
-
- array = next->array;
- dequeue_task(next, array);
- recalc_task_prio(next, next->timestamp + delta);
- enqueue_task(next, array);
- }
- next->activated = 0;
+ /* take the opportunity to update next's prio field */
+ next->prio = idx;
switch_tasks:
if (next == rq->idle)
schedstat_inc(rq, sched_goidle);
@@ -2799,14 +2630,11 @@
clear_tsk_need_resched(prev);
rcu_qsctr_inc(task_cpu(prev));
- prev->sleep_avg -= run_time;
- if ((long)prev->sleep_avg <= 0)
- prev->sleep_avg = 0;
- prev->timestamp = prev->last_ran = now;
+ prev->last_ran = now;
sched_info_switch(prev, next);
if (likely(prev != next)) {
- next->timestamp = now;
+ delta_delay_cpustats(next, now);
rq->nr_switches++;
rq->curr = next;
++*switch_count;
@@ -3227,9 +3055,7 @@
void set_user_nice(task_t *p, long nice)
{
unsigned long flags;
- prio_array_t *array;
runqueue_t *rq;
- int old_prio, new_prio, delta;
if (TASK_NICE(p) == nice || nice < -20 || nice > 19)
return;
@@ -3238,36 +3064,31 @@
* the task might be in the middle of scheduling on another CPU.
*/
rq = task_rq_lock(p, &flags);
+
+ p->static_prio = NICE_TO_PRIO(nice);
+ zaphod_reassess_at_renice(p);
/*
* The RT priorities are set via sched_setscheduler(), but we still
* allow the 'normal' nice value to be set - but as expected
* it wont have any effect on scheduling until the task is
* not SCHED_NORMAL:
+ * There's no need to set "prio" if the task isn't queued as it will
+ * get set during activation.
*/
- if (rt_task(p)) {
- p->static_prio = NICE_TO_PRIO(nice);
- goto out_unlock;
- }
- array = p->array;
- if (array)
- dequeue_task(p, array);
-
- old_prio = p->prio;
- new_prio = NICE_TO_PRIO(nice);
- delta = new_prio - old_prio;
- p->static_prio = NICE_TO_PRIO(nice);
- p->prio += delta;
+ if (!rt_task(p) && task_queued(p)) {
+ int delta = -p->prio;
- if (array) {
- enqueue_task(p, array);
+ dequeue_task(p);
+ delta += (p->prio = effective_prio(p));
+ enqueue_task(p);
/*
* If the task increased its priority or is running and
* lowered its priority, then reschedule its CPU:
*/
- if (delta < 0 || (delta > 0 && task_running(rq, p)))
+ if (delta < 0 || (delta > 0 && task_is_running(p)))
resched_task(rq->curr);
}
-out_unlock:
+
task_rq_unlock(rq, &flags);
}
@@ -3317,6 +3138,116 @@
#endif
+/*
+ * Require: 0 <= new_cap <= 1000
+ */
+int set_cpu_rate_cap(struct task_struct *p, unsigned long new_cap)
+{
+ int is_allowed;
+ unsigned long flags;
+ runqueue_t *rq;
+ long delta;
+
+ if (new_cap > 1000)
+ return -EINVAL;
+ is_allowed = capable(CAP_SYS_NICE);
+ /*
+ * We have to be careful, if called from /proc code,
+ * the task might be in the middle of scheduling on another CPU.
+ */
+ new_cap = ppt_to_proportion(new_cap);
+ rq = task_rq_lock(p, &flags);
+ delta = new_cap - p->cpu_rate_cap;
+ if (!is_allowed) {
+ /*
+ * Ordinary users can set/change caps on their own tasks
+ * provided that the new setting is MORE constraining
+ */
+ if (((current->euid != p->uid) && (current->uid != p->uid)) || (delta > 0)) {
+ task_rq_unlock(rq, &flags);
+ return -EPERM;
+ }
+ }
+ /*
+ * The RT tasks don't have caps, but we still allow the caps to be
+ * set - but as expected it won't have any effect on scheduling until
+ * the task becomes SCHED_NORMAL:
+ */
+ p->cpu_rate_cap = new_cap;
+ if (p->cpu_rate_cap < p->cpu_rate_hard_cap)
+ p->min_cpu_rate_cap = p->cpu_rate_cap;
+ else
+ p->min_cpu_rate_cap = p->cpu_rate_hard_cap;
+ zaphod_reassess_at_renice(p);
+ if (!rt_task(p) && task_queued(p)) {
+ int delta = -p->prio;
+
+ dequeue_task(p);
+ delta += p->prio = effective_prio(p);
+ enqueue_task(p);
+ /*
+ * If the task increased its priority or is running and
+ * lowered its priority, then reschedule its CPU:
+ */
+ if (delta < 0 || (delta > 0 && task_is_running(p)))
+ resched_task(rq->curr);
+ }
+ task_rq_unlock(rq, &flags);
+ return 0;
+}
+
+EXPORT_SYMBOL(set_cpu_rate_cap);
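+/*
+ * Caps are specified in parts per thousand of a single CPU.  For example
+ *
+ *	err = set_cpu_rate_cap(p, 500);
+ *
+ * should limit p to roughly half a CPU.  Values above 1000 are rejected
+ * with -EINVAL, and callers without CAP_SYS_NICE may only make the caps
+ * on their own tasks more constraining (anything else gets -EPERM).
+ */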
+
+/*
+ * Require: 1 <= new_cap <= 1000
+ */
+int set_cpu_rate_hard_cap(struct task_struct *p, unsigned long new_cap)
+{
+ int is_allowed;
+ unsigned long flags;
+ runqueue_t *rq;
+ long delta;
+
+ if ((new_cap > 1000) || (new_cap == 0)) /* zero hard caps are not allowed */
+ return -EINVAL;
+ is_allowed = capable(CAP_SYS_NICE);
+ new_cap = ppt_to_proportion(new_cap);
+ /*
+ * We have to be careful, if called from /proc code,
+ * the task might be in the middle of scheduling on another CPU.
+ */
+ rq = task_rq_lock(p, &flags);
+ delta = new_cap - p->cpu_rate_hard_cap;
+ if (!is_allowed) {
+ /*
+ * Ordinary users can set/change caps on their own tasks
+ * provided that the new setting is MORE constraining
+ */
+ if (((current->euid != p->uid) && (current->uid != p->uid)) || (delta > 0)) {
+ task_rq_unlock(rq, &flags);
+ return -EPERM;
+ }
+ }
+ /*
+ * The RT tasks don't have caps, but we still allow the caps to be
+ * set - but as expected it won't have any effect on scheduling until
+ * the task becomes SCHED_NORMAL:
+ */
+ p->cpu_rate_hard_cap = new_cap;
+ if (p->cpu_rate_cap < p->cpu_rate_hard_cap)
+ p->min_cpu_rate_cap = p->cpu_rate_cap;
+ else
+ p->min_cpu_rate_cap = p->cpu_rate_hard_cap;
+ zaphod_reassess_at_renice(p);
+ /* (POSSIBLY) TODO: if it's sinbinned and the cap is relaxed then
+ * release it from the sinbin
+ */
+ task_rq_unlock(rq, &flags);
+ return 0;
+}
+
+EXPORT_SYMBOL(set_cpu_rate_hard_cap);
+
/**
* task_prio - return the priority value of a given task.
* @p: the task in question.
@@ -3380,7 +3311,7 @@
/* Actually do priority change: must hold rq lock. */
static void __setscheduler(struct task_struct *p, int policy, int prio)
{
- BUG_ON(p->array);
+ BUG_ON(task_queued(p));
p->policy = policy;
p->rt_priority = prio;
if (policy != SCHED_NORMAL)
@@ -3400,7 +3331,7 @@
{
int retval;
int oldprio, oldpolicy = -1;
- prio_array_t *array;
+ int queued;
unsigned long flags;
runqueue_t *rq;
@@ -3422,8 +3353,14 @@
return -EINVAL;
if ((policy == SCHED_FIFO || policy == SCHED_RR) &&
- !capable(CAP_SYS_NICE))
+ !capable(CAP_SYS_NICE)) {
+ if (current->euid == p->uid) {
+ rq = task_rq_lock(p, &flags);
+ p->flags |= PF_UNPRIV_RT;
+ task_rq_unlock(rq, &flags);
+ }
return -EPERM;
+ }
if ((current->euid != p->euid) && (current->euid != p->uid) &&
!capable(CAP_SYS_NICE))
return -EPERM;
@@ -3442,23 +3379,23 @@
task_rq_unlock(rq, &flags);
goto recheck;
}
- array = p->array;
- if (array)
- deactivate_task(p, rq);
+ queued = task_queued(p);
+ if (queued)
+ deactivate_task(p);
oldprio = p->prio;
__setscheduler(p, policy, param->sched_priority);
- if (array) {
- __activate_task(p, rq);
+ if (queued) {
+ __activate_task(p);
/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
* this runqueue and our priority is higher than the current's
*/
- if (task_running(rq, p)) {
+ if (task_is_running(p)) {
if (p->prio > oldprio)
resched_task(rq->curr);
- } else if (TASK_PREEMPTS_CURR(p, rq))
- resched_task(rq->curr);
+ } else
+ preempt_curr_if_warranted(p);
}
task_rq_unlock(rq, &flags);
return 0;
@@ -3705,6 +3642,96 @@
return sizeof(cpumask_t);
}
+int get_task_cpustats(struct task_struct *tsk, struct task_cpustats *stats)
+{
+ int on_runq = 0;
+ int on_cpu = 0;
+ unsigned long long timestamp = 0;
+ unsigned long flags;
+ runqueue_t *rq = task_rq_lock(tsk, &flags);
+
+ *stats = tsk->cpustats;
+#ifdef CONFIG_SMP
+ timestamp = tsk->csrq->timestamp_last_tick;
+#endif
+ if ((on_runq = task_queued(tsk)))
+ on_cpu = task_is_running(tsk);
+
+ task_rq_unlock(rq, &flags);
+
+ /*
+ * Update values to the previous tick (only)
+ */
+ if (timestamp > stats->timestamp) {
+ unsigned long long delta = timestamp - stats->timestamp;
+
+ stats->timestamp = timestamp;
+ if (on_cpu) {
+ stats->total_cpu += delta;
+ } else if (on_runq) {
+ stats->total_delay += delta;
+ } else {
+ stats->total_sleep += delta;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(get_task_cpustats);
+
+/*
+ * Get scheduling statistics for the nominated CPU
+ */
+int get_cpu_cpustats(unsigned int cpu, struct cpu_cpustats *stats)
+{
+ int idle;
+ unsigned long long idle_timestamp;
+ runqueue_t *rq = cpu_rq(cpu);
+ struct runq_cpustats *csrq = cpu_runq_cpustats(cpu);
+ /*
+ * No need to crash the whole machine if they've asked for stats for
+ * a non-existent CPU.
+ */
+ if (rq == NULL)
+ return -EFAULT;
+
+ local_irq_disable();
+ spin_lock(&rq->lock);
+ idle = rq->curr == rq->idle;
+#ifdef CONFIG_SMP
+ if (csrq->timestamp_last_tick > rq->curr->cpustats.timestamp)
+ stats->timestamp = csrq->timestamp_last_tick;
+ else
+#endif
+ stats->timestamp = rq->curr->cpustats.timestamp;
+ idle_timestamp = rq->idle->cpustats.timestamp;
+ if (idle_timestamp > stats->timestamp)
+ stats->timestamp = idle_timestamp;
+ stats->total_idle = rq->idle->cpustats.total_cpu;
+ stats->total_busy = rq->idle->cpustats.total_delay;
+ stats->total_delay = csrq->total_delay;
+ stats->total_rt_delay = csrq->total_rt_delay;
+ stats->total_intr_delay = csrq->total_intr_delay;
+ stats->total_rt_intr_delay = csrq->total_rt_intr_delay;
+ stats->total_fork_delay = csrq->total_fork_delay;
+ stats->total_sinbin = csrq->total_sinbin;
+ stats->nr_switches = rq->nr_switches;
+ spin_unlock_irq(&rq->lock);
+
+ /*
+ * Update idle/busy time to the current tick
+ */
+ if (idle)
+ stats->total_idle += (stats->timestamp - idle_timestamp);
+ else
+ stats->total_busy += (stats->timestamp - idle_timestamp);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(get_cpu_cpustats);
+
/**
* sys_sched_yield - yield the current processor to other threads.
*
@@ -3715,35 +3742,30 @@
asmlinkage long sys_sched_yield(void)
{
runqueue_t *rq = this_rq_lock();
- prio_array_t *array = current->array;
- prio_array_t *target = rq->expired;
schedstat_inc(rq, yld_cnt);
- /*
- * We implement yielding by moving the task into the expired
- * queue.
- *
- * (special rule: RT tasks will just roundrobin in the active
- * array.)
- */
- if (rt_task(current))
- target = rq->active;
-
- if (current->array->nr_active == 1) {
- schedstat_inc(rq, yld_act_empty);
- if (!rq->expired->nr_active)
- schedstat_inc(rq, yld_both_empty);
- } else if (!rq->expired->nr_active)
- schedstat_inc(rq, yld_exp_empty);
-
- if (array != target) {
- dequeue_task(current, array);
- enqueue_task(current, target);
- } else
- /*
- * requeue_task is cheaper so perform that if possible.
- */
- requeue_task(current, array);
+ /* If there are other tasks on this CPU make sure that at least
+ * one of them gets some CPU before this task's next bite of the
+ * cherry. Dequeue before looking for the appropriate run
+ * queue so that we don't find our queue if we were the sole
+ * occupant of that queue.
+ */
+ dequeue_task(current);
+ /*
+ * special rule: RT tasks will just roundrobin.
+ */
+ if (likely(!rt_task(current))) {
+ int idx = find_next_bit(rq->bitmap, IDLE_PRIO, current->prio);
+ if (idx < IDLE_PRIO) {
+ if ((idx < BGND_PRIO) || task_is_bgnd(current))
+ current->prio = idx;
+ else
+ current->prio = BGND_PRIO - 1;
+ }
+ }
+ enqueue_task(current);
+ if (rq->nr_running == 1)
+ schedstat_inc(rq, yld_both_empty);
/*
* Since we are going to call schedule() anyway, there's
@@ -4054,9 +4076,14 @@
runqueue_t *rq = cpu_rq(cpu);
unsigned long flags;
- idle->sleep_avg = 0;
- idle->array = NULL;
- idle->prio = MAX_PRIO;
+ idle->prio = IDLE_PRIO;
+ /*
+ * Initialize scheduling statistics counters as they may provide
+ * valuable information about the CPU e.g. avg_cpu_time_per_cycle
+ * for the idle task will be an estimate of the average time the
+ * CPU is idle.  sched_init() may not be ready so use the
+ * INITIAL_JIFFIES based INITIAL_CPUSTATS_TIMESTAMP instead.
+ */
+ initialize_cpustats(idle, INITIAL_CPUSTATS_TIMESTAMP);
idle->state = TASK_RUNNING;
set_task_cpu(idle, cpu);
@@ -4153,6 +4180,7 @@
static void __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
{
runqueue_t *rq_dest, *rq_src;
+ unsigned long long now;
if (unlikely(cpu_is_offline(dest_cpu)))
return;
@@ -4168,20 +4196,17 @@
if (!cpu_isset(dest_cpu, p->cpus_allowed))
goto out;
- set_task_cpu(p, dest_cpu);
- if (p->array) {
- /*
- * Sync timestamp with rq_dest's before activating.
- * The same thing could be achieved by doing this step
- * afterwards, and pretending it was a local activate.
- * This way is cleaner and logically correct.
- */
- p->timestamp = p->timestamp - rq_src->timestamp_last_tick
- + rq_dest->timestamp_last_tick;
- deactivate_task(p, rq_src);
- activate_task(p, rq_dest, 0);
- if (TASK_PREEMPTS_CURR(p, rq_dest))
- resched_task(rq_dest->curr);
+ now = adjusted_sched_clock(p);
+ if (task_queued(p)) {
+ deactivate_task(p);
+ /* not the current task on its cpu so increment delay stats */
+ delta_delay_cpustats(p, now);
+ set_task_cpu(p, dest_cpu);
+ activate_task(p);
+ preempt_curr_if_warranted(p);
+ } else {
+ delta_sleep_cpustats(p, now);
+ set_task_cpu(p, dest_cpu);
}
out:
@@ -4353,7 +4378,7 @@
__setscheduler(p, SCHED_FIFO, MAX_RT_PRIO-1);
/* Add idle task to _front_ of it's priority queue */
- __activate_idle_task(p, rq);
+ __activate_idle_task(rq);
spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -4399,18 +4424,16 @@
/* release_task() removes task from tasklist, so we won't find dead tasks. */
static void migrate_dead_tasks(unsigned int dead_cpu)
{
- unsigned arr, i;
+ unsigned i;
struct runqueue *rq = cpu_rq(dead_cpu);
- for (arr = 0; arr < 2; arr++) {
- for (i = 0; i < MAX_PRIO; i++) {
- struct list_head *list = &rq->arrays[arr].queue[i];
- while (!list_empty(list))
- migrate_dead(dead_cpu,
- list_entry(list->next, task_t,
- run_list));
- }
+ for (i = 0; i < IDLE_PRIO; i++) {
+ struct list_head *list = &rq->queues[i].queue;
+ while (!list_empty(list))
+ migrate_dead(dead_cpu, list_entry(list->next, task_t, run_list));
}
}
#endif /* CONFIG_HOTPLUG_CPU */
@@ -4457,8 +4480,8 @@
rq->migration_thread = NULL;
/* Idle task back to normal (off runqueue, low prio) */
rq = task_rq_lock(rq->idle, &flags);
- deactivate_task(rq->idle, rq);
- rq->idle->static_prio = MAX_PRIO;
+ deactivate_task(rq->idle);
+ rq->idle->static_prio = IDLE_PRIO;
__setscheduler(rq->idle, SCHED_NORMAL, 0);
migrate_dead_tasks(cpu);
task_rq_unlock(rq, &flags);
@@ -4952,16 +4975,11 @@
void __init sched_init(void)
{
runqueue_t *rq;
- int i, j, k;
+ int i, k, cpu;
for (i = 0; i < NR_CPUS; i++) {
- prio_array_t *array;
-
rq = cpu_rq(i);
spin_lock_init(&rq->lock);
- rq->active = rq->arrays;
- rq->expired = rq->arrays + 1;
- rq->best_expired_prio = MAX_PRIO;
#ifdef CONFIG_SMP
rq->sd = &sched_domain_dummy;
@@ -4973,17 +4991,26 @@
#endif
atomic_set(&rq->nr_iowait, 0);
- for (j = 0; j < 2; j++) {
- array = rq->arrays + j;
- for (k = 0; k < MAX_PRIO; k++) {
- INIT_LIST_HEAD(array->queue + k);
- __clear_bit(k, array->bitmap);
- }
- // delimiter for bitsearch
- __set_bit(MAX_PRIO, array->bitmap);
+ for (k = 0; k < IDLE_PRIO; k++) {
+ rq->queues[k].prio = k;
+ INIT_LIST_HEAD(&rq->queues[k].queue);
}
+
+ bitmap_zero(rq->bitmap, NUM_PRIO_SLOTS);
+ /* delimiter for bitsearch */
+ __set_bit(IDLE_PRIO, rq->bitmap);
+ init_runq_cpustats(i);
+ zaphod_init_cpu_runq_data(i);
+ rq->next_prom_due = ULONG_MAX;
+ rq->pcount = 0;
}
+ cpu = smp_processor_id();
+ /* make sure that these get set on single CPU systems */
+ current->csrq = cpu_runq_cpustats(cpu);
+ current->zrq = zaphod_cpu_runq_data(cpu);
+ current->rq = cpu_rq(cpu);
+
/*
* The boot idle thread does lazy MMU switching as well:
*/
@@ -4996,7 +5023,7 @@
* but because we are the idle thread, we just pick up running again
* when this runqueue becomes "idle".
*/
- init_idle(current, smp_processor_id());
+ init_idle(current, cpu);
}
#ifdef CONFIG_DEBUG_SPINLOCK_SLEEP
@@ -5025,7 +5052,7 @@
void normalize_rt_tasks(void)
{
struct task_struct *p;
- prio_array_t *array;
+ int queued;
unsigned long flags;
runqueue_t *rq;
@@ -5036,12 +5063,12 @@
rq = task_rq_lock(p, &flags);
- array = p->array;
- if (array)
- deactivate_task(p, task_rq(p));
+ queued = task_queued(p);
+ if (queued)
+ deactivate_task(p);
__setscheduler(p, SCHED_NORMAL, 0);
- if (array) {
- __activate_task(p, task_rq(p));
+ if (queued) {
+ __activate_task(p);
resched_task(rq->curr);
}
@@ -5051,3 +5078,98 @@
}
#endif /* CONFIG_MAGIC_SYSRQ */
+
+#if defined(CONFIG_SYSCTL)
+/*
+ * CPU scheduler control via /proc/sys/cpusched/xxx
+ */
+enum
+{
+ CPU_SCHED_END_OF_LIST=0,
+ CPU_SCHED_LOG_AT_EXIT,
+ CPU_SCHED_TIME_SLICE,
+ CPU_SCHED_SCHED_RR_TIME_SLICE,
+ CPU_SCHED_BASE_PROMOTION_INTERVAL,
+ CPU_SCHED_UNPRIV_RT_THRESHOLD,
+ CPU_SCHED_BGND_TIME_SLICE_MULTIPLIER,
+ CPU_SCHED_ZAPHOD,
+};
+
+
+static const unsigned int zero = 0;
+static const unsigned int one = 1;
+static const unsigned long min_time_slice = MIN_TIMESLICE;
+static const unsigned long max_time_slice = MAX_TIMESLICE;
+#define min_base_prom_interval min_time_slice
+static const unsigned long max_base_prom_interval = ULONG_MAX;
+
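+/*
+ * These entries appear under /proc/sys/cpu_sched/ (see CTL_CPU_SCHED in
+ * kernel/sysctl.c).  The time slice and promotion interval values are
+ * read and written in milliseconds (converted to jiffies internally) and
+ * unpriv_rt_threshold is handled in parts per thousand by
+ * do_proc_proportion().  For example
+ *
+ *	echo 150 > /proc/sys/cpu_sched/time_slice
+ *
+ * should set time_slice to the equivalent of 150 ms, subject to the
+ * min/max limits below.
+ */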
+ctl_table cpu_sched_table[] = {
+ {
+ .ctl_name = CPU_SCHED_LOG_AT_EXIT,
+ .procname = "log_at_exit",
+ .data = &log_at_exit,
+ .maxlen = sizeof (unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *)&zero,
+ .extra2 = (void *)&one
+ },
+ {
+ .ctl_name = CPU_SCHED_TIME_SLICE,
+ .procname = "time_slice",
+ .data = &time_slice,
+ .maxlen = sizeof (unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&min_time_slice,
+ .extra2 = (void *)&max_time_slice
+ },
+ {
+ .ctl_name = CPU_SCHED_SCHED_RR_TIME_SLICE,
+ .procname = "sched_rr_time_slice",
+ .data = &sched_rr_time_slice,
+ .maxlen = sizeof (unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&min_time_slice,
+ .extra2 = (void *)&max_time_slice
+ },
+ {
+ .ctl_name = CPU_SCHED_BASE_PROMOTION_INTERVAL,
+ .procname = "base_promotion_interval",
+ .data = &base_prom_interval,
+ .maxlen = sizeof (unsigned long),
+ .mode = 0644,
+ .proc_handler = &proc_doulongvec_ms_jiffies_minmax,
+ .extra1 = (void *)&min_base_prom_interval,
+ .extra2 = (void *)&max_base_prom_interval
+ },
+ {
+ .ctl_name = CPU_SCHED_UNPRIV_RT_THRESHOLD,
+ .procname = "unpriv_rt_threshold",
+ .data = &unpriv_rt_threshold,
+ .maxlen = sizeof (unsigned long),
+ .mode = 0644,
+ .proc_handler = &do_proc_proportion,
+ .extra1 = NULL,
+ .extra2 = NULL
+ },
+ {
+ .ctl_name = CPU_SCHED_BGND_TIME_SLICE_MULTIPLIER,
+ .procname = "bgnd_time_slice_multiplier",
+ .data = &bgnd_time_slice_multiplier,
+ .maxlen = sizeof (unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .extra1 = (void *)&one,
+ .extra2 = (void *)&max_bgnd_time_slice_multiplier
+ },
+ {
+ .ctl_name = CPU_SCHED_ZAPHOD,
+ .procname = "zaphod",
+ .mode = 0555,
+ .child = zaphod_ctl_table,
+ },
+ { .ctl_name = CPU_SCHED_END_OF_LIST }
+};
+#endif
Index: 2.6.11/kernel/sched_cpustats.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ 2.6.11/kernel/sched_cpustats.c 2005-04-15 10:46:29.045185928 -0500
@@ -0,0 +1,399 @@
+/*
+ * kernel/sched_cpustats.c
+ *
+ * Kernel high resolution CPU statistics for use by schedulers
+ *
+ * Copyright (C) 2004 Aurema Pty Ltd
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/sched.h>
+#include <linux/hardirq.h>
+
+static DEFINE_PER_CPU(struct runq_cpustats, cpustats_runqs);
+
+void init_runq_cpustats(unsigned int cpu)
+{
+ struct runq_cpustats *csrq = &per_cpu(cpustats_runqs, cpu);
+
+ csrq->total_delay = 0;
+ csrq->total_sinbin = 0;
+ csrq->total_rt_delay = 0;
+ csrq->total_intr_delay = 0;
+ csrq->total_rt_intr_delay = 0;
+ csrq->total_fork_delay = 0;
+#ifdef CONFIG_SMP
+ csrq->timestamp_last_tick = INITIAL_CPUSTATS_TIMESTAMP;
+#endif
+}
+
+#ifdef CONFIG_SMP
+unsigned long long adjusted_sched_clock(const task_t *p)
+{
+ struct runq_cpustats *trq = &__get_cpu_var(cpustats_runqs);
+
+ return sched_clock() + (RUNQ_CPUSTATS(p)->timestamp_last_tick - trq->timestamp_last_tick);
+}
+
+void set_task_runq_cpustats(struct task_struct *p, unsigned int cpu)
+{
+ unsigned long long oldts = RUNQ_CPUSTATS(p)->timestamp_last_tick;
+
+ RUNQ_CPUSTATS(p) = cpu_runq_cpustats(cpu);
+ TASK_CPUSTATS(p).timestamp += (RUNQ_CPUSTATS(p)->timestamp_last_tick - oldts);
+}
+#endif
+
+struct runq_cpustats *cpu_runq_cpustats(unsigned int cpu)
+{
+ return &per_cpu(cpustats_runqs, cpu);
+}
+
+void initialize_cpustats(struct task_struct *p, unsigned long long now)
+{
+ TASK_CPUSTATS(p).avg_sleep_per_cycle = 0;
+ TASK_CPUSTATS(p).avg_delay_per_cycle = 0;
+ TASK_CPUSTATS(p).avg_cpu_per_cycle = 0;
+ TASK_CPUSTATS(p).total_sleep = 0;
+ TASK_CPUSTATS(p).total_delay = 0;
+ TASK_CPUSTATS(p).total_sinbin = 0;
+ TASK_CPUSTATS(p).total_cpu = 0;
+ TASK_CPUSTATS(p).total_wake_ups = 0;
+ TASK_CPUSTATS(p).intr_wake_ups = 0;
+ TASK_CPUSTATS(p).avg_cycle_length = 0;
+ TASK_CPUSTATS(p).timestamp = now;
+ TASK_CPUSTATS(p).flags = CPUSTATS_JUST_FORKED_FL;
+}
+
+void delta_sleep_cpustats(struct task_struct *p, unsigned long long now)
+{
+ unsigned long long delta;
+
+ /* sched_clock() is not guaranteed monotonic */
+ if (now <= TASK_CPUSTATS(p).timestamp) {
+ TASK_CPUSTATS(p).timestamp = now;
+ return;
+ }
+
+ delta = now - TASK_CPUSTATS(p).timestamp;
+ TASK_CPUSTATS(p).timestamp = now;
+ TASK_CPUSTATS(p).avg_sleep_per_cycle += delta;
+ TASK_CPUSTATS(p).total_sleep += delta;
+}
+
+void delta_cpu_cpustats(struct task_struct *p, unsigned long long now)
+{
+ unsigned long long delta;
+
+ /* sched_clock() is not guaranteed monotonic */
+ if (now <= TASK_CPUSTATS(p).timestamp) {
+ TASK_CPUSTATS(p).timestamp = now;
+ return;
+ }
+
+ delta = now - TASK_CPUSTATS(p).timestamp;
+ TASK_CPUSTATS(p).timestamp = now;
+ TASK_CPUSTATS(p).avg_cpu_per_cycle += delta;
+ TASK_CPUSTATS(p).total_cpu += delta;
+}
+
+void delta_delay_cpustats(struct task_struct *p, unsigned long long now)
+{
+ unsigned long long delta;
+
+ /* sched_clock() is not guaranteed monotonic */
+ if (now <= TASK_CPUSTATS(p).timestamp) {
+ TASK_CPUSTATS(p).timestamp = now;
+ return;
+ }
+
+ delta = now - TASK_CPUSTATS(p).timestamp;
+ TASK_CPUSTATS(p).timestamp = now;
+ TASK_CPUSTATS(p).avg_delay_per_cycle += delta;
+ TASK_CPUSTATS(p).total_delay += delta;
+ RUNQ_CPUSTATS(p)->total_delay += delta;
+ if (task_is_sinbinned(p)) {
+ TASK_CPUSTATS(p).total_sinbin += delta;
+ RUNQ_CPUSTATS(p)->total_sinbin += delta;
+ } else if (rt_task(p)) { /* rt tasks are never sinbinned */
+ RUNQ_CPUSTATS(p)->total_rt_delay += delta;
+ if (TASK_CPUSTATS(p).flags & CPUSTATS_WOKEN_FOR_INTR_FL)
+ RUNQ_CPUSTATS(p)->total_rt_intr_delay += delta;
+ }
+ if (unlikely(TASK_CPUSTATS(p).flags & CPUSTATS_JUST_FORKED_FL)) {
+ RUNQ_CPUSTATS(p)->total_fork_delay += delta;
+ TASK_CPUSTATS(p).flags &= ~CPUSTATS_JUST_FORKED_FL;
+ }
+ if (TASK_CPUSTATS(p).flags & CPUSTATS_WOKEN_FOR_INTR_FL) {
+ RUNQ_CPUSTATS(p)->total_intr_delay += delta;
+ TASK_CPUSTATS(p).flags &= ~CPUSTATS_WOKEN_FOR_INTR_FL;
+ }
+}
+
+#define SCHED_AVG_ALPHA ((1 << SCHED_AVG_OFFSET) - 1)
+static inline void apply_sched_avg_decay(unsigned long long *valp)
+{
+ *valp *= SCHED_AVG_ALPHA;
+ *valp >>= SCHED_AVG_OFFSET;
+}
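+/*
+ * This is a cheap exponential decay: each scheduling cycle multiplies the
+ * running averages by (2^SCHED_AVG_OFFSET - 1) / 2^SCHED_AVG_OFFSET, e.g.
+ * 15/16 if SCHED_AVG_OFFSET were 4, giving an effective averaging window
+ * of roughly 2^SCHED_AVG_OFFSET scheduling cycles.
+ */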
+
+static inline void decay_cpustats_for_cycle(struct task_struct *p)
+{
+ apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_sleep_per_cycle);
+ apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_delay_per_cycle);
+ apply_sched_avg_decay(&TASK_CPUSTATS(p).avg_cpu_per_cycle);
+ TASK_CPUSTATS(p).avg_cycle_length = TASK_CPUSTATS(p).avg_sleep_per_cycle +
+ TASK_CPUSTATS(p).avg_delay_per_cycle +
+ TASK_CPUSTATS(p).avg_cpu_per_cycle;
+ /* take a shortcut and avoid a possible divide by zero below */
+ if (TASK_CPUSTATS(p).avg_cpu_per_cycle == 0)
+ TASK_CPUSTATS(p).cpu_usage_rate = 0;
+ else
+ TASK_CPUSTATS(p).cpu_usage_rate = calc_proportion(TASK_CPUSTATS(p).avg_cpu_per_cycle, TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+void update_cpustats_at_wake_up(struct task_struct *p, unsigned long long now)
+{
+ delta_sleep_cpustats(p, now);
+ if (in_interrupt()) {
+ TASK_CPUSTATS(p).intr_wake_ups++;
+ TASK_CPUSTATS(p).flags |= CPUSTATS_WOKEN_FOR_INTR_FL;
+ }
+ TASK_CPUSTATS(p).total_wake_ups++;
+ decay_cpustats_for_cycle(p);
+}
+
+void update_cpustats_at_end_of_ts(struct task_struct *p, unsigned long long now)
+{
+ delta_cpu_cpustats(p, now);
+ decay_cpustats_for_cycle(p);
+}
+
+int task_sched_cpustats(struct task_struct *p, char *buffer)
+{
+ struct task_cpustats stats;
+ unsigned long nvcsw, nivcsw; /* context switch counts */
+ int result;
+
+ read_lock(&tasklist_lock);
+ result = get_task_cpustats(p, &stats);
+ nvcsw = p->nvcsw;
+ nivcsw = p->nivcsw;
+ read_unlock(&tasklist_lock);
+ if (result)
+ return sprintf(buffer, "Data unavailable\n");
+ return sprintf(buffer,
+ "%llu %llu %llu %llu %llu %llu %lu %lu @ %llu\n",
+ stats.total_sleep,
+ stats.total_cpu,
+ stats.total_delay,
+ stats.total_sinbin,
+ stats.total_wake_ups,
+ stats.intr_wake_ups,
+ nvcsw, nivcsw,
+ stats.timestamp);
+}
+
+int cpustats_read_proc(char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int i;
+ int len = 0;
+ int avail = 1;
+ struct cpu_cpustats total = {0, };
+
+ for_each_online_cpu(i) {
+ struct cpu_cpustats stats;
+
+ if (get_cpu_cpustats(i, &stats) != 0) {
+ avail = 0;
+ break;
+ }
+ len += sprintf(page + len,
+ "cpu%02d %llu %llu %llu %llu %llu %llu %llu %llu @ %llu\n", i,
+ stats.total_idle,
+ stats.total_busy,
+ stats.total_delay,
+ stats.total_rt_delay,
+ stats.total_intr_delay,
+ stats.total_rt_intr_delay,
+ stats.total_sinbin,
+ stats.nr_switches,
+ stats.timestamp);
+ total.total_idle += stats.total_idle;
+ total.total_busy += stats.total_busy;
+ total.total_delay += stats.total_delay;
+ total.total_rt_delay += stats.total_rt_delay;
+ total.total_intr_delay += stats.total_intr_delay;
+ total.total_rt_intr_delay += stats.total_rt_intr_delay;
+ total.total_sinbin += stats.total_sinbin;
+ total.nr_switches += stats.nr_switches;
+ }
+ if (avail)
+ len += sprintf(page + len, "total %llu %llu %llu %llu %llu %llu %llu %llu\n",
+ total.total_idle,
+ total.total_busy,
+ total.total_delay,
+ total.total_intr_delay,
+ total.total_rt_delay,
+ total.total_rt_intr_delay,
+ total.total_sinbin,
+ total.nr_switches);
+ else
+ len = sprintf(page, "Data unavailable\n");
+
+ if (len <= off+count) *eof = 1;
+ *start = page + off;
+ len -= off;
+ if (len > count) len = count;
+ if (len < 0) len = 0;
+
+ return len;
+}
+
+static inline unsigned long long sched_div_64(unsigned long long a, unsigned long long b)
+{
+#if BITS_PER_LONG < 64
+ /*
+ * Assume that there's no 64 bit divide available
+ */
+ if (a < b)
+ return 0;
+ /*
+ * Scale down until b less than 32 bits so that we can do
+ * a divide using do_div()
+ */
+ while (b > ULONG_MAX) { a >>= 1; b >>= 1; }
+
+ (void)do_div(a, (unsigned long)b);
+
+ return a;
+#else
+ return a / b;
+#endif
+}
+
+/*
+ * CPU usage rate is estimated as a proportion of a CPU using fixed denominator
+ * rational numbers. The denominator must be less than 2^24 so that
+ * we can store the eb_yardstick in an atomic_t on sparc
+ */
+#if PROPORTION_OFFSET >= 24
+#error "PROPORTION_OFFSET must be less than 24"
+#endif
+#define PROPORTION_OVERFLOW ((1ULL << (64 - PROPORTION_OFFSET)) - 1)
+
+/*
+ * Convert a / b to a proportion in the range 0 to PROPORTION_ONE
+ * Requires a <= b or may get a divide by zero exception
+ */
+unsigned long calc_proportion(unsigned long long a, unsigned long long b)
+{
+ if (unlikely(a == b))
+ return PROPORTION_ONE;
+
+ while (a > PROPORTION_OVERFLOW) { a >>= 1; b >>= 1; }
+
+ return sched_div_64(a << PROPORTION_OFFSET, b);
+}
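+/*
+ * E.g. calc_proportion(250, 1000) yields a fixed point value representing
+ * one quarter (roughly PROPORTION_ONE / 4) and calc_proportion(a, a) is
+ * always exactly PROPORTION_ONE.
+ */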
+
+/*
+ * Map the given proportion to an unsigned long in the specified range
+ * Requires range < PROPORTION_ONE to avoid overflow
+ */
+unsigned long map_proportion(unsigned long prop, unsigned long range)
+{
+ /* use 64 bits to help avoid overflow on 32 bit systems */
+ return ((unsigned long long)prop * (unsigned long long)range) >> PROPORTION_OFFSET;
+}
+
+/* WANT: proportion_to_ppt(ppt_to_proportion(x)) == x
+ */
+unsigned long proportion_to_ppt(unsigned long proportion)
+{
+ return ((unsigned long long)proportion * 2001ULL) >> (PROPORTION_OFFSET + 1);
+}
+
+unsigned long ppt_to_proportion(unsigned long ppt)
+{
+ return sched_div_64((unsigned long long)ppt * PROPORTION_ONE, 1000);
+}
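+/*
+ * E.g. ppt_to_proportion(500) is roughly PROPORTION_ONE / 2; feeding that
+ * back through proportion_to_ppt() multiplies by 2001 and shifts right by
+ * PROPORTION_OFFSET + 1, i.e. (with PROPORTION_ONE == 1 << PROPORTION_OFFSET)
+ * 2001 / 4 truncated, which is 500 again as wanted above.
+ */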
+
+unsigned long avg_cpu_usage_rate(const struct task_struct *p)
+{
+ return TASK_CPUSTATS(p).cpu_usage_rate;
+}
+
+unsigned long avg_sleep_rate(const struct task_struct *p)
+{
+ /* take a shortcut and avoid a possible divide by zero below */
+ if (TASK_CPUSTATS(p).avg_sleep_per_cycle == 0)
+ return 0;
+
+ return calc_proportion(TASK_CPUSTATS(p).avg_sleep_per_cycle, TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+unsigned long avg_cpu_delay_rate(const struct task_struct *p)
+{
+ /* take a shortcut and avoid a possible divide by zero below */
+ if (TASK_CPUSTATS(p).avg_delay_per_cycle == 0)
+ return 0;
+
+ return calc_proportion(TASK_CPUSTATS(p).avg_delay_per_cycle, TASK_CPUSTATS(p).avg_cycle_length);
+}
+
+unsigned long delay_in_jiffies_for_usage(const struct task_struct *p, unsigned long rur)
+{
+ unsigned long long acpc_jiffies, aspc_jiffies, res;
+
+ if (rur == 0)
+ return ULONG_MAX;
+
+ acpc_jiffies = sched_div_64(SCHED_AVG_RND(TASK_CPUSTATS(p).avg_cpu_per_cycle) * HZ, 1000000000);
+ aspc_jiffies = sched_div_64(SCHED_AVG_RND(TASK_CPUSTATS(p).avg_sleep_per_cycle) * HZ, 1000000000);
+
+ /*
+ * we have to be careful about overflow and/or underflow
+ */
+ while (unlikely(acpc_jiffies > PROPORTION_OVERFLOW)) {
+ acpc_jiffies >>= 1;
+ if (unlikely((rur >>= 1) == 0))
+ return ULONG_MAX;
+ }
+
+ res = sched_div_64(acpc_jiffies << PROPORTION_OFFSET, rur);
+ if (res > aspc_jiffies)
+ return res - aspc_jiffies;
+ else
+ return 0;
+}
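+/*
+ * I.e. the result is (approximately) avg_cpu_per_cycle / rur minus
+ * avg_sleep_per_cycle, converted to jiffies: an estimate of the extra per
+ * cycle delay that would hold the task's CPU usage rate at or below rur.
+ * A usage rate of zero (or one that rescales to zero) yields ULONG_MAX.
+ */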
+
+static int convert_proportion(unsigned long *val, void *data, int write)
+{
+ if (write) {
+ if (*val > 1000)
+ return -1;
+ *val = ppt_to_proportion(*val);
+ } else
+ *val = proportion_to_ppt(*val);
+
+ return 0;
+}
+
+int do_proc_proportion(ctl_table *ctp, int write, struct file *fp,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ return do_proc_doulongvec_convf_minmax(ctp, write, fp, buffer, lenp,
+ ppos, convert_proportion, NULL);
+}
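+
+/*
+ * Sysctl entries handled by do_proc_proportion() (e.g. unpriv_rt_threshold
+ * under /proc/sys/cpu_sched/) are read and written in parts per thousand:
+ * writing 900 stores a proportion of roughly 90% and should read back as
+ * 900.
+ */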
Index: 2.6.11/kernel/sched_zaphod.c
===================================================================
--- /dev/null 1970-01-01 00:00:00.000000000 +0000
+++ 2.6.11/kernel/sched_zaphod.c 2005-04-15 10:46:00.864566604 -0500
@@ -0,0 +1,494 @@
+/*
+ * kernel/sched_zaphod.c
+ *
+ * CPU scheduler mode
+ *
+ * Copyright (C) 2004 Aurema Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+#include <linux/sched.h>
+#include <linux/proc_fs.h>
+
+#include <asm/uaccess.h>
+
+#ifdef CONFIG_CPUSCHED_ZAPHOD
+#define MAX_PRIO ZAPHOD_MAX_PRIO
+#define MIN_NORMAL_PRIO ZAPHOD_MIN_NORMAL_PRIO
+#define IDLE_PRIO ZAPHOD_IDLE_PRIO
+#define BGND_PRIO ZAPHOD_BGND_PRIO
+#define TASK_ZD(p) (p)->u.zaphod.zaphod
+#define RUNQ_ZD(p) (p)->u.zaphod.zrq
+#define MIN_RATE_CAP(p) (p)->u.zaphod.min_cpu_rate_cap
+#define task_is_bgnd(p) (unlikely((p)->u.zaphod.cpu_rate_cap == 0))
+#else
+#define TASK_CPUSTATS(p) (p)->cpustats
+#define TASK_ZD(p) (p)->zaphod
+#define RUNQ_ZD(p) (p)->zrq
+#define MIN_RATE_CAP(p) (p)->min_cpu_rate_cap
+#endif
+
+#define EB_YARDSTICK_DECAY_INTERVAL 100
+
+enum zaphod_mode_enum {
+ ZAPHOD_MODE_PRIORITY_BASED,
+ ZAPHOD_MODE_ENTITLEMENT_BASED
+};
+
+static enum zaphod_mode_enum zaphod_mode = ZAPHOD_MODE_PRIORITY_BASED;
+
+#ifdef CONFIG_SYSCTL
+static const char *zaphod_mode_names[] = {
+ "pb", /* ZAPHOD_MODE_PRIORITY_BASED */
+ "eb", /* ZAPHOD_MODE_ENTITLEMENT_BASED */
+ NULL /* end of list marker */
+};
+#endif
+
+/*
+ * Convert nice to shares
+ * Proportional symmetry is aimed for: i.e.
+ * (nice_to_shares(0) / nice_to_shares(19)) == (nice_to_shares(-20) / nice_to_shares(0))
+ * Make sure that this function is robust for variations of EB_SHARES_PER_NICE
+ */
+static inline unsigned int nice_to_shares(int nice)
+{
+ unsigned int result = DEFAULT_EB_SHARES;
+
+ if (nice > 0)
+ result -= (nice * (20 * EB_SHARES_PER_NICE - 1)) / 19;
+ else if (nice < 0)
+ result += (nice * nice * ((20 * EB_SHARES_PER_NICE - 1) * EB_SHARES_PER_NICE)) / 20;
+
+ return result;
+}
+
+static inline int shares_to_nice(unsigned int shares)
+{
+ int result = 0;
+
+ if (shares > DEFAULT_EB_SHARES)
+ result = -int_sqrt((20 * (shares - DEFAULT_EB_SHARES)) /
+ (EB_SHARES_PER_NICE * (20 * EB_SHARES_PER_NICE - 1)));
+ else if (shares < DEFAULT_EB_SHARES)
+ result = (19 * (DEFAULT_EB_SHARES - shares)) /
+ (20 * EB_SHARES_PER_NICE - 1);
+
+ return result;
+}
+
+#define MAX_TOTAL_BONUS (BGND_PRIO - MAX_PRIO - 1)
+#define MAX_MAX_IA_BONUS ((MAX_TOTAL_BONUS + 1) / 2)
+#define MAX_MAX_TPT_BONUS (MAX_TOTAL_BONUS - MAX_MAX_IA_BONUS)
+#define DEFAULT_MAX_IA_BONUS ((MAX_MAX_IA_BONUS < 7) ? MAX_MAX_IA_BONUS : 7)
+#define DEFAULT_MAX_TPT_BONUS ((DEFAULT_MAX_IA_BONUS - 2) ? : 1)
+
+
+#define SCHED_IA_BONUS_OFFSET 8
+#define SCHED_IA_BONUS_ALPHA ((1 << SCHED_IA_BONUS_OFFSET) - 1)
+#define SCHED_IA_BONUS_MUL(a, b) (((a) * (b)) >> SCHED_IA_BONUS_OFFSET)
+/*
+ * Get the rounded integer value of the interactive bonus
+ */
+#define SCHED_IA_BONUS_RND(x) \
+ (((x) + (1 << (SCHED_IA_BONUS_OFFSET - 1))) >> (SCHED_IA_BONUS_OFFSET))
+
+unsigned int max_ia_bonus = DEFAULT_MAX_IA_BONUS;
+unsigned int max_max_ia_bonus = MAX_MAX_IA_BONUS;
+unsigned int initial_ia_bonus = 1;
+unsigned int max_tpt_bonus = DEFAULT_MAX_TPT_BONUS;
+unsigned int max_max_tpt_bonus = MAX_MAX_TPT_BONUS;
+
+/*
+ * Find the square root of a proportion
+ * Require: x <= PROPORTION_ONE
+ */
+static unsigned long proportion_sqrt(unsigned long x)
+{
+ /* use 64 bits internally to avoid overflow */
+ unsigned long long res, b, ulx;
+ int bshift;
+
+ /*
+ * Take shortcut AND prevent overflow
+ */
+ if (x == PROPORTION_ONE)
+ return PROPORTION_ONE;
+
+ res = 0;
+ b = (1UL << (PROPORTION_OFFSET - 1));
+ bshift = PROPORTION_OFFSET - 1;
+ ulx = (unsigned long long)x << PROPORTION_OFFSET;
+
+ for (; ulx && b; b >>= 1, bshift--) {
+ unsigned long long temp = (((res << 1) + b) << bshift);
+
+ if (ulx >= temp) {
+ res += b;
+ ulx -= temp;
+ }
+ }
+
+ return res;
+}
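+/*
+ * This is the usual bit by bit integer square root done in fixed point:
+ * e.g. the square root of the proportion representing 0.25 comes out as
+ * (approximately) the proportion representing 0.5.
+ */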
+
+/*
+ * Tasks that have a CPU usage rate greater than this threshold (in parts per
+ * thousand) are considered to be CPU bound and start to lose interactive bonus
+ * points
+ */
+#define DEFAULT_CPU_HOG_THRESHOLD 900
+unsigned long cpu_hog_threshold = PROP_FM_PPT(DEFAULT_CPU_HOG_THRESHOLD);
+
+/*
+ * Tasks that would sleep for more than 900 parts per thousand of the time if
+ * they had the CPU to themselves are considered to be interactive provided
+ * that their average sleep duration per scheduling cycle isn't too long
+ */
+#define DEFAULT_IA_THRESHOLD 900
+unsigned long ia_threshold = PROP_FM_PPT(DEFAULT_IA_THRESHOLD);
+#define LOWER_MAX_IA_SLEEP SCHED_AVG_REAL(15 * 60LL * NSEC_PER_SEC)
+#define UPPER_MAX_IA_SLEEP SCHED_AVG_REAL(2 * 60 * 60LL * NSEC_PER_SEC)
+
+/*
+ * Calculate CPU usage rate and sleepiness.
+ * This never gets called on real time tasks
+ */
+static unsigned long calc_sleepiness(task_t *p)
+{
+ unsigned long long bl;
+
+ bl = TASK_CPUSTATS(p).avg_sleep_per_cycle + TASK_CPUSTATS(p).avg_cpu_per_cycle;
+ /*
+ * Take a shortcut and avoid possible divide by zero
+ */
+ if (unlikely(bl == 0))
+ return PROPORTION_ONE;
+ else
+ return calc_proportion(TASK_CPUSTATS(p).avg_sleep_per_cycle, bl);
+}
+
+static inline void decay_sched_ia_bonus(struct task_struct *p)
+{
+ TASK_ZD(p).interactive_bonus *= SCHED_IA_BONUS_ALPHA;
+ TASK_ZD(p).interactive_bonus >>= SCHED_IA_BONUS_OFFSET;
+}
+
+/*
+ * Check whether a task with an interactive bonus still qualifies and if not
+ * decrease its bonus
+ * This never gets called on real time tasks
+ */
+static void reassess_cpu_boundness(task_t *p)
+{
+ if (max_ia_bonus == 0) {
+ TASK_ZD(p).interactive_bonus = 0;
+ return;
+ }
+ /*
+ * No point going any further if there's no bonus to lose
+ */
+ if (TASK_ZD(p).interactive_bonus == 0)
+ return;
+
+ if (TASK_CPUSTATS(p).cpu_usage_rate > cpu_hog_threshold)
+ decay_sched_ia_bonus(p);
+}
+
+/*
+ * Check whether a task qualifies for an interactive bonus and if it does
+ * increase its bonus
+ * This never gets called on real time tasks
+ */
+static void reassess_interactiveness(task_t *p)
+{
+ unsigned long sleepiness;
+
+ if (max_ia_bonus == 0) {
+ TASK_ZD(p).interactive_bonus = 0;
+ return;
+ }
+ /*
+ * No sleep means not interactive (in most cases), but neither
+ * are very long sleeps, so screen those out first
+ */
+ if (unlikely(TASK_CPUSTATS(p).avg_sleep_per_cycle > LOWER_MAX_IA_SLEEP)) {
+ /*
+ * Really long sleeps mean it's probably not interactive
+ */
+ if (unlikely(TASK_CPUSTATS(p).avg_sleep_per_cycle > UPPER_MAX_IA_SLEEP))
+ decay_sched_ia_bonus(p);
+ return;
+ }
+
+ sleepiness = calc_sleepiness(p);
+ if (sleepiness > ia_threshold) {
+ decay_sched_ia_bonus(p);
+ TASK_ZD(p).interactive_bonus += map_proportion_rnd(sleepiness, max_ia_bonus);
+ }
+}
+
+/*
+ * Check whether a task qualifies for a throughput bonus and if it does
+ * give it one
+ * This never gets called on real time tasks
+ */
+#define NRUN_AVG_OFFSET 6
+#define NRUN_AVG_ALPHA ((1 << NRUN_AVG_OFFSET) - 1)
+#define NRUN_AVG_ONE (1UL << NRUN_AVG_OFFSET)
+#define NRUN_AVG_MUL(a, b) (((a) * (b)) >> NRUN_AVG_OFFSET)
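+/*
+ * The throughput bonus rewards tasks delayed by more than the average
+ * runqueue load would explain: the expected delay is avg_cpu_per_cycle
+ * scaled by (average number of runnable tasks - 1) and any excess earns a
+ * bonus proportional to the square root of the excess's share of the
+ * (cpu + excess delay) total, mapped onto max_tpt_bonus.
+ */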
+static void recalc_throughput_bonus(task_t *p)
+{
+ unsigned long long ratio;
+ unsigned long long expected_delay;
+ unsigned long long adjusted_delay;
+ unsigned long long load = RUNQ_ZD(p)->avg_nr_running;
+
+ TASK_ZD(p).throughput_bonus = 0;
+ if (max_tpt_bonus == 0)
+ return;
+
+ if (load <= NRUN_AVG_ONE)
+ expected_delay = 0;
+ else
+ expected_delay = NRUN_AVG_MUL(TASK_CPUSTATS(p).avg_cpu_per_cycle, (load - NRUN_AVG_ONE));
+
+ /*
+ * No unexpected delay means no bonus.  NB this test also avoids a
+ * possible divide by zero (when avg_cpu_per_cycle is also zero) and
+ * negative bonuses
+ */
+ if (TASK_CPUSTATS(p).avg_delay_per_cycle <= expected_delay)
+ return;
+
+ adjusted_delay = TASK_CPUSTATS(p).avg_delay_per_cycle - expected_delay;
+ ratio = calc_proportion(adjusted_delay, adjusted_delay + TASK_CPUSTATS(p).avg_cpu_per_cycle);
+ ratio = proportion_sqrt(ratio);
+ TASK_ZD(p).throughput_bonus = map_proportion_rnd(ratio, max_tpt_bonus);
+}
+
+/*
+ * Calculate priority based priority (without bonuses).
+ * This never gets called on real time tasks
+ */
+static void calculate_pb_pre_bonus_priority(task_t *p)
+{
+ TASK_ZD(p).pre_bonus_priority = p->static_prio + MAX_TOTAL_BONUS;
+}
+
+/*
+ * We're just trying to protect a reading and writing of the yardstick.
+ * We not to fussed about protecting the calculation so the following is
+ * adequate
+ */
+static inline void decay_eb_yardstick(struct sched_zaphod_runq_data *zrq)
+{
+ static const unsigned long decay_per_interval = PROP_FM_PPT(990);
+ unsigned long curry = atomic_read(&zrq->eb_yardstick);
+ unsigned long pny; /* potential new yardstick */
+ struct task_struct *p = current;
+
+ curry = map_proportion(decay_per_interval, curry);
+ atomic_set(&zrq->eb_ticks_to_decay, EB_YARDSTICK_DECAY_INTERVAL);
+ if (unlikely(rt_task(p) || task_is_bgnd(p)))
+ goto out;
+ if (TASK_CPUSTATS(p).cpu_usage_rate < MIN_RATE_CAP(p))
+ pny = TASK_CPUSTATS(p).cpu_usage_rate / TASK_ZD(p).eb_shares;
+ else
+ pny = MIN_RATE_CAP(p) / TASK_ZD(p).eb_shares;
+ if (pny > curry)
+ curry = pny;
+out:
+ if (unlikely(curry >= PROPORTION_ONE))
+ curry = PROPORTION_ONE - 1;
+ atomic_set(&zrq->eb_yardstick, curry);
+}
+
+/*
+ * Calculate entitlement based priority (without bonuses).
+ * This never gets called on real time tasks
+ */
+#define EB_PAR 19
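+/*
+ * Roughly: a task using no more than its entitlement (usage per share at
+ * or below the runqueue's eb_yardstick) lands in the band
+ * [MAX_RT_PRIO + MAX_TOTAL_BONUS, MAX_RT_PRIO + MAX_TOTAL_BONUS + EB_PAR];
+ * a task over its minimum cap is pushed towards BGND_PRIO - 1 in
+ * proportion to the overrun; a task with a zero cap sits at BGND_PRIO - 1.
+ */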
+static void calculate_eb_pre_bonus_priority(task_t *p)
+{
+ /*
+ * Prevent possible divide by zero and take shortcut
+ */
+ if (unlikely(MIN_RATE_CAP(p) == 0)) {
+ TASK_ZD(p).pre_bonus_priority = BGND_PRIO - 1;
+ } else if (TASK_CPUSTATS(p).cpu_usage_rate > MIN_RATE_CAP(p)) {
+ unsigned long cap_per_share = MIN_RATE_CAP(p) / TASK_ZD(p).eb_shares;
+ unsigned long prop = calc_proportion(MIN_RATE_CAP(p), TASK_CPUSTATS(p).cpu_usage_rate);
+
+ TASK_ZD(p).pre_bonus_priority = (BGND_PRIO - 1);
+ TASK_ZD(p).pre_bonus_priority -= map_proportion_rnd(prop, EB_PAR + 1);
+ if (cap_per_share > atomic_read(&RUNQ_ZD(p)->eb_yardstick)) {
+ if (likely(cap_per_share < PROPORTION_ONE))
+ atomic_set(&RUNQ_ZD(p)->eb_yardstick, cap_per_share);
+ else
+ atomic_set(&RUNQ_ZD(p)->eb_yardstick, PROPORTION_ONE - 1);
+ }
+
+ } else {
+ unsigned long usage_per_share = TASK_CPUSTATS(p).cpu_usage_rate / TASK_ZD(p).eb_shares;
+
+ if (usage_per_share > atomic_read(&RUNQ_ZD(p)->eb_yardstick)) {
+ if (likely(usage_per_share < PROPORTION_ONE))
+ atomic_set(&RUNQ_ZD(p)->eb_yardstick, usage_per_share);
+ else
+ atomic_set(&RUNQ_ZD(p)->eb_yardstick, PROPORTION_ONE - 1);
+ TASK_ZD(p).pre_bonus_priority = MAX_RT_PRIO + MAX_TOTAL_BONUS + EB_PAR;
+ } else {
+ unsigned long prop;
+
+ prop = calc_proportion(usage_per_share, atomic_read(&RUNQ_ZD(p)->eb_yardstick));
+ TASK_ZD(p).pre_bonus_priority = MAX_RT_PRIO + MAX_TOTAL_BONUS;
+ TASK_ZD(p).pre_bonus_priority += map_proportion_rnd(prop, EB_PAR);
+ }
+ }
+}
+
+static inline void calculate_pre_bonus_priority(task_t *p)
+{
+ if (zaphod_mode == ZAPHOD_MODE_ENTITLEMENT_BASED)
+ calculate_eb_pre_bonus_priority(p);
+ else
+ calculate_pb_pre_bonus_priority(p);
+}
+
+static DEFINE_PER_CPU(struct sched_zaphod_runq_data, zaphod_runqs);
+
+void zaphod_init_cpu_runq_data(unsigned int cpu)
+{
+ struct sched_zaphod_runq_data *zrq = &per_cpu(zaphod_runqs, cpu);
+
+ zrq->avg_nr_running = 0;
+ atomic_set(&zrq->eb_yardstick, 0);
+ atomic_set(&zrq->eb_ticks_to_decay, EB_YARDSTICK_DECAY_INTERVAL + cpu);
+}
+
+struct sched_zaphod_runq_data *zaphod_cpu_runq_data(unsigned int cpu)
+{
+ return &per_cpu(zaphod_runqs, cpu);
+}
+
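+/*
+ * avg_nr_running is a fixed point exponentially weighted average of
+ * nr_running sampled each tick (weight 1 / 2^NRUN_AVG_OFFSET per sample),
+ * so a steady load of N runnable tasks settles at about N * NRUN_AVG_ONE -
+ * hence the comparison against NRUN_AVG_ONE in recalc_throughput_bonus().
+ */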
+void zaphod_runq_data_tick(struct sched_zaphod_runq_data *zrq, unsigned long numr)
+{
+ unsigned long nval = NRUN_AVG_MUL(zrq->avg_nr_running, NRUN_AVG_ALPHA);
+ nval += numr;
+
+ zrq->avg_nr_running = nval;
+
+ if (atomic_dec_and_test(&zrq->eb_ticks_to_decay))
+ decay_eb_yardstick(zrq);
+}
+
+void zaphod_fork(struct task_struct *p)
+{
+ TASK_ZD(p).interactive_bonus = (max_ia_bonus >= initial_ia_bonus) ?
+ initial_ia_bonus : max_ia_bonus;
+ TASK_ZD(p).throughput_bonus = 0;
+}
+
+unsigned int zaphod_effective_prio(struct task_struct *p)
+{
+ unsigned int bonus = 0;
+
+ /* no bonuses for tasks that have exceeded their cap */
+ if (likely(TASK_CPUSTATS(p).cpu_usage_rate < MIN_RATE_CAP(p))) {
+ bonus = SCHED_IA_BONUS_RND(TASK_ZD(p).interactive_bonus);
+ bonus += TASK_ZD(p).throughput_bonus;
+ }
+
+ return TASK_ZD(p).pre_bonus_priority - bonus;
+}
+
+void zaphod_reassess_at_activation(struct task_struct *p)
+{
+ recalc_throughput_bonus(p);
+ reassess_interactiveness(p);
+ calculate_pre_bonus_priority(p);
+}
+
+void zaphod_reassess_at_end_of_ts(struct task_struct *p)
+{
+ recalc_throughput_bonus(p);
+ reassess_cpu_boundness(p);
+ /*
+ * Arguably the interactive bonus should be updated here
+ * as well.  But it depends on whether we wish to encourage
+ * interactive tasks to maintain a high bonus or CPU bound
+ * tasks to lose some of their bonus.
+ */
+ calculate_pre_bonus_priority(p);
+}
+
+void zaphod_reassess_at_sinbin_release(struct task_struct *p)
+{
+ calculate_pre_bonus_priority(p);
+}
+
+void zaphod_reassess_at_renice(struct task_struct *p)
+{
+ TASK_ZD(p).eb_shares = nice_to_shares(task_nice(p));
+ if (!rt_task(p))
+ calculate_pre_bonus_priority(p);
+}
+
+#if defined(CONFIG_SYSCTL)
+static const unsigned int zero = 0;
+
+#define ZAPHOD_MODE_BUFFER_LEN 16
+char current_zaphod_mode[ZAPHOD_MODE_BUFFER_LEN] = "";
+int proc_zaphod_mode(ctl_table *ctp, int write, struct file *fp,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int res;
+
+ strcpy(current_zaphod_mode, zaphod_mode_names[zaphod_mode]);
+ res = proc_dostring(ctp, write, fp, buffer, lenp, ppos);
+
+ if ((res == 0) && write) {
+ int i;
+
+ for (i = 0; zaphod_mode_names[i] != NULL; i++)
+ if (strcmp(current_zaphod_mode, zaphod_mode_names[i]) == 0)
+ break;
+ if (zaphod_mode_names[i] == NULL)
+ res = -EINVAL;
+ else /* set the zaphod mode */
+ zaphod_mode = i;
+
+ strcpy(current_zaphod_mode, zaphod_mode_names[zaphod_mode]);
+ }
+
+ return res;
+}
+
+#ifndef CONFIG_CPUSCHED_ZAPHOD
+/*
+ * CPU scheduler control via /proc/sys/cpusched/xxx
+ */
+enum
+{
+ CPU_SCHED_ZAPHOD_END_OF_LIST=0,
+ ZAPHOD_SYSCTL_FNS()
+};
+
+struct ctl_table zaphod_ctl_table[] = {
+ ZAPHOD_CTL_TABLE_INIT(),
+ { .ctl_name = CPU_SCHED_ZAPHOD_END_OF_LIST }
+};
+#endif
+#endif
Index: 2.6.11/kernel/sysctl.c
===================================================================
--- 2.6.11.orig/kernel/sysctl.c 2005-04-14 19:29:15.619974684 -0500
+++ 2.6.11/kernel/sysctl.c 2005-04-15 10:46:00.544473552 -0500
@@ -143,6 +143,7 @@
#ifdef CONFIG_UNIX98_PTYS
extern ctl_table pty_table[];
#endif
+extern ctl_table cpu_sched_table[];
#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
int sysctl_legacy_va_layout;
@@ -215,6 +216,12 @@
.mode = 0555,
.child = dev_table,
},
+ {
+ .ctl_name = CTL_CPU_SCHED,
+ .procname = "cpu_sched",
+ .mode = 0555,
+ .child = cpu_sched_table,
+ },
{ .ctl_name = 0 }
};
@@ -1706,12 +1713,29 @@
do_proc_dointvec_minmax_conv, &param);
}
-static int do_proc_doulongvec_minmax(ctl_table *table, int write,
+/**
+ * Function pointer type: sysctl_ul_convf_t
+ * Interface for a function that converts (e.g. scales) an unsigned long value
+ * @val: a pointer to the unsigned long variable whose value is to be changed
+ * @write: %TRUE if this is a write to the sysctl file
+ * @data: a pointer to (optional) data required by the function.
+ *
+ * This function should change the value of the supplied variable depending
+ * on whether the data has been written to the file or is to be read from the
+ * file.
+ *
+ * This routine MAY do range (or other) checking (e.g. overflow) when write is
+ * %TRUE if it wishes and signal that the supplied data should be rejected by
+ * returning a non-zero value.
+ *
+ * Should return 0 on success.
+ */
+int do_proc_doulongvec_convf_minmax(ctl_table *table, int write,
struct file *filp,
void __user *buffer,
size_t *lenp, loff_t *ppos,
- unsigned long convmul,
- unsigned long convdiv)
+ sysctl_ul_convf_t convf,
+ void *convdata)
{
#define TMPBUFLEN 21
unsigned long *i, *min, *max, val;
@@ -1719,19 +1743,19 @@
size_t len, left;
char buf[TMPBUFLEN], *p;
char __user *s = buffer;
-
+
if (!table->data || !table->maxlen || !*lenp ||
(*ppos && !write)) {
*lenp = 0;
return 0;
}
-
+
i = (unsigned long *) table->data;
min = (unsigned long *) table->extra1;
max = (unsigned long *) table->extra2;
vleft = table->maxlen / sizeof(unsigned long);
left = *lenp;
-
+
for (; left && vleft--; i++, min++, max++, first=0) {
if (write) {
while (left) {
@@ -1759,7 +1783,7 @@
}
if (*p < '0' || *p > '9')
break;
- val = simple_strtoul(p, &p, 0) * convmul / convdiv ;
+ val = simple_strtoul(p, &p, 0);
len = p-buf;
if ((len < left) && *p && !isspace(*p))
break;
@@ -1770,6 +1794,8 @@
if(neg)
continue;
+ if (convf && convf(&val, convdata, 1) != 0)
+ continue;
if ((min && val < *min) || (max && val > *max))
continue;
*i = val;
@@ -1777,7 +1803,10 @@
p = buf;
if (!first)
*p++ = '\t';
- sprintf(p, "%lu", convdiv * (*i) / convmul);
+ val = *i;
+ if (convf)
+ (void)convf(&val, convdata, 0);
+ sprintf(p, "%lu", val);
len = strlen(buf);
if (len > left)
len = left;
@@ -1811,6 +1840,55 @@
#undef TMPBUFLEN
}
+struct ul_scale_data {
+ unsigned long convmul;
+ unsigned long convdiv;
+};
+
+/* Try to provide some protection against overflow including intermediate
+ * overflow where possible (i.e. on 32 bit systems)
+ */
+static int ul_scale(unsigned long *val, void *data, int write)
+{
+ struct ul_scale_data *sdp = (struct ul_scale_data *)data;
+ unsigned long long tl;
+
+ if (write) {
+ tl = (unsigned long long)(*val) * sdp->convmul;
+ (void)do_div(tl, sdp->convdiv);
+ } else {
+ unsigned long rem;
+
+ tl = (unsigned long long)(*val) * sdp->convdiv;
+ rem = do_div(tl, sdp->convmul);
+ if (rem > (sdp->convmul >> 1))
+ tl++;
+ }
+
+ if (tl > ULONG_MAX)
+ return -1;
+
+ *val = tl;
+
+ return 0;
+}
+
+static int do_proc_doulongvec_minmax(ctl_table *table, int write,
+ struct file *filp,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos,
+ unsigned long convmul,
+ unsigned long convdiv)
+{
+ struct ul_scale_data sd;
+
+ sd.convmul = convmul;
+ sd.convdiv = convdiv;
+
+ return do_proc_doulongvec_convf_minmax(table, write, filp, buffer, lenp,
+ ppos, ul_scale, &sd);
+}
+
/**
* proc_doulongvec_minmax - read a vector of long integers with min/max values
* @table: the sysctl table
@@ -2053,6 +2131,15 @@
return -ENOSYS;
}
+int do_proc_doulongvec_convf_minmax(ctl_table *table, int write,
+ struct file *filp,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos,
+ sysctl_ul_convf_t convf,
+ void *convdata)
+{
+ return -ENOSYS;
+}
#endif /* CONFIG_PROC_FS */
@@ -2288,6 +2375,16 @@
return -ENOSYS;
}
+int do_proc_doulongvec_convf_minmax(ctl_table *table, int write,
+ struct file *filp,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos,
+ sysctl_ul_convf_t convf,
+ void *convdata)
+{
+ return -ENOSYS;
+}
+
struct ctl_table_header * register_sysctl_table(ctl_table * table,
int insert_at_head)
{
@@ -2310,6 +2407,7 @@
EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
EXPORT_SYMBOL(proc_dostring);
+EXPORT_SYMBOL(do_proc_doulongvec_convf_minmax);
EXPORT_SYMBOL(proc_doulongvec_minmax);
EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
EXPORT_SYMBOL(register_sysctl_table);