Change Log
0.2+ 2/17/05
- Limited read_batch_expire to not be greater than write_expire.
- Limited write_batch_expire to not be greater than read_expire.
- Allowed antic_expire to go down to zero, which causes the scheduler to
  fall back to deadline-like behaviour.
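
For reference, the two new limits amount to a clamp that is applied when a
child's genes are pushed into the scheduler (as_general_set_child_genes() in
the patch below): each batch expiry is capped at the opposing direction's
fifo expiry, and an antic_expire of 0 simply disables anticipation.  A
minimal stand-alone sketch of that clamping rule; the struct and function
names here are illustrative, not the ones used in the patch:

/* Illustrative user-space sketch of the clamping described above. */
#include <stdio.h>

struct as_tunables {            /* stand-in for the patch's struct as_genes */
        unsigned long read_expire;
        unsigned long write_expire;
        unsigned long read_batch_expire;
        unsigned long write_batch_expire;
        unsigned long antic_expire;
};

static void clamp_tunables(struct as_tunables *t)
{
        /* a read batch may not run longer than the write fifo expiry */
        if (t->read_batch_expire > t->write_expire)
                t->read_batch_expire = t->write_expire;

        /* a write batch may not run longer than the read fifo expiry */
        if (t->write_batch_expire > t->read_expire)
                t->write_batch_expire = t->read_expire;

        /* antic_expire is allowed to reach 0: anticipation is then
         * effectively off and the scheduler behaves like deadline */
}

int main(void)
{
        struct as_tunables t = { 125, 250, 500, 30, 0 };   /* sample values */

        clamp_tunables(&t);
        printf("read_batch_expire=%lu write_batch_expire=%lu antic_expire=%lu\n",
               t.read_batch_expire, t.write_batch_expire, t.antic_expire);
        return 0;
}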
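
The "general" phenotype's fitness is also not measured directly:
as_general_calc_post_fitness() below derives it by rank-aggregating the three
measured phenotypes, with num_ops and throughput weighted 2x and latency 1x.
A rough stand-alone sketch of that aggregation; the ranking arrays are
made-up sample data, and whether a larger combined score counts as better
depends on the genetic library's ranking convention, which is not visible in
this patch:

/* Illustrative sketch of the weighted rank aggregation described above. */
#include <stdio.h>

#define NUM_CHILDREN 8

/* child ids in each phenotype's ranking order (illustrative data) */
static const int num_ops_rank[NUM_CHILDREN]    = { 3, 1, 0, 7, 6, 5, 2, 4 };
static const int throughput_rank[NUM_CHILDREN] = { 1, 3, 7, 0, 5, 6, 4, 2 };
static const int latency_rank[NUM_CHILDREN]    = { 0, 2, 1, 3, 4, 7, 6, 5 };

int main(void)
{
        int score[NUM_CHILDREN] = { 0 };
        int i;

        /* num_ops and throughput carry weight 2, latency weight 1,
         * mirroring the switch in as_general_calc_post_fitness() */
        for (i = 0; i < NUM_CHILDREN; i++) {
                score[num_ops_rank[i]]    += i * 2;
                score[throughput_rank[i]] += i * 2;
                score[latency_rank[i]]    += i * 1;
        }

        for (i = 0; i < NUM_CHILDREN; i++)
                printf("child %d: combined score %d\n", i, score[i]);
        return 0;
}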
---

diff -puN drivers/block/Kconfig.iosched~genetic-as-sched drivers/block/Kconfig.iosched
--- linux-2.6.10/drivers/block/Kconfig.iosched~genetic-as-sched        Mon Feb 21 08:38:40 2005
+++ linux-2.6.10-moilanen/drivers/block/Kconfig.iosched        Mon Feb 21 08:38:40 2005
@@ -38,4 +38,13 @@ config IOSCHED_CFQ
           among all processes in the system. It should provide a fair
           working environment, suitable for desktop systems.

+config GENETIC_IOSCHED_AS
+        bool "Genetic Anticipatory I/O scheduler (EXPERIMENTAL)"
+        depends on IOSCHED_AS && GENETIC_LIB && EXPERIMENTAL
+        default n
+        ---help---
+          This will use a genetic algorithm to tweak the tunables of the
+          anticipatory scheduler autonomically and will adapt tunables
+          depending on the present workload.
+
 endmenu
diff -puN drivers/block/as-iosched.c~genetic-as-sched drivers/block/as-iosched.c
--- linux-2.6.10/drivers/block/as-iosched.c~genetic-as-sched        Mon Feb 21 08:38:40 2005
+++ linux-2.6.10-moilanen/drivers/block/as-iosched.c        Wed Feb 23 14:11:15 2005
@@ -20,6 +20,8 @@
 #include <linux/hash.h>
 #include <linux/rbtree.h>
 #include <linux/interrupt.h>
+#include <linux/genetic.h>
+#include <linux/random.h>

 #define REQ_SYNC 1
 #define REQ_ASYNC 0
@@ -67,6 +69,8 @@
  */
 #define MAX_THINKTIME (HZ/50UL)

+unsigned long max_thinktime = MAX_THINKTIME;
+
 /* Bits in as_io_context.state */
 enum as_io_states {
         AS_TASK_RUNNING=0,        /* Process has not exitted */
@@ -83,6 +87,94 @@ enum anticipation_status {
                                  * or timed out */
 };

+#ifdef CONFIG_GENETIC_IOSCHED_AS
+
+struct disk_stats_snapshot * as_stats_snapshot;
+
+extern void disk_stats_snapshot(phenotype_t * pt);
+
+static void as_num_ops_create_child(genetic_child_t * child);
+static void as_throughput_create_child(genetic_child_t * child);
+static void as_latency_create_child(genetic_child_t * child);
+static void as_general_create_child(genetic_child_t * child);
+
+static void as_general_set_child_genes(void * in_genes);
+
+static void as_num_ops_calc_fitness(genetic_child_t * child);
+static void as_throughput_calc_fitness(genetic_child_t * child);
+static void as_latency_calc_fitness(genetic_child_t * child);
+
+static void as_general_calc_post_fitness(phenotype_t * in_pt);
+
+static void as_shift_mutation_rate(phenotype_t * in_pt);
+
+struct genetic_ops as_num_ops_genetic_ops = {
+        .create_child = as_num_ops_create_child,
+        .calc_fitness = as_num_ops_calc_fitness,
+};
+
+struct genetic_ops as_throughput_genetic_ops = {
+        .create_child = as_throughput_create_child,
+        .calc_fitness = as_throughput_calc_fitness,
+};
+
+struct genetic_ops as_latency_genetic_ops = {
+        .create_child = as_latency_create_child,
+        .calc_fitness = as_latency_calc_fitness,
+};
+
+struct genetic_ops as_general_genetic_ops = {
+        .create_child = as_general_create_child,
+        .set_child_genes = as_general_set_child_genes,
+        .combine_genes = genetic_generic_combine_genes,
+        .mutate_child = genetic_generic_mutate_child,
+        .calc_post_fitness = as_general_calc_post_fitness,
+        .take_snapshot = disk_stats_snapshot,
+        .shift_mutation_rate = as_shift_mutation_rate
+};
+
+#define AS_NUM_CHILDREN 8
+
+#define AS_NUM_OPS_UID 1
+#define AS_NUM_OPS_NUM_GENES 0
+
+#define AS_THROUGHPUT_UID 2
+#define AS_THROUGHPUT_NUM_GENES 0
+
+#define AS_LATENCY_UID 4
+#define AS_LATENCY_NUM_GENES 0
+
+#define AS_GENERAL_UID (AS_NUM_OPS_UID | AS_THROUGHPUT_UID | AS_LATENCY_UID)
+#define AS_GENERAL_NUM_GENES 7
+struct as_genes {
+        unsigned long read_expire;
+        unsigned long write_expire;
+        unsigned long read_batch_expire;
+        unsigned long write_batch_expire;
+        unsigned long antic_expire;
+        unsigned long max_thinktime;
+        unsigned long nr_requests;
+};
+
+gene_param_t as_gene_param[AS_GENERAL_NUM_GENES] = {
+        { HZ/16, 3*HZ/16, default_read_expire, 0},        /* read_expire */
+        { HZ/8, 3*HZ/8, default_write_expire, 0},        /* write_expire */
+        { HZ/4, 3*HZ/4, default_read_batch_expire, 0},        /* read_batch_expire */
+        { HZ/16, 3*HZ/16, default_write_batch_expire, 0},        /* write_batch_expire */
+//        { HZ/300, HZ/100, default_antic_expire, 0},        /* default_antic_expire */
+        { 0, HZ/100, default_antic_expire, 0},        /* default_antic_expire */
+        { HZ/100, 3*HZ/100, MAX_THINKTIME, 0},        /* max_thinktime */
+        { BLKDEV_MIN_RQ, BLKDEV_MAX_RQ*30, BLKDEV_MAX_RQ, genetic_generic_iterative_mutate_gene}        /* nr_requests */
+};
+
+extern void disk_stats_snapshot(phenotype_t * pt);
+extern unsigned long disk_num_ops_calc_fitness(genetic_child_t * child);
+extern unsigned long disk_throughput_calc_fitness(genetic_child_t * child);
+extern unsigned long disk_latency_calc_fitness(genetic_child_t * child);
+
+LIST_HEAD(as_data_list);
+#endif
+
 struct as_data {
         /*
          * run time data
@@ -132,6 +224,9 @@ struct as_data {
         unsigned long fifo_expire[2];
         unsigned long batch_expire[2];
         unsigned long antic_expire;
+#ifdef CONFIG_GENETIC_IOSCHED_AS
+        struct list_head data_list;
+#endif
 };

 #define list_entry_fifo(ptr)        list_entry((ptr), struct as_rq, fifo)
@@ -869,7 +964,7 @@ static void as_update_iohist(struct as_d
                 if (test_bit(AS_TASK_IORUNNING, &aic->state)
                                 && in_flight == 0) {
                         thinktime = jiffies - aic->last_end_request;
-                        thinktime = min(thinktime, MAX_THINKTIME-1);
+                        thinktime = min(thinktime, max_thinktime-1);
                 } else
                         thinktime = 0;
                 as_update_thinktime(ad, aic, thinktime);
@@ -1854,6 +1949,11 @@ static void as_exit_queue(elevator_t *e)

         mempool_destroy(ad->arq_pool);
         put_io_context(ad->io_context);
+
+#ifdef CONFIG_GENETIC_IOSCHED_AS
+        list_del(&ad->data_list);
+#endif
+
         kfree(ad->hash);
         kfree(ad);
 }
@@ -1916,6 +2016,10 @@ static int as_init_queue(request_queue_t
         if (ad->write_batch_count < 2)
                 ad->write_batch_count = 2;

+#ifdef CONFIG_GENETIC_IOSCHED_AS
+        list_add_tail(&ad->data_list, &as_data_list);
+#endif
+
         return 0;
 }

@@ -2099,6 +2203,9 @@ static struct elevator_type iosched_as =
 static int __init as_init(void)
 {
         int ret;
+#ifdef CONFIG_GENETIC_IOSCHED_AS
+        genetic_t * genetic = 0;
+#endif

         arq_pool = kmem_cache_create("as_arq", sizeof(struct as_rq),
                                      0, 0, NULL, NULL);

@@ -2107,6 +2214,36 @@ static int __init as_init(void)

         ret = elv_register(&iosched_as);
         if (!ret) {
+
+#ifdef CONFIG_GENETIC_IOSCHED_AS
+                as_stats_snapshot = (struct disk_stats_snapshot *)kmalloc(sizeof(struct disk_stats_snapshot), GFP_KERNEL);
+                if (!as_stats_snapshot)
+                        panic("as: failed to malloc enough space");
+
+
+                ret = genetic_init(&genetic, AS_NUM_CHILDREN, 2 * HZ, "as-ioscheduler");
+                if (ret)
+                        panic("as: failed to init genetic lib");
+
+                if(genetic_register_phenotype(genetic, &as_num_ops_genetic_ops, AS_NUM_CHILDREN,
+                                              "num_ops", AS_NUM_OPS_NUM_GENES, AS_NUM_OPS_UID))
+                        panic("as: failed to register num_ops phenotype");
+
+                if(genetic_register_phenotype(genetic, &as_throughput_genetic_ops, AS_NUM_CHILDREN,
+                                              "throughput", AS_THROUGHPUT_NUM_GENES, AS_THROUGHPUT_UID))
+                        panic("as: failed to register throughput phenotype");
+
+                if(genetic_register_phenotype(genetic, &as_latency_genetic_ops, AS_NUM_CHILDREN,
+                                              "latency", AS_LATENCY_NUM_GENES, AS_LATENCY_UID))
+                        panic("as: failed to register latency phenotype");
+
+                if(genetic_register_phenotype(genetic, &as_general_genetic_ops, AS_NUM_CHILDREN,
+                                              "general", AS_GENERAL_NUM_GENES, AS_GENERAL_UID))
+                        panic("as: failed to register general phenotype");
+
+                genetic_start(genetic);
+#endif
+
                 /*
                  * don't allow AS to get unregistered, since we would have
                  * to browse all tasks in the system and release their
@@ -2126,6 +2263,180 @@ static void __exit as_exit(void)
         elv_unregister(&iosched_as);
 }

+#ifdef CONFIG_GENETIC_IOSCHED_AS
+
+static void as_num_ops_create_child(genetic_child_t * child)
+{
+        BUG_ON(!child);
+
+        child->genes = 0;
+        child->gene_param = 0;
+        child->num_genes = AS_NUM_OPS_NUM_GENES;
+        child->stats_snapshot = as_stats_snapshot;
+}
+
+static void as_throughput_create_child(genetic_child_t * child)
+{
+        BUG_ON(!child);
+
+        child->genes = 0;
+        child->gene_param = 0;
+        child->num_genes = AS_THROUGHPUT_NUM_GENES;
+        child->stats_snapshot = as_stats_snapshot;
+}
+
+static void as_latency_create_child(genetic_child_t * child)
+{
+        BUG_ON(!child);
+
+        child->genes = 0;
+        child->gene_param = 0;
+        child->num_genes = AS_LATENCY_NUM_GENES;
+        child->stats_snapshot = as_stats_snapshot;
+}
+
+/* need to create the genes for the child */
+static void as_general_create_child(genetic_child_t * child)
+{
+        BUG_ON(!child);
+
+        child->genes = (void *)kmalloc(sizeof(struct as_genes), GFP_KERNEL);
+        if (!child->genes)
+                panic("as_general_create_child: error mallocing space");
+
+        child->gene_param = as_gene_param;
+        child->num_genes = AS_GENERAL_NUM_GENES;
+        child->stats_snapshot = as_stats_snapshot;
+
+        genetic_create_child_spread(child, AS_NUM_CHILDREN-1);
+
+        ((struct as_genes *)child->genes)->nr_requests = BLKDEV_MAX_RQ;
+}
+
+static void as_shift_mutation_rate(phenotype_t * in_pt)
+{
+        struct list_head * p;
+        phenotype_t * pt;
+        int count = 0;
+        long rate = 0;
+
+        list_for_each(p, &in_pt->genetic->phenotype) {
+                pt = list_entry(p, phenotype_t, phenotype);
+
+                /* Look at everyone else that contributes to this
+                   phenotype */
+                if (pt->uid & AS_GENERAL_UID && pt->uid != AS_GENERAL_UID) {
+
+                        switch (pt->uid) {
+                        case AS_NUM_OPS_UID:
+                        case AS_THROUGHPUT_UID:
+                        case AS_LATENCY_UID:
+                                rate += pt->mutation_rate;
+                                count++;
+                                break;
+                        default:
+                                BUG();
+                        }
+                }
+        }
+
+        /* If we are a general phenotype that is made up of other
+           phenotypes then we take the average */
+        if (count)
+                in_pt->mutation_rate = (rate / count);
+        else
+                BUG();
+}
+
+static void as_general_set_child_genes(void * in_genes)
+{
+        struct as_genes * genes = (struct as_genes *)in_genes;
+        struct list_head * d;
+        struct as_data * ad;
+
+        list_for_each(d, &as_data_list) {
+                ad = list_entry(d, struct as_data, data_list);
+                ad->fifo_expire[REQ_SYNC] = genes->read_expire;
+                ad->fifo_expire[REQ_ASYNC] = genes->write_expire;
+                ad->antic_expire = genes->antic_expire;
+
+                if (genes->read_batch_expire > genes->write_expire)
+                        genes->read_batch_expire = genes->write_expire;
+                ad->batch_expire[REQ_SYNC] = genes->read_batch_expire;
+
+                if (genes->write_batch_expire > genes->read_expire)
+                        genes->write_batch_expire = genes->read_expire;
+                ad->batch_expire[REQ_ASYNC] = genes->write_batch_expire;
+
+                ad->q->nr_requests = genes->nr_requests;
+        }
+        max_thinktime = genes->max_thinktime;
+
+}
+
+static void as_num_ops_calc_fitness(genetic_child_t * child)
+{
+        child->fitness = disk_num_ops_calc_fitness(child);
+}
+
+static void as_throughput_calc_fitness(genetic_child_t * child)
+{
+        child->fitness = disk_throughput_calc_fitness(child);
+}
+
+static void as_latency_calc_fitness(genetic_child_t * child)
+{
+        child->fitness = disk_latency_calc_fitness(child);
+}
+
+/* Make the general the one that takes into account all the fitness
+ * routines, since these are the common genes that affect everything.
+ */
+static void as_general_calc_post_fitness(phenotype_t * in_pt)
+{
+        struct list_head * p;
+        phenotype_t * pt;
+        genetic_t * genetic = in_pt->genetic;
+        int ranking[AS_NUM_CHILDREN];
+        int weight = 1;
+        int i;
+
+        memset(ranking, 0, sizeof(ranking));
+
+        list_for_each(p, &genetic->phenotype) {
+                pt = list_entry(p, phenotype_t, phenotype);
+
+                /* Look at everyone else that contributes to this
+                   phenotype */
+                if (pt->uid & AS_GENERAL_UID && pt->uid != AS_GENERAL_UID) {
+
+                        switch (pt->uid) {
+                        case AS_NUM_OPS_UID:
+                                weight = 2;
+                                break;
+                        case AS_THROUGHPUT_UID:
+                                weight = 2;
+                                break;
+                        case AS_LATENCY_UID:
+                                weight = 1;
+                                break;
+                        default:
+                                BUG();
+                        }
+
+                        for (i = 0; i < pt->num_children; i++)
+                                ranking[pt->child_ranking[i]->id] += (i * weight);
+                }
+        }
+
+        for (i = 0; i < in_pt->num_children; i++)
+                in_pt->child_ranking[i]->fitness = ranking[i];
+
+}
+
+#endif
+
+
 module_init(as_init);
 module_exit(as_exit);

_