author     André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-02-02 23:22:20 -0300
committer  André Fabian Silva Delgado <emulatorman@parabola.nu>	2016-02-02 23:22:20 -0300
commit     5c545e1fb127a4b11ddc5f1a5ed066b853dd1a1a (patch)
tree       d4cd913bc79d37d32756a9bffbeedabf93e32579 /block
parent     b4b7ff4b08e691656c9d77c758fc355833128ac0 (diff)
Linux-libre 4.4.1-gnupck-4.4.1-gnu
Diffstat (limited to 'block')
-rw-r--r--  block/bfq-cgroup.c   | 105
-rw-r--r--  block/bfq-iosched.c  |   7
-rw-r--r--  block/bfq-sched.c    |   9
-rw-r--r--  block/bfq.h          |   8
4 files changed, 53 insertions(+), 76 deletions(-)
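
The bulk of the change below is in block/bfq-cgroup.c: the scheduler-private bfqd->group_list (and the bfqd_node links that fed it) goes away, and bfq_end_wr_async() instead walks the blkg_list that the request queue already maintains, via list_for_each_entry(). That is also what lets the patch delete bfq_disconnect_groups() outright and leave per-group cleanup to bfq_pd_offline()/bfq_pd_free(). As a rough, self-contained userspace sketch of the underlying "iterate the owner's existing list instead of mirroring it" idea (illustrative only; struct group, struct queue, q_node, node_to_group and end_wr_async are invented names, not kernel APIs):

/*
 * Illustrative userspace sketch, not kernel code: walk the list the
 * owner already keeps instead of maintaining a private duplicate.
 */
#include <stddef.h>
#include <stdio.h>

struct list_node { struct list_node *next; };

/* container_of equivalent: recover the enclosing struct from its node */
#define node_to_group(ptr) \
	((struct group *)((char *)(ptr) - offsetof(struct group, q_node)))

struct group {
	int weight;
	struct list_node q_node;   /* linked into the owner's list */
};

struct queue {
	struct list_node groups;   /* the owner already tracks its groups */
};

static void end_wr_async(struct queue *q)
{
	/* iterate the owner's list; nothing to keep in sync elsewhere */
	for (struct list_node *n = q->groups.next; n; n = n->next)
		printf("ending weight raising for group with weight %d\n",
		       node_to_group(n)->weight);
}

int main(void)
{
	struct group a = { .weight = 100 }, b = { .weight = 200 };
	struct queue q = { .groups = { .next = &a.q_node } };

	a.q_node.next = &b.q_node;
	b.q_node.next = NULL;
	end_wr_async(&q);
	return 0;
}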
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 7a6192007..5ee99ecbd 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -161,7 +161,9 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg)
 
 static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
 {
-	return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
+	struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq);
+	BUG_ON(!pd);
+	return pd_to_bfqg(pd);
 }
 
 /*
@@ -396,7 +398,8 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
 	if (!bfqg)
 		return NULL;
 
-	if (bfqg_stats_init(&bfqg->stats, gfp)) {
+	if (bfqg_stats_init(&bfqg->stats, gfp) ||
+	    bfqg_stats_init(&bfqg->dead_stats, gfp)) {
 		kfree(bfqg);
 		return NULL;
 	}
@@ -404,6 +407,20 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node)
 	return &bfqg->pd;
 }
 
+static void bfq_group_set_parent(struct bfq_group *bfqg,
+				 struct bfq_group *parent)
+{
+	struct bfq_entity *entity;
+
+	BUG_ON(!parent);
+	BUG_ON(!bfqg);
+	BUG_ON(bfqg == parent);
+
+	entity = &bfqg->entity;
+	entity->parent = parent->my_entity;
+	entity->sched_data = &parent->sched_data;
+}
+
 static void bfq_pd_init(struct blkg_policy_data *pd)
 {
 	struct blkcg_gq *blkg = pd_to_blkg(pd);
@@ -421,15 +438,16 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
 	bfqg->bfqd = bfqd;
 	bfqg->active_entities = 0;
 	bfqg->rq_pos_tree = RB_ROOT;
-
-	/* if the root_group does not exist, we are handling it right now */
-	if (bfqd->root_group && bfqg != bfqd->root_group)
-		hlist_add_head(&bfqg->bfqd_node, &bfqd->group_list);
 }
 
 static void bfq_pd_free(struct blkg_policy_data *pd)
 {
-	return kfree(pd_to_bfqg(pd));
+	struct bfq_group *bfqg = pd_to_bfqg(pd);
+
+	bfqg_stats_exit(&bfqg->stats);
+	bfqg_stats_exit(&bfqg->dead_stats);
+
+	return kfree(bfqg);
 }
 
 /* offset delta from bfqg->stats to bfqg->dead_stats */
@@ -468,20 +486,6 @@ static void bfq_pd_reset_stats(struct blkg_policy_data *pd)
 	bfqg_stats_reset(&bfqg->dead_stats);
 }
 
-static void bfq_group_set_parent(struct bfq_group *bfqg,
-				 struct bfq_group *parent)
-{
-	struct bfq_entity *entity;
-
-	BUG_ON(!parent);
-	BUG_ON(!bfqg);
-	BUG_ON(bfqg == parent);
-
-	entity = &bfqg->entity;
-	entity->parent = parent->my_entity;
-	entity->sched_data = &parent->sched_data;
-}
-
 static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
 					      struct blkcg *blkcg)
 {
@@ -721,11 +725,19 @@ static void bfq_reparent_active_entities(struct bfq_data *bfqd,
 static void bfq_pd_offline(struct blkg_policy_data *pd)
 {
 	struct bfq_service_tree *st;
-	struct bfq_group *bfqg = pd_to_bfqg(pd);
-	struct bfq_data *bfqd = bfqg->bfqd;
-	struct bfq_entity *entity = bfqg->my_entity;
+	struct bfq_group *bfqg;
+	struct bfq_data *bfqd;
+	struct bfq_entity *entity;
 	int i;
 
+	BUG_ON(!pd);
+	bfqg = pd_to_bfqg(pd);
+	BUG_ON(!bfqg);
+	bfqd = bfqg->bfqd;
+	BUG_ON(bfqd && !bfqd->root_group);
+
+	entity = bfqg->my_entity;
+
 	if (!entity) /* root group */
 		return;
 
@@ -734,8 +746,8 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
 	 * deactivating the group itself.
 	 */
 	for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
+		BUG_ON(!bfqg->sched_data.service_tree);
 		st = bfqg->sched_data.service_tree + i;
-
 		/*
 		 * The idle tree may still contain bfq_queues belonging
 		 * to exited task because they never migrated to a different
@@ -763,7 +775,6 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
 	BUG_ON(bfqg->sched_data.next_in_service);
 	BUG_ON(bfqg->sched_data.in_service_entity);
 
-	hlist_del(&bfqg->bfqd_node);
 	__bfq_deactivate_entity(entity, 0);
 	bfq_put_async_queues(bfqd, bfqg);
 	BUG_ON(entity->tree);
@@ -773,46 +784,14 @@ static void bfq_pd_offline(struct blkg_policy_data *pd)
 
 static void bfq_end_wr_async(struct bfq_data *bfqd)
 {
-	struct hlist_node *tmp;
-	struct bfq_group *bfqg;
-
-	hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
-		bfq_end_wr_async_queues(bfqd, bfqg);
-	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
-}
-
-/**
- * bfq_disconnect_groups - disconnect @bfqd from all its groups.
- * @bfqd: the device descriptor being exited.
- *
- * When the device exits we just make sure that no lookup can return
- * the now unused group structures. They will be deallocated on cgroup
- * destruction.
- */
-static void bfq_disconnect_groups(struct bfq_data *bfqd)
-{
-	struct hlist_node *tmp;
-	struct bfq_group *bfqg;
-
-	bfq_log(bfqd, "disconnect_groups beginning");
-	hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
-		hlist_del(&bfqg->bfqd_node);
-
-		__bfq_deactivate_entity(bfqg->my_entity, 0);
+	struct blkcg_gq *blkg;
 
-		/*
-		 * Don't remove from the group hash, just set an
-		 * invalid key. No lookups can race with the
-		 * assignment as bfqd is being destroyed; this
-		 * implies also that new elements cannot be added
-		 * to the list.
-		 */
-		rcu_assign_pointer(bfqg->bfqd, NULL);
+	list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
+		struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 
-		bfq_log(bfqd, "disconnect_groups: put async for group %p",
-			bfqg);
-		bfq_put_async_queues(bfqd, bfqg);
+		bfq_end_wr_async_queues(bfqd, bfqg);
 	}
+	bfq_end_wr_async_queues(bfqd, bfqd->root_group);
 }
 
 static u64 bfqio_cgroup_weight_read(struct cgroup_subsys_state *css,
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index dbce1f83f..d1f648d05 100644
--- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c
@@ -3919,7 +3919,6 @@ static void bfq_exit_queue(struct elevator_queue *e)
 	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
 		bfq_deactivate_bfqq(bfqd, bfqq, 0);
 
-	bfq_disconnect_groups(bfqd);
 	spin_unlock_irq(q->queue_lock);
 
 	bfq_shutdown_timer_wq(bfqd);
@@ -3930,6 +3929,8 @@
 
 #ifdef CONFIG_BFQ_GROUP_IOSCHED
 	blkcg_deactivate_policy(q, &blkcg_policy_bfq);
+#else
+	kfree(bfqd->root_group);
 #endif
 
 	kfree(bfqd);
@@ -4385,7 +4386,7 @@ static int __init bfq_init(void)
 	if (ret)
 		goto err_pol_unreg;
 
-	pr_info("BFQ I/O-scheduler: v7r10");
+	pr_info("BFQ I/O-scheduler: v7r11");
 
 	return 0;
 
@@ -4408,5 +4409,5 @@ static void __exit bfq_exit(void)
 module_init(bfq_init);
 module_exit(bfq_exit);
 
-MODULE_AUTHOR("Fabio Checconi, Paolo Valente");
+MODULE_AUTHOR("Arianna Avanzini, Fabio Checconi, Paolo Valente");
 MODULE_LICENSE("GPL");
diff --git a/block/bfq-sched.c b/block/bfq-sched.c
index 9328a1f09..a64fec119 100644
--- a/block/bfq-sched.c
+++ b/block/bfq-sched.c
@@ -839,13 +839,16 @@ static void bfq_activate_entity(struct bfq_entity *entity)
 static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
 {
 	struct bfq_sched_data *sd = entity->sched_data;
-	struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-	int was_in_service = entity == sd->in_service_entity;
+	struct bfq_service_tree *st;
+	int was_in_service;
 	int ret = 0;
 
-	if (!entity->on_st)
+	if (sd == NULL || !entity->on_st) /* never activated, or inactive */
 		return 0;
 
+	st = bfq_entity_service_tree(entity);
+	was_in_service = entity == sd->in_service_entity;
+
 	BUG_ON(was_in_service && entity->tree);
 
 	if (was_in_service) {
diff --git a/block/bfq.h b/block/bfq.h
index 97a677f8c..32dfceead 100644
--- a/block/bfq.h
+++ b/block/bfq.h
@@ -1,5 +1,5 @@
 /*
- * BFQ-v7r10 for 4.4.0: data structures and common functions prototypes.
+ * BFQ-v7r11 for 4.4.0: data structures and common functions prototypes.
  *
  * Based on ideas and code from CFQ:
  * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
@@ -421,7 +421,6 @@ enum bfq_device_speed {
  * @peak_rate_samples: number of samples used to calculate @peak_rate.
  * @bfq_max_budget: maximum budget allotted to a bfq_queue before
  *                  rescheduling.
- * @group_list: list of all the bfq_groups active on the device.
  * @active_list: list of all the bfq_queues active on the device.
  * @idle_list: list of all the bfq_queues idle on the device.
  * @bfq_fifo_expire: timeout for async/sync requests; when it expires
@@ -526,7 +525,6 @@ struct bfq_data {
 	u64 peak_rate;
 	int bfq_max_budget;
 
-	struct hlist_head group_list;
 	struct list_head active_list;
 	struct list_head idle_list;
 
@@ -702,8 +700,6 @@ struct bfq_group_data {
 * @entity: schedulable entity to insert into the parent group sched_data.
 * @sched_data: own sched_data, to contain child entities (they may be
 *              both bfq_queues and bfq_groups).
- * @bfqd_node: node to be inserted into the @bfqd->group_list list
- *             of the groups active on the same device; used for cleanup.
 * @bfqd: the bfq_data for the device this group acts upon.
 * @async_bfqq: array of async queues for all the tasks belonging to
 *              the group, one queue per ioprio value per ioprio_class,
@@ -737,8 +733,6 @@ struct bfq_group {
 	struct bfq_entity entity;
 	struct bfq_sched_data sched_data;
 
-	struct hlist_node bfqd_node;
-
 	void *bfqd;
 
 	struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
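
A second pattern worth noting in the bfq-cgroup.c hunks above is the symmetric setup/teardown around bfqg->stats and bfqg->dead_stats: bfq_pd_alloc() now initializes both and bails out if either initialization fails, while bfq_pd_free() calls bfqg_stats_exit() on both before kfree(). A minimal userspace sketch of that alloc/free pairing, with invented names (stats_init, stats_exit, group_alloc, group_free) rather than the kernel implementation:

/*
 * Illustrative userspace sketch, not kernel code: initialize two
 * sub-objects, undo everything on partial failure, and make the
 * destructor a mirror image of the constructor.
 */
#include <stdlib.h>

struct stats { int *counters; };

static int stats_init(struct stats *s)
{
	s->counters = calloc(16, sizeof(*s->counters));
	return s->counters ? 0 : -1;          /* non-zero means failure */
}

static void stats_exit(struct stats *s)
{
	free(s->counters);                    /* free(NULL) is a no-op */
}

struct group {
	struct stats stats;
	struct stats dead_stats;
};

static struct group *group_alloc(void)
{
	struct group *g = calloc(1, sizeof(*g));

	if (!g)
		return NULL;

	/* if either init fails, undo everything done so far */
	if (stats_init(&g->stats) || stats_init(&g->dead_stats)) {
		stats_exit(&g->stats);
		stats_exit(&g->dead_stats);
		free(g);
		return NULL;
	}
	return g;
}

static void group_free(struct group *g)
{
	/* mirror of group_alloc(): release both stat sets, then the group */
	stats_exit(&g->stats);
	stats_exit(&g->dead_stats);
	free(g);
}

int main(void)
{
	struct group *g = group_alloc();

	if (g)
		group_free(g);
	return 0;
}

Keeping group_free() a mirror image of group_alloc() means the error path in the allocator can simply reuse the same teardown steps.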