Add BLD / configuration changes

commit 876c08a90f (parent db05a1ebec)

PKGBUILD | 5 lines changed
@@ -9,7 +9,7 @@ pkgname=$pkgbase
 pkgdesc="The Linux Kernel and modules from Linus' git tree"
 depends=('coreutils' 'linux-firmware-git' 'mkinitcpio')
 
-pkgver=4.5.rc6
+pkgver=4.8.rc8
 pkgrel=1
 url="http://www.kernel.org/"
 arch=(i686 x86_64)
@@ -17,13 +17,14 @@ license=('GPL2')
 makedepends=(git bc)
 options=(!strip)
 source=($pkgname::git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git)
+#source=($pkgname::git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git)
 md5sums=('SKIP')
 sha256sums=('SKIP')
 
 # set _gitrev to a git revision (man gitrevisions) like a tag, a commit sha1
 # hash or a branch name to build from this tree instead of master
 
-_gitrev="v4.4.4"
+_gitrev="v4.7.5"
 
 ####################################################################
 # KERNEL CONFIG FILES
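The _gitrev comment is the one knob this PKGBUILD exposes for pinning the build; bumping it (together with pkgver) is what this commit does. As a sketch of how such a variable is typically consumed in a git-based PKGBUILD — the prepare() body below is hypothetical, since the actual function bodies lie outside this hunk:

# Hypothetical prepare() showing the usual _gitrev pattern;
# only the _gitrev assignment itself is visible in the diff above.
prepare() {
  cd "$srcdir/$pkgname"
  if [ -n "$_gitrev" ]; then
    git checkout "$_gitrev"   # a tag, commit sha1, or branch (man gitrevisions)
  fi                          # with _gitrev unset, the build follows master
}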
(file diff suppressed because it is too large)
@@ -1,5 +1,5 @@
 pkgname=linux-spica
-kernver=4.4.4-1spica-dirty
+kernver=4.7.5-1spica-dirty
 #bootdevice="BOOT_IMAGE=/boot/vmlinuz-$pkgname root=UUID=d670564f-2cb3-4981-9d51-6ed9c1327d47"
 #option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd intel_iommu=on pci-stub.ids=1002:683f,1002:aab0 vfio_iommu_type1.allow_unsafe_interrupts=1,kvm.ignore_msrs=1"
 #option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd quiet intremap=no_x2apic_optout zswap.enabled=1 zswap.max_pool_percent=25 zswap.compressor=lz4"
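The kernver string must match the uname -r of the kernel built from the PKGBUILD above, since module directories and initramfs images are keyed on it. Only the assignments are visible in this hunk, so the following is an assumed sketch of how the rest of the script would consume the variable:

# Assumed usage of $kernver (not shown in the hunk): refresh module
# dependencies and rebuild the initramfs for the new kernel version.
depmod "$kernver"                 # modules live in /usr/lib/modules/$kernver
mkinitcpio -k "$kernver" -g "/boot/initramfs-$pkgname.img"   # image path hypothetical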
@@ -1,7 +1,7 @@
-From d54aac68a9655574a91e6d224300aba239ea58cc Mon Sep 17 00:00:00 2001
+From 22ee35ec82fa543b65c1b6d516a086a21f723846 Mon Sep 17 00:00:00 2001
 From: Paolo Valente <paolo.valente@unimore.it>
 Date: Tue, 7 Apr 2015 13:39:12 +0200
-Subject: [PATCH 1/3] block: cgroups, kconfig, build bits for BFQ-v7r10-4.4.0
+Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.7.0
 
 Update Kconfig.iosched and do the related Makefile changes to include
 kernel configuration options for BFQ. Also increase the number of
@@ -74,7 +74,7 @@ index 421bef9..0ee5f0f 100644
 
 endmenu
 diff --git a/block/Makefile b/block/Makefile
-index 00ecc97..1ed86d5 100644
+index 9eda232..4a36683 100644
 --- a/block/Makefile
 +++ b/block/Makefile
 @@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
@@ -86,10 +86,10 @@ index 00ecc97..1ed86d5 100644
 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index c70e358..ae43492 100644
+index 3d9cf32..8d862a0 100644
 --- a/include/linux/blkdev.h
 +++ b/include/linux/blkdev.h
-@@ -44,7 +44,7 @@ struct pr_ops;
+@@ -45,7 +45,7 @@ struct pr_ops;
 * Maximum number of blkcg policies allowed to be registered concurrently.
 * Defined here to simplify include dependency.
 */
@@ -1,9 +1,8 @@
-From e1db9f07f51ef6a8b1dfd2750cd45cb8f890ec7f Mon Sep 17 00:00:00 2001
+From 2aae32be2a18a7d0da104ae42c08cb9bce9d9c7c Mon Sep 17 00:00:00 2001
 From: Paolo Valente <paolo.valente@unimore.it>
 Date: Thu, 9 May 2013 19:10:02 +0200
-Subject: [PATCH 2/3] block: introduce the BFQ-v7r10 I/O sched for 4.4.0
+Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched for 4.7.0
 
-Add the BFQ-v7r10 I/O scheduler to 4.4.0.
 The general structure is borrowed from CFQ, as much of the code for
 handling I/O contexts. Over time, several useful features have been
 ported from CFQ as well (details in the changelog in README.BFQ). A
@@ -57,12 +56,12 @@ Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
 Signed-off-by: Arianna Avanzini <avanzini@google.com>
 ---
 block/Kconfig.iosched | 6 +-
-block/bfq-cgroup.c | 1203 ++++++++++++++++
+block/bfq-cgroup.c | 1182 ++++++++++++++++
 block/bfq-ioc.c | 36 +
-block/bfq-iosched.c | 3753 +++++++++++++++++++++++++++++++++++++++++++++++++
+block/bfq-iosched.c | 3754 +++++++++++++++++++++++++++++++++++++++++++++++++
-block/bfq-sched.c | 1197 ++++++++++++++++
+block/bfq-sched.c | 1200 ++++++++++++++++
-block/bfq.h | 807 +++++++++++
+block/bfq.h | 801 +++++++++++
-6 files changed, 6998 insertions(+), 4 deletions(-)
+6 files changed, 6975 insertions(+), 4 deletions(-)
 create mode 100644 block/bfq-cgroup.c
 create mode 100644 block/bfq-ioc.c
 create mode 100644 block/bfq-iosched.c
@@ -92,10 +91,10 @@ index 0ee5f0f..f78cd1a 100644
 prompt "Default I/O scheduler"
 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
 new file mode 100644
-index 0000000..707364a
+index 0000000..8610cd6
 --- /dev/null
 +++ b/block/bfq-cgroup.c
-@@ -0,0 +1,1203 @@
+@@ -0,0 +1,1182 @@
 +/*
 + * BFQ: CGROUPS support.
 + *
@@ -259,7 +258,9 @@ index 0000000..707364a
 +
 +static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
 +{
-+ return pd_to_bfqg(blkg_to_pd(blkg, &blkcg_policy_bfq));
++ struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq);
++ BUG_ON(!pd);
++ return pd_to_bfqg(pd);
 +}
 +
 +/*
@@ -494,7 +495,8 @@ index 0000000..707364a
 + if (!bfqg)
 + return NULL;
 +
-+ if (bfqg_stats_init(&bfqg->stats, gfp)) {
++ if (bfqg_stats_init(&bfqg->stats, gfp) ||
++     bfqg_stats_init(&bfqg->dead_stats, gfp)) {
 + kfree(bfqg);
 + return NULL;
 + }
@@ -502,6 +504,20 @@ index 0000000..707364a
 + return &bfqg->pd;
 +}
 +
++static void bfq_group_set_parent(struct bfq_group *bfqg,
++     struct bfq_group *parent)
++{
++ struct bfq_entity *entity;
++
++ BUG_ON(!parent);
++ BUG_ON(!bfqg);
++ BUG_ON(bfqg == parent);
++
++ entity = &bfqg->entity;
++ entity->parent = parent->my_entity;
++ entity->sched_data = &parent->sched_data;
++}
++
 +static void bfq_pd_init(struct blkg_policy_data *pd)
 +{
 + struct blkcg_gq *blkg = pd_to_blkg(pd);
@@ -518,15 +534,16 @@ index 0000000..707364a
 + */
 + bfqg->bfqd = bfqd;
 + bfqg->active_entities = 0;
-+
-+ /* if the root_group does not exist, we are handling it right now */
-+ if (bfqd->root_group && bfqg != bfqd->root_group)
-+ hlist_add_head(&bfqg->bfqd_node, &bfqd->group_list);
 +}
 +
 +static void bfq_pd_free(struct blkg_policy_data *pd)
 +{
-+ return kfree(pd_to_bfqg(pd));
++ struct bfq_group *bfqg = pd_to_bfqg(pd);
++
++ bfqg_stats_exit(&bfqg->stats);
++ bfqg_stats_exit(&bfqg->dead_stats);
++
++ return kfree(bfqg);
 +}
 +
 +/* offset delta from bfqg->stats to bfqg->dead_stats */
@@ -565,20 +582,6 @@ index 0000000..707364a
 + bfqg_stats_reset(&bfqg->dead_stats);
 +}
 +
-+static void bfq_group_set_parent(struct bfq_group *bfqg,
-+     struct bfq_group *parent)
-+{
-+ struct bfq_entity *entity;
-+
-+ BUG_ON(!parent);
-+ BUG_ON(!bfqg);
-+ BUG_ON(bfqg == parent);
-+
-+ entity = &bfqg->entity;
-+ entity->parent = parent->my_entity;
-+ entity->sched_data = &parent->sched_data;
-+}
-+
 +static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
 +     struct blkcg *blkcg)
 +{
@@ -815,11 +818,19 @@ index 0000000..707364a
 +static void bfq_pd_offline(struct blkg_policy_data *pd)
 +{
 + struct bfq_service_tree *st;
-+ struct bfq_group *bfqg = pd_to_bfqg(pd);
-+ struct bfq_data *bfqd = bfqg->bfqd;
-+ struct bfq_entity *entity = bfqg->my_entity;
++ struct bfq_group *bfqg;
++ struct bfq_data *bfqd;
++ struct bfq_entity *entity;
 + int i;
 +
++ BUG_ON(!pd);
++ bfqg = pd_to_bfqg(pd);
++ BUG_ON(!bfqg);
++ bfqd = bfqg->bfqd;
++ BUG_ON(bfqd && !bfqd->root_group);
++
++ entity = bfqg->my_entity;
++
 + if (!entity) /* root group */
 + return;
 +
@@ -828,8 +839,8 @@ index 0000000..707364a
 + * deactivating the group itself.
 + */
 + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) {
++ BUG_ON(!bfqg->sched_data.service_tree);
 + st = bfqg->sched_data.service_tree + i;
-+
 + /*
 + * The idle tree may still contain bfq_queues belonging
 + * to exited task because they never migrated to a different
@@ -857,7 +868,6 @@ index 0000000..707364a
 + BUG_ON(bfqg->sched_data.next_in_service);
 + BUG_ON(bfqg->sched_data.in_service_entity);
 +
-+ hlist_del(&bfqg->bfqd_node);
 + __bfq_deactivate_entity(entity, 0);
 + bfq_put_async_queues(bfqd, bfqg);
 + BUG_ON(entity->tree);
@@ -867,46 +877,14 @@ index 0000000..707364a
 +
 +static void bfq_end_wr_async(struct bfq_data *bfqd)
 +{
-+ struct hlist_node *tmp;
-+ struct bfq_group *bfqg;
++ struct blkcg_gq *blkg;
++
++ list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) {
++ struct bfq_group *bfqg = blkg_to_bfqg(blkg);
 +
-+ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node)
 + bfq_end_wr_async_queues(bfqd, bfqg);
-+ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
-+}
-+
-+/**
-+ * bfq_disconnect_groups - disconnect @bfqd from all its groups.
-+ * @bfqd: the device descriptor being exited.
-+ *
-+ * When the device exits we just make sure that no lookup can return
-+ * the now unused group structures. They will be deallocated on cgroup
-+ * destruction.
-+ */
-+static void bfq_disconnect_groups(struct bfq_data *bfqd)
-+{
-+ struct hlist_node *tmp;
-+ struct bfq_group *bfqg;
-+
-+ bfq_log(bfqd, "disconnect_groups beginning");
-+ hlist_for_each_entry_safe(bfqg, tmp, &bfqd->group_list, bfqd_node) {
-+ hlist_del(&bfqg->bfqd_node);
-+
-+ __bfq_deactivate_entity(bfqg->my_entity, 0);
-+
-+ /*
-+ * Don't remove from the group hash, just set an
-+ * invalid key. No lookups can race with the
-+ * assignment as bfqd is being destroyed; this
-+ * implies also that new elements cannot be added
-+ * to the list.
-+ */
-+ rcu_assign_pointer(bfqg->bfqd, NULL);
-+
-+ bfq_log(bfqd, "disconnect_groups: put async for group %p",
-+ bfqg);
-+ bfq_put_async_queues(bfqd, bfqg);
 + }
++ bfq_end_wr_async_queues(bfqd, bfqd->root_group);
 +}
 +
 +static u64 bfqio_cgroup_weight_read(struct cgroup_subsys_state *css,
@@ -1343,10 +1321,10 @@ index 0000000..fb7bb8f
 +}
 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
 new file mode 100644
-index 0000000..d61e402
+index 0000000..f9787a6
 --- /dev/null
 +++ b/block/bfq-iosched.c
-@@ -0,0 +1,3753 @@
+@@ -0,0 +1,3754 @@
 +/*
 + * Budget Fair Queueing (BFQ) disk scheduler.
 + *
@@ -4612,7 +4590,6 @@ index 0000000..d61e402
 + list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
 + bfq_deactivate_bfqq(bfqd, bfqq, 0);
 +
-+ bfq_disconnect_groups(bfqd);
 + spin_unlock_irq(q->queue_lock);
 +
 + bfq_shutdown_timer_wq(bfqd);
@@ -4623,6 +4600,8 @@ index 0000000..d61e402
 +
 +#ifdef CONFIG_BFQ_GROUP_IOSCHED
 + blkcg_deactivate_policy(q, &blkcg_policy_bfq);
++#else
++ kfree(bfqd->root_group);
 +#endif
 +
 + kfree(bfqd);
@@ -5075,7 +5054,7 @@ index 0000000..d61e402
 + if (ret)
 + goto err_pol_unreg;
 +
-+ pr_info("BFQ I/O-scheduler: v7r10");
++ pr_info("BFQ I/O-scheduler: v7r11");
 +
 + return 0;
 +
@@ -5102,10 +5081,10 @@ index 0000000..d61e402
 +MODULE_LICENSE("GPL");
 diff --git a/block/bfq-sched.c b/block/bfq-sched.c
 new file mode 100644
-index 0000000..9328a1f
+index 0000000..a64fec1
 --- /dev/null
 +++ b/block/bfq-sched.c
-@@ -0,0 +1,1197 @@
+@@ -0,0 +1,1200 @@
 +/*
 + * BFQ: Hierarchical B-WF2Q+ scheduler.
 + *
@@ -5947,13 +5926,16 @@ index 0000000..9328a1f
 +static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue)
 +{
 + struct bfq_sched_data *sd = entity->sched_data;
-+ struct bfq_service_tree *st = bfq_entity_service_tree(entity);
-+ int was_in_service = entity == sd->in_service_entity;
++ struct bfq_service_tree *st;
++ int was_in_service;
 + int ret = 0;
 +
-+ if (!entity->on_st)
++ if (sd == NULL || !entity->on_st) /* never activated, or inactive */
 + return 0;
 +
++ st = bfq_entity_service_tree(entity);
++ was_in_service = entity == sd->in_service_entity;
++
 + BUG_ON(was_in_service && entity->tree);
 +
 + if (was_in_service) {
@@ -6305,12 +6287,12 @@ index 0000000..9328a1f
 +}
 diff --git a/block/bfq.h b/block/bfq.h
 new file mode 100644
-index 0000000..9b04d19
+index 0000000..485d0c9
 --- /dev/null
 +++ b/block/bfq.h
-@@ -0,0 +1,807 @@
+@@ -0,0 +1,801 @@
 +/*
-+ * BFQ-v7r10 for 4.4.0: data structures and common functions prototypes.
++ * BFQ-v7r11 for 4.5.0: data structures and common functions prototypes.
 + *
 + * Based on ideas and code from CFQ:
 + * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
@@ -6700,7 +6682,6 @@ index 0000000..9b04d19
 + * @peak_rate_samples: number of samples used to calculate @peak_rate.
 + * @bfq_max_budget: maximum budget allotted to a bfq_queue before
 + * rescheduling.
-+ * @group_list: list of all the bfq_groups active on the device.
 + * @active_list: list of all the bfq_queues active on the device.
 + * @idle_list: list of all the bfq_queues idle on the device.
 + * @bfq_fifo_expire: timeout for async/sync requests; when it expires
@@ -6805,7 +6786,6 @@ index 0000000..9b04d19
 + u64 peak_rate;
 + int bfq_max_budget;
 +
-+ struct hlist_head group_list;
 + struct list_head active_list;
 + struct list_head idle_list;
 +
@@ -6975,8 +6955,6 @@ index 0000000..9b04d19
 + * @entity: schedulable entity to insert into the parent group sched_data.
 + * @sched_data: own sched_data, to contain child entities (they may be
 + * both bfq_queues and bfq_groups).
-+ * @bfqd_node: node to be inserted into the @bfqd->group_list list
-+ * of the groups active on the same device; used for cleanup.
 + * @bfqd: the bfq_data for the device this group acts upon.
 + * @async_bfqq: array of async queues for all the tasks belonging to
 + * the group, one queue per ioprio value per ioprio_class,
@@ -7007,8 +6985,6 @@ index 0000000..9b04d19
 + struct bfq_entity entity;
 + struct bfq_sched_data sched_data;
 +
-+ struct hlist_node bfqd_node;
-+
 + void *bfqd;
 +
 + struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
@@ -1,8 +1,8 @@
-From 8a3e03e4d2c06f9eacf26eb786ccc312dcc2aa8e Mon Sep 17 00:00:00 2001
+From 47de1e46ef5f462e9694e5b0607aec6ad658f1e0 Mon Sep 17 00:00:00 2001
 From: Mauro Andreolini <mauro.andreolini@unimore.it>
 Date: Sun, 6 Sep 2015 16:09:05 +0200
-Subject: [PATCH 3/3] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r10 for
- 4.4.0
+Subject: [PATCH 3/4] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r11 for
+ 4.7.0
 
 A set of processes may happen to perform interleaved reads, i.e.,requests
 whose union would give rise to a sequential read pattern. There are two
@@ -41,18 +41,18 @@ Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
 3 files changed, 743 insertions(+), 14 deletions(-)
 
 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
-index 707364a..7a61920 100644
+index 8610cd6..5ee99ec 100644
 --- a/block/bfq-cgroup.c
 +++ b/block/bfq-cgroup.c
-@@ -420,6 +420,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
+@@ -437,6 +437,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
 */
 bfqg->bfqd = bfqd;
 bfqg->active_entities = 0;
 + bfqg->rq_pos_tree = RB_ROOT;
+}
 
-/* if the root_group does not exist, we are handling it right now */
-if (bfqd->root_group && bfqg != bfqd->root_group)
-@@ -526,6 +527,8 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
+static void bfq_pd_free(struct blkg_policy_data *pd)
+@@ -530,6 +531,8 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
 return bfqg;
 }
 
@@ -61,7 +61,7 @@ index 707364a..7a61920 100644
 /**
 * bfq_bfqq_move - migrate @bfqq to @bfqg.
 * @bfqd: queue descriptor.
-@@ -573,6 +576,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+@@ -577,6 +580,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
 bfqg_get(bfqg);
 
 if (busy) {
@@ -70,7 +70,7 @@ index 707364a..7a61920 100644
 bfq_activate_bfqq(bfqd, bfqq);
 }
 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
-index d61e402..6f8e52a 100644
+index f9787a6..d1f648d 100644
 --- a/block/bfq-iosched.c
 +++ b/block/bfq-iosched.c
 @@ -296,6 +296,72 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
@@ -946,7 +946,7 @@ index d61e402..6f8e52a 100644
 spin_unlock_irqrestore(q->queue_lock, flags);
 
 return 0;
-@@ -3289,6 +3945,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
+@@ -3290,6 +3946,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
 root_group->my_entity = NULL;
 root_group->bfqd = bfqd;
 #endif
@@ -954,7 +954,7 @@ index d61e402..6f8e52a 100644
 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
 root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
 }
-@@ -3369,6 +4026,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
+@@ -3370,6 +4027,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
 bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
 bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
 
@@ -964,7 +964,7 @@ index d61e402..6f8e52a 100644
 
 bfqd->bfq_large_burst_thresh = 11;
 diff --git a/block/bfq.h b/block/bfq.h
-index 9b04d19..97a677f 100644
+index 485d0c9..f73c942 100644
 --- a/block/bfq.h
 +++ b/block/bfq.h
 @@ -183,6 +183,8 @@ struct bfq_group;
@@ -1020,7 +1020,7 @@ index 9b04d19..97a677f 100644
 };
 
 enum bfq_device_speed {
-@@ -559,6 +591,9 @@ enum bfqq_state_flags {
+@@ -557,6 +589,9 @@ enum bfqq_state_flags {
 * may need softrt-next-start
 * update
 */
@@ -1030,7 +1030,7 @@ index 9b04d19..97a677f 100644
 };
 
 #define BFQ_BFQQ_FNS(name) \
-@@ -585,6 +620,9 @@ BFQ_BFQQ_FNS(budget_new);
+@@ -583,6 +618,9 @@ BFQ_BFQQ_FNS(budget_new);
 BFQ_BFQQ_FNS(IO_bound);
 BFQ_BFQQ_FNS(in_large_burst);
 BFQ_BFQQ_FNS(constantly_seeky);
@@ -1040,7 +1040,7 @@ index 9b04d19..97a677f 100644
 BFQ_BFQQ_FNS(softrt_update);
 #undef BFQ_BFQQ_FNS
 
-@@ -679,6 +717,9 @@ struct bfq_group_data {
+@@ -675,6 +713,9 @@ struct bfq_group_data {
 * are groups with more than one active @bfq_entity
 * (see the comments to the function
 * bfq_bfqq_must_not_expire()).
@@ -1050,7 +1050,7 @@ index 9b04d19..97a677f 100644
 *
 * Each (device, cgroup) pair has its own bfq_group, i.e., for each cgroup
 * there is a set of bfq_groups, each one collecting the lower-level
-@@ -707,6 +748,8 @@ struct bfq_group {
+@@ -701,6 +742,8 @@ struct bfq_group {
 
 int active_entities;
 
@@ -1059,7 +1059,7 @@ index 9b04d19..97a677f 100644
 struct bfqg_stats stats;
 struct bfqg_stats dead_stats; /* stats pushed from dead children */
 };
-@@ -717,6 +760,8 @@ struct bfq_group {
+@@ -711,6 +754,8 @@ struct bfq_group {
 
 struct bfq_queue *async_bfqq[2][IOPRIO_BE_NR];
 struct bfq_queue *async_idle_bfqq;
@@ -1068,7 +1068,7 @@ index 9b04d19..97a677f 100644
 };
 #endif
 
-@@ -793,6 +838,27 @@ static void bfq_put_bfqd_unlock(struct bfq_data *bfqd, unsigned long *flags)
+@@ -787,6 +832,27 @@ static void bfq_put_bfqd_unlock(struct bfq_data *bfqd, unsigned long *flags)
 spin_unlock_irqrestore(bfqd->queue->queue_lock, *flags);
 }
 
(file diff suppressed because it is too large)
@@ -0,0 +1,708 @@
BLD changes for Linux kernel version 4.7

---

diff --git a/init/Kconfig b/init/Kconfig
index c02d897..edf8697 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -36,6 +36,15 @@ config BROKEN_ON_SMP
 depends on BROKEN || !SMP
 default y
 
+config BLD
+ bool "An alternate CPU load distribution technique for task scheduler"
+ depends on SMP
+ default y
+ help
+  This is an alternate CPU load distribution technique based for task
+  scheduler based on The Barbershop Load Distribution algorithm. Not
+  suitable for NUMA, should work well on SMP.
+
 config INIT_ENV_ARG_LIMIT
 int
 default 32 if !UML
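The Kconfig help above compresses the whole design into three lines: instead of periodically rebalancing, BLD keeps runqueues on globally ordered lists and hands a waking task to the CPU at the cheap end of the relevant list. As a toy illustration of that selection rule — names and structure here are invented for the sketch; the real in-kernel version, which walks a pre-sorted list instead of scanning, follows in bld.h below:

/* Toy model of BLD's CFS-side pick: take the CPU whose runqueue
 * currently carries the least load. Illustrative only. */
struct toy_rq { unsigned long load; };

static int pick_least_loaded(const struct toy_rq *rq, int ncpus)
{
	unsigned long best = (unsigned long)-1;
	int cpu = 0, i;

	for (i = 0; i < ncpus; i++) {
		if (rq[i].load < best) {
			best = rq[i].load;
			cpu = i;
		}
	}
	return cpu;
}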
diff --git a/kernel/sched/bld.h b/kernel/sched/bld.h
new file mode 100644
index 0000000..f1f9fba
--- /dev/null
+++ b/kernel/sched/bld.h
@@ -0,0 +1,215 @@
+#ifdef CONFIG_BLD
+
+static DEFINE_RWLOCK(rt_list_lock);
+static LIST_HEAD(rt_rq_head);
+static LIST_HEAD(cfs_rq_head);
+static DEFINE_RWLOCK(cfs_list_lock);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
+{
+ return cfs_rq->rq;
+}
+#else
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
+{
+ return container_of(cfs_rq, struct rq, cfs);
+}
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
+{
+ return rt_rq->rq;
+}
+#else
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
+{
+ return container_of(rt_rq, struct rq, rt);
+}
+#endif
+
+static int select_cpu_for_wakeup(int task_type, struct cpumask *mask)
+{
+ int cpu = smp_processor_id(), i;
+ unsigned long load, varload;
+ struct rq *rq;
+
+ if (task_type) {
+ varload = ULONG_MAX;
+ for_each_cpu(i, mask) {
+ rq = cpu_rq(i);
+ load = rq->cfs.load.weight;
+ if (load < varload) {
+ varload = load;
+ cpu = i;
+ }
+ }
+ } else {
+ /* Here's an attempt to get a CPU within the mask where
+ * we can preempt easily. To achieve this we tried to
+ * maintain a lowbit, which indicate the lowest bit set on
+ * array bitmap. Since all CPUs contains high priority
+ * kernel threads therefore we eliminate 0, so it might not
+ * be right every time, but it's just an indicator.
+ */
+ varload = 1;
+
+ for_each_cpu(i, mask) {
+ rq = cpu_rq(i);
+ load = rq->rt.lowbit;
+ if (load >= varload) {
+ varload = load;
+ cpu = i;
+ }
+ }
+ }
+
+ return cpu;
+}
+
+static int bld_pick_cpu_cfs(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ struct cfs_rq *cfs;
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+
+ read_lock_irqsave(&cfs_list_lock, flags);
+ list_for_each_entry(cfs, &cfs_rq_head, bld_cfs_list) {
+ cpu = cpu_of(rq_of_cfs(cfs));
+ if (cpu_online(cpu))
+ break;
+ }
+ read_unlock_irqrestore(&cfs_list_lock, flags);
+ return cpu;
+}
+
+static int bld_pick_cpu_rt(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ struct rt_rq *rt;
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+
+ read_lock_irqsave(&rt_list_lock, flags);
+ list_for_each_entry(rt, &rt_rq_head, bld_rt_list) {
+ cpu = cpu_of(rq_of_rt(rt));
+ if (cpu_online(cpu))
+ break;
+ }
+ read_unlock_irqrestore(&rt_list_lock, flags);
+ return cpu;
+}
+
+static int bld_pick_cpu_domain(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ unsigned int cpu = smp_processor_id(), want_affine = 0;
+ struct cpumask *tmpmask;
+
+ if (p->nr_cpus_allowed == 1)
+ return task_cpu(p);
+
+ if (sd_flags & SD_BALANCE_WAKE) {
+ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+ want_affine = 1;
+ }
+ }
+
+ if (want_affine)
+ tmpmask = tsk_cpus_allowed(p);
+ else
+ tmpmask = sched_domain_span(cpu_rq(task_cpu(p))->sd);
+
+ if (rt_task(p))
+ cpu = select_cpu_for_wakeup(0, tmpmask);
+ else
+ cpu = select_cpu_for_wakeup(1, tmpmask);
+
+ return cpu;
+}
+
+static void track_load_rt(struct rq *rq, struct task_struct *p)
+{
+ unsigned long flag;
+ int firstbit;
+ struct rt_rq *first;
+ struct rt_prio_array *array = &rq->rt.active;
+
+ first = list_entry(rt_rq_head.next, struct rt_rq, bld_rt_list);
+ firstbit = sched_find_first_bit(array->bitmap);
+
+ /* Maintaining rt.lowbit */
+ if (firstbit > 0 && firstbit <= rq->rt.lowbit)
+ rq->rt.lowbit = firstbit;
+
+ if (rq->rt.lowbit < first->lowbit) {
+ write_lock_irqsave(&rt_list_lock, flag);
+ list_del(&rq->rt.bld_rt_list);
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
+ write_unlock_irqrestore(&rt_list_lock, flag);
+ }
+}
+
+static int bld_get_cpu(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ unsigned int cpu;
+
+ if (sd_flags == SD_BALANCE_WAKE || (sd_flags == SD_BALANCE_EXEC && (get_nr_threads(p) > 1)))
+ cpu = bld_pick_cpu_domain(p, sd_flags, wake_flags);
+ else {
+ if (rt_task(p))
+ cpu = bld_pick_cpu_rt(p, sd_flags, wake_flags);
+ else
+ cpu = bld_pick_cpu_cfs(p, sd_flags, wake_flags);
+ }
+
+ return cpu;
+}
+
+static void bld_track_load_activate(struct rq *rq, struct task_struct *p)
+{
+ unsigned long flag;
+ if (rt_task(p)) {
+ track_load_rt(rq, p);
+ } else {
+ if (rq->cfs.pos != 2) {
+ struct cfs_rq *last;
+ last = list_entry(cfs_rq_head.prev, struct cfs_rq, bld_cfs_list);
+ if (rq->cfs.load.weight >= last->load.weight) {
+ write_lock_irqsave(&cfs_list_lock, flag);
+ list_del(&rq->cfs.bld_cfs_list);
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
+ rq->cfs.pos = 2; last->pos = 1;
+ write_unlock_irqrestore(&cfs_list_lock, flag);
+ }
+ }
+ }
+}
+
+static void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
+{
+ unsigned long flag;
+ if (rt_task(p)) {
+ track_load_rt(rq, p);
+ } else {
+ if (rq->cfs.pos != 0) {
+ struct cfs_rq *first;
+ first = list_entry(cfs_rq_head.next, struct cfs_rq, bld_cfs_list);
+ if (rq->cfs.load.weight <= first->load.weight) {
+ write_lock_irqsave(&cfs_list_lock, flag);
+ list_del(&rq->cfs.bld_cfs_list);
+ list_add(&rq->cfs.bld_cfs_list, &cfs_rq_head);
+ rq->cfs.pos = 0; first->pos = 1;
+ write_unlock_irqrestore(&cfs_list_lock, flag);
+ }
+ }
+ }
+}
+#else
+static inline void bld_track_load_activate(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
+{
+}
+#endif /* CONFIG_BLD */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 97ee9ac..b2ddabc 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -24,6 +24,8 @@
 * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
 * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
 * Thomas Gleixner, Mike Kravetz
+ * 2012-Feb The Barbershop Load Distribution (BLD) algorithm - an alternate
+ * CPU load distribution technique for kernel scheduler by Rakib Mullick.
 */
 
 #include <linux/kasan.h>
@@ -86,6 +88,7 @@
 #include "sched.h"
 #include "../workqueue_internal.h"
 #include "../smpboot.h"
+#include "bld.h"
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
@@ -750,6 +753,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
 if (!(flags & ENQUEUE_RESTORE))
 sched_info_queued(rq, p);
 p->sched_class->enqueue_task(rq, p, flags);
+ if (!dl_task(p))
+ bld_track_load_activate(rq, p);
 }
 
 static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -758,6 +763,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
 if (!(flags & DEQUEUE_SAVE))
 sched_info_dequeued(rq, p);
 p->sched_class->dequeue_task(rq, p, flags);
+ if (!dl_task(p))
+ bld_track_load_deactivate(rq, p);
 }
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1587,11 +1594,17 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
 {
 lockdep_assert_held(&p->pi_lock);
 
+#ifndef CONFIG_BLD
 if (tsk_nr_cpus_allowed(p) > 1)
 cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
 else
 cpu = cpumask_any(tsk_cpus_allowed(p));
-
+#else
+ if (dl_task(p))
+ cpu = dl_sched_class.select_task_rq(p, cpu, sd_flags, wake_flags);
+ else
+ cpu = bld_get_cpu(p, sd_flags, wake_flags);
+#endif
 /*
 * In order not to call set_task_cpu() on a blocking task we need
 * to rely on ttwu() to place the task on a valid ->cpus_allowed
@@ -1794,7 +1807,11 @@ void scheduler_ipi(void)
 */
 preempt_fold_need_resched();
 
+#ifndef CONFIG_BLD
 if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
+#else
+ if (llist_empty(&this_rq()->wake_list))
+#endif
 return;
 
 /*
@@ -1816,13 +1833,16 @@ void scheduler_ipi(void)
 /*
 * Check if someone kicked us for doing the nohz idle load balance.
 */
+#ifndef CONFIG_BLD
 if (unlikely(got_nohz_idle_kick())) {
 this_rq()->idle_balance = 1;
 raise_softirq_irqoff(SCHED_SOFTIRQ);
 }
+#endif
 irq_exit();
 }
 
+#ifndef CONFIG_BLD
 static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
 {
 struct rq *rq = cpu_rq(cpu);
@@ -1836,6 +1856,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
 trace_sched_wake_idle_without_ipi(cpu);
 }
 }
+#endif /* CONFIG_BLD */
 
 void wake_up_if_idle(int cpu)
 {
@@ -1872,7 +1893,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
 struct rq *rq = cpu_rq(cpu);
 struct pin_cookie cookie;
 
-#if defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && !defined(CONFIG_BLD)
 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
 sched_clock_cpu(cpu); /* sync clocks x-cpu */
 ttwu_queue_remote(p, cpu, wake_flags);
@@ -2394,7 +2415,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
 * Silence PROVE_RCU.
 */
 raw_spin_lock_irqsave(&p->pi_lock, flags);
- set_task_cpu(p, cpu);
+ __set_task_cpu(p, cpu);
 raw_spin_unlock_irqrestore(&p->pi_lock, flags);
 
 #ifdef CONFIG_SCHED_INFO
@@ -2941,7 +2962,14 @@ void sched_exec(void)
 int dest_cpu;
 
 raw_spin_lock_irqsave(&p->pi_lock, flags);
+#ifndef CONFIG_BLD
 dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
+#else
+ if (dl_task(p))
+ dest_cpu = task_cpu(p);
+ else
+ dest_cpu = bld_get_cpu(p, SD_BALANCE_EXEC, 0);
+#endif
 if (dest_cpu == smp_processor_id())
 goto unlock;
 
@@ -3030,8 +3058,10 @@ void scheduler_tick(void)
 
 #ifdef CONFIG_SMP
 rq->idle_balance = idle_cpu(cpu);
+#ifndef CONFIG_BLD
 trigger_load_balance(rq);
 #endif
+#endif
 rq_last_tick_reset(rq);
 }
 
@@ -7262,7 +7292,9 @@ int sched_cpu_dying(unsigned int cpu)
 raw_spin_unlock_irqrestore(&rq->lock, flags);
 calc_load_migrate(rq);
 update_max_interval();
+#ifndef CONFIG_BLD
 nohz_balance_exit_idle(cpu);
+#endif
 hrtick_clear(rq);
 return 0;
 }
@@ -7468,6 +7500,15 @@ void __init sched_init(void)
 #endif /* CONFIG_SMP */
 init_rq_hrtick(rq);
 atomic_set(&rq->nr_iowait, 0);
+#ifdef CONFIG_BLD
+ INIT_LIST_HEAD(&rq->cfs.bld_cfs_list);
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
+ rq->cfs.pos = 0;
+
+ INIT_LIST_HEAD(&rq->rt.bld_rt_list);
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
+ rq->rt.lowbit = INT_MAX;
+#endif
 }
 
 set_load_weight(&init_task);
@@ -7510,6 +7551,9 @@ void __init sched_init(void)
 init_schedstats();
 
 scheduler_running = 1;
+#ifdef CONFIG_BLD
+ printk(KERN_INFO "BLD: An Alternate CPU load distributor activated.\n");
+#endif
 }
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c8c5d2d..5b694b3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4880,6 +4880,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 return 0;
 }
 
+#ifndef CONFIG_BLD
 #ifdef CONFIG_FAIR_GROUP_SCHED
 /*
 * effective_load() calculates the load change as seen from the root_task_group
@@ -5411,6 +5412,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 
 return new_cpu;
 }
+#endif /* CONFIG_BLD */
 
 /*
 * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
@@ -5741,6 +5743,7 @@ idle:
 * further scheduler activity on it and we're being very careful to
 * re-start the picking loop.
 */
+#ifndef CONFIG_BLD
 lockdep_unpin_lock(&rq->lock, cookie);
 new_tasks = idle_balance(rq);
 lockdep_repin_lock(&rq->lock, cookie);
@@ -5754,7 +5757,7 @@ idle:
 
 if (new_tasks > 0)
 goto again;
-
+#endif /* CONFIG_BLD */
 return NULL;
 }
 
@@ -6415,8 +6418,9 @@ static unsigned long task_h_load(struct task_struct *p)
 }
 #endif
 
-/********** Helpers for find_busiest_group ************************/
+#ifndef CONFIG_BLD
 
+/********** Helpers for find_busiest_group ************************/
 enum group_type {
 group_other = 0,
 group_imbalanced,
@@ -6507,6 +6511,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 
 return load_idx;
 }
+#endif /* CONFIG_BLD */
 
 static unsigned long scale_rt_capacity(int cpu)
 {
@@ -6615,6 +6620,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
 sdg->sgc->capacity = capacity;
 }
 
+#ifndef CONFIG_BLD
 /*
 * Check whether the capacity of the rq has been noticeably reduced by side
 * activity. The imbalance_pct is used for the threshold.
@@ -7848,6 +7854,7 @@ static inline int on_null_domain(struct rq *rq)
 {
 return unlikely(!rcu_dereference_sched(rq->sd));
 }
+#endif /* CONFIG_BLD */
 
 #ifdef CONFIG_NO_HZ_COMMON
 /*
@@ -7856,12 +7863,39 @@ static inline int on_null_domain(struct rq *rq)
 * needed, they will kick the idle load balancer, which then does idle
 * load balancing for all the idle CPUs.
 */
+#ifndef CONFIG_BLD
 static struct {
 cpumask_var_t idle_cpus_mask;
 atomic_t nr_cpus;
 unsigned long next_balance; /* in jiffy units */
 } nohz ____cacheline_aligned;
 
+void nohz_balance_exit_idle(unsigned int cpu)
+{
+ if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+ /*
+ * Completely isolated CPUs don't ever set, so we must test.
+ */
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+ atomic_dec(&nohz.nr_cpus);
+ }
+ clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+ }
+}
+
+static int sched_ilb_notifier(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DYING:
+ nohz_balance_exit_idle(smp_processor_id());
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
 static inline int find_new_ilb(void)
 {
 int ilb = cpumask_first(nohz.idle_cpus_mask);
@@ -7900,20 +7934,6 @@ static void nohz_balancer_kick(void)
 return;
 }
 
-void nohz_balance_exit_idle(unsigned int cpu)
-{
- if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
- /*
- * Completely isolated CPUs don't ever set, so we must test.
- */
- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
- atomic_dec(&nohz.nr_cpus);
- }
- clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
- }
-}
-
 static inline void set_cpu_sd_state_busy(void)
 {
 struct sched_domain *sd;
@@ -7930,6 +7950,8 @@ static inline void set_cpu_sd_state_busy(void)
 unlock:
 rcu_read_unlock();
 }
+#endif /* CONFIG_BLD */
+#endif /* NO_HZ_COMMON */
 
 void set_cpu_sd_state_idle(void)
 {
@@ -7954,6 +7976,7 @@ unlock:
 */
 void nohz_balance_enter_idle(int cpu)
 {
+#ifndef CONFIG_BLD
 /*
 * If this cpu is going down, then nothing needs to be done.
 */
@@ -7972,10 +7995,8 @@ void nohz_balance_enter_idle(int cpu)
 cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
 atomic_inc(&nohz.nr_cpus);
 set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-}
 #endif
-
-static DEFINE_SPINLOCK(balancing);
+}
 
 /*
 * Scale the max load_balance interval with the number of CPUs in the system.
@@ -7986,6 +8007,9 @@ void update_max_interval(void)
 max_load_balance_interval = HZ*num_online_cpus()/10;
 }
 
+#ifndef CONFIG_BLD
+static DEFINE_SPINLOCK(balancing);
+
 /*
 * It checks each scheduling domain to see if it is due to be balanced,
 * and initiates a balancing operation if so.
@@ -8273,6 +8297,7 @@ void trigger_load_balance(struct rq *rq)
 nohz_balancer_kick();
 #endif
 }
+#endif /* CONFIG_BLD */
 
 static void rq_online_fair(struct rq *rq)
 {
@@ -8288,7 +8313,6 @@ static void rq_offline_fair(struct rq *rq)
 /* Ensure any throttled groups are reachable by pick_next_task */
 unthrottle_offline_cfs_rqs(rq);
 }
-
 #endif /* CONFIG_SMP */
 
 /*
@@ -8716,7 +8740,9 @@ const struct sched_class fair_sched_class = {
 .put_prev_task = put_prev_task_fair,
 
 #ifdef CONFIG_SMP
+#ifndef CONFIG_BLD
 .select_task_rq = select_task_rq_fair,
+#endif
 .migrate_task_rq = migrate_task_rq_fair,
 
 .rq_online = rq_online_fair,
@@ -8777,6 +8803,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
 
 __init void init_sched_fair_class(void)
 {
+#ifndef CONFIG_BLD
 #ifdef CONFIG_SMP
 open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
 
@@ -8785,5 +8812,5 @@ __init void init_sched_fair_class(void)
 zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
 #endif
 #endif /* SMP */
-
+#endif /* BLD */
 }
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d5690b7..6f3589e 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1375,6 +1375,7 @@ static void yield_task_rt(struct rq *rq)
 #ifdef CONFIG_SMP
 static int find_lowest_rq(struct task_struct *task);
 
+#ifndef CONFIG_BLD
 static int
 select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
@@ -1430,6 +1431,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
 out:
 return cpu;
 }
+#endif /* CONFIG_BLD */
 
 static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 {
@@ -2335,7 +2337,9 @@ const struct sched_class rt_sched_class = {
 .put_prev_task = put_prev_task_rt,
 
 #ifdef CONFIG_SMP
+#ifndef CONFIG_BLD
 .select_task_rq = select_task_rq_rt,
+#endif
 
 .set_cpus_allowed = set_cpus_allowed_common,
 .rq_online = rq_online_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 898c0d2..720d524 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -415,9 +415,8 @@ struct cfs_rq {
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #endif /* CONFIG_SMP */
 
-#ifdef CONFIG_FAIR_GROUP_SCHED
 struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
-
+#ifdef CONFIG_FAIR_GROUP_SCHED
 /*
 * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
 * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
@@ -441,6 +440,11 @@ struct cfs_rq {
 struct list_head throttled_list;
 #endif /* CONFIG_CFS_BANDWIDTH */
 #endif /* CONFIG_FAIR_GROUP_SCHED */
+
+#ifdef CONFIG_BLD
+ struct list_head bld_cfs_list;
+ char pos;
+#endif
 };
 
 static inline int rt_bandwidth_enabled(void)
@@ -486,12 +490,16 @@ struct rt_rq {
 /* Nests inside the rq lock: */
 raw_spinlock_t rt_runtime_lock;
 
+ struct rq *rq;
 #ifdef CONFIG_RT_GROUP_SCHED
 unsigned long rt_nr_boosted;
 
- struct rq *rq;
 struct task_group *tg;
 #endif
+#ifdef CONFIG_BLD
+ struct list_head bld_rt_list;
+ int lowbit;
+#endif
 };
 
 /* Deadline class' related fields in a runqueue */
(file diff suppressed because it is too large)
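CONFIG_BLD defaults to y once the patch is applied, so after booting the rebuilt kernel the quickest sanity check is the activation message that sched_init() prints. A sketch, assuming the kernel was also built with CONFIG_IKCONFIG_PROC so /proc/config.gz exists:

# Post-boot sanity check for BLD (paths assume CONFIG_IKCONFIG_PROC):
zgrep '^CONFIG_BLD=' /proc/config.gz     # expect: CONFIG_BLD=y
dmesg | grep 'BLD: An Alternate CPU load distributor activated'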