
macbook pro preparation

This commit is contained in:
Sangbum Kim 2016-12-06 14:23:25 +09:00
parent 876c08a90f
commit 5510415b51
12 changed files with 3300 additions and 1327 deletions

1
.gitignore vendored
View File

@ -45,3 +45,4 @@
# Linux trash folder which might appear on any partition or disk # Linux trash folder which might appear on any partition or disk
.Trash-* .Trash-*
/linux-spica/

View File

@ -7,9 +7,9 @@ pkgname=$pkgbase
# comment the following line to build a single package containing the kernel and the headers # comment the following line to build a single package containing the kernel and the headers
(( 1 )) && pkgname=("$pkgbase" "$pkgbase-headers" "$pkgbase-docs") (( 1 )) && pkgname=("$pkgbase" "$pkgbase-headers" "$pkgbase-docs")
pkgdesc="The Linux Kernel and modules from Linus' git tree" pkgdesc="The Linux Kernel and modules from Linus' git tree"
depends=('coreutils' 'linux-firmware-git' 'mkinitcpio') depends=('coreutils' 'linux-firmware' 'mkinitcpio')
pkgver=4.8.rc8 pkgver=4.9.rc7
pkgrel=1 pkgrel=1
url="http://www.kernel.org/" url="http://www.kernel.org/"
arch=(i686 x86_64) arch=(i686 x86_64)
@ -24,7 +24,7 @@ sha256sums=('SKIP')
# set _gitrev to a git revision (man gitrevisions) like a tag, a commit sha1 # set _gitrev to a git revision (man gitrevisions) like a tag, a commit sha1
# hash or a branch name to build from this tree instead of master # hash or a branch name to build from this tree instead of master
_gitrev="v4.7.5" _gitrev="v4.8.12"
#################################################################### ####################################################################
# KERNEL CONFIG FILES # KERNEL CONFIG FILES

File diff suppressed because it is too large

View File

@ -1,5 +1,5 @@
pkgname=linux-spica pkgname=linux-spica
kernver=4.7.5-1spica-dirty kernver=4.8.12-1spica-dirty
#bootdevice="BOOT_IMAGE=/boot/vmlinuz-$pkgname root=UUID=d670564f-2cb3-4981-9d51-6ed9c1327d47" #bootdevice="BOOT_IMAGE=/boot/vmlinuz-$pkgname root=UUID=d670564f-2cb3-4981-9d51-6ed9c1327d47"
#option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd intel_iommu=on pci-stub.ids=1002:683f,1002:aab0 vfio_iommu_type1.allow_unsafe_interrupts=1,kvm.ignore_msrs=1" #option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd intel_iommu=on pci-stub.ids=1002:683f,1002:aab0 vfio_iommu_type1.allow_unsafe_interrupts=1,kvm.ignore_msrs=1"
#option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd quiet intremap=no_x2apic_optout zswap.enabled=1 zswap.max_pool_percent=25 zswap.compressor=lz4" #option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd quiet intremap=no_x2apic_optout zswap.enabled=1 zswap.max_pool_percent=25 zswap.compressor=lz4"
@ -12,19 +12,19 @@ post_install () {
echo "> Generating initramfs, using mkinitcpio. Please wait..." echo "> Generating initramfs, using mkinitcpio. Please wait..."
echo ">" echo ">"
mkinitcpio -p $pkgname mkinitcpio -p $pkgname
echo "> Modifing efibootmgr..." # echo "> Modifing efibootmgr..."
efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){printf "efibootmgr -b %s -B;echo \">> remove entry : %s\";",m[1],m[2]}'|sh # efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){printf "efibootmgr -b %s -B;echo \">> remove entry : %s\";",m[1],m[2]}'|sh
echo "> Copy efistub from boot" # echo "> Copy efistub from boot"
cp -fv "boot/vmlinuz-$pkgname" "boot/efi/EFI/spi-ca/kernel.efi" # cp -fv "boot/vmlinuz-$pkgname" "boot/efi/EFI/spi-ca/kernel.efi"
cp -fv "boot/initramfs-$pkgname.img" "boot/efi/EFI/spi-ca/initrd" # cp -fv "boot/initramfs-$pkgname.img" "boot/efi/EFI/spi-ca/initrd"
echo "> Registering efistub " # echo "> Registering efistub "
#echo 'efibootmgr -c -g -d /dev/sda -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel.efi" #-u "$bootdevice $option"' #echo 'efibootmgr -c -g -d /dev/sda -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel.efi" #-u "$bootdevice $option"'
efibootmgr -c -g -d /dev/sde -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel" # -u "$bootdevice $option" # efibootmgr -c -g -d /dev/sde -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel" # -u "$bootdevice $option"
echo "> Reordering Bootorder..." # echo "> Reordering Bootorder..."
newentry=`efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){print m[1]}'` # newentry=`efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){print m[1]}'`
prebootorder=`efibootmgr |grep BootOrder |cut -d : -f 2 |tr -d ' '` # prebootorder=`efibootmgr |grep BootOrder |cut -d : -f 2 |tr -d ' '`
efibootmgr -O # efibootmgr -O
efibootmgr -o ${newentry},${prebootorder} # efibootmgr -o ${newentry},${prebootorder}
echo "> OK!" echo "> OK!"
} }
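
With the efibootmgr block above commented out, the EFI stub registration has to be done by hand when it is needed again. A minimal sketch based on the disabled commands, run as root; the disk, partition and kernel version here are assumptions, adjust them to the real ESP layout:

# copy the freshly built images onto the ESP (same destinations the script used)
cp -fv /boot/vmlinuz-linux-spica       /boot/efi/EFI/spi-ca/kernel.efi
cp -fv /boot/initramfs-linux-spica.img /boot/efi/EFI/spi-ca/initrd
# create a boot entry pointing at the EFI stub
efibootmgr -c -g -d /dev/sda -p 1 -L "spi-ca_v4.8.12-1spica-dirty" -l '\EFI\spi-ca\kernel.efi'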

View File

@ -1,7 +1,7 @@
From 22ee35ec82fa543b65c1b6d516a086a21f723846 Mon Sep 17 00:00:00 2001 From f2ebe596e7d72e96e0fb2be87be90f0b96e6f1b3 Mon Sep 17 00:00:00 2001
From: Paolo Valente <paolo.valente@unimore.it> From: Paolo Valente <paolo.valente@unimore.it>
Date: Tue, 7 Apr 2015 13:39:12 +0200 Date: Tue, 7 Apr 2015 13:39:12 +0200
Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.7.0 Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.8.0
Update Kconfig.iosched and do the related Makefile changes to include Update Kconfig.iosched and do the related Makefile changes to include
kernel configuration options for BFQ. Also increase the number of kernel configuration options for BFQ. Also increase the number of
@ -86,7 +86,7 @@ index 9eda232..4a36683 100644
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3d9cf32..8d862a0 100644 index e79055c..931ff1e 100644
--- a/include/linux/blkdev.h --- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h +++ b/include/linux/blkdev.h
@@ -45,7 +45,7 @@ struct pr_ops; @@ -45,7 +45,7 @@ struct pr_ops;
@ -96,8 +96,8 @@ index 3d9cf32..8d862a0 100644
-#define BLKCG_MAX_POLS 2 -#define BLKCG_MAX_POLS 2
+#define BLKCG_MAX_POLS 3 +#define BLKCG_MAX_POLS 3
struct request;
typedef void (rq_end_io_fn)(struct request *, int); typedef void (rq_end_io_fn)(struct request *, int);
-- --
1.9.1 2.7.4 (Apple Git-66)
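
The Kconfig/Makefile bits above only make BFQ selectable at build time; a minimal sketch of how the scheduler would then be enabled and used (the CONFIG_ symbols come from the patched Kconfig.iosched, while the disk name sda and the boot-time default are assumptions about the target system):

# build time, in .config: enable BFQ and optionally make it the default
# CONFIG_IOSCHED_BFQ=y
# CONFIG_DEFAULT_BFQ=y
# runtime: switch one disk over to BFQ
echo bfq > /sys/block/sda/queue/scheduler
# or pick it globally on the kernel command line: elevator=bfq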

View File

@ -1,7 +1,8 @@
From 2aae32be2a18a7d0da104ae42c08cb9bce9d9c7c Mon Sep 17 00:00:00 2001 From d9af6fcc4167cbb8433b10bbf3663c8297487f52 Mon Sep 17 00:00:00 2001
From: Paolo Valente <paolo.valente@unimore.it> From: Paolo Valente <paolo.valente@unimore.it>
Date: Thu, 9 May 2013 19:10:02 +0200 Date: Thu, 9 May 2013 19:10:02 +0200
Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched for 4.7.0 Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched, to be ported to
4.8.0
The general structure is borrowed from CFQ, as much of the code for The general structure is borrowed from CFQ, as much of the code for
handling I/O contexts. Over time, several useful features have been handling I/O contexts. Over time, several useful features have been
@ -56,12 +57,12 @@ Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
Signed-off-by: Arianna Avanzini <avanzini@google.com> Signed-off-by: Arianna Avanzini <avanzini@google.com>
--- ---
block/Kconfig.iosched | 6 +- block/Kconfig.iosched | 6 +-
block/bfq-cgroup.c | 1182 ++++++++++++++++ block/bfq-cgroup.c | 1186 ++++++++++++++++
block/bfq-ioc.c | 36 + block/bfq-ioc.c | 36 +
block/bfq-iosched.c | 3754 +++++++++++++++++++++++++++++++++++++++++++++++++ block/bfq-iosched.c | 3763 +++++++++++++++++++++++++++++++++++++++++++++++++
block/bfq-sched.c | 1200 ++++++++++++++++ block/bfq-sched.c | 1199 ++++++++++++++++
block/bfq.h | 801 +++++++++++ block/bfq.h | 801 +++++++++++
6 files changed, 6975 insertions(+), 4 deletions(-) 6 files changed, 6987 insertions(+), 4 deletions(-)
create mode 100644 block/bfq-cgroup.c create mode 100644 block/bfq-cgroup.c
create mode 100644 block/bfq-ioc.c create mode 100644 block/bfq-ioc.c
create mode 100644 block/bfq-iosched.c create mode 100644 block/bfq-iosched.c
@ -91,10 +92,10 @@ index 0ee5f0f..f78cd1a 100644
prompt "Default I/O scheduler" prompt "Default I/O scheduler"
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
new file mode 100644 new file mode 100644
index 0000000..8610cd6 index 0000000..8b08a57
--- /dev/null --- /dev/null
+++ b/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c
@@ -0,0 +1,1182 @@ @@ -0,0 +1,1186 @@
+/* +/*
+ * BFQ: CGROUPS support. + * BFQ: CGROUPS support.
+ * + *
@ -259,7 +260,9 @@ index 0000000..8610cd6
+static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) +static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
+{ +{
+ struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq); + struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq);
+
+ BUG_ON(!pd); + BUG_ON(!pd);
+
+ return pd_to_bfqg(pd); + return pd_to_bfqg(pd);
+} +}
+ +
@ -379,7 +382,8 @@ index 0000000..8610cd6
+ blkg_stat_add_aux(&from->time, &from->time); + blkg_stat_add_aux(&from->time, &from->time);
+ blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time); + blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
+ blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); + blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
+ blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples); + blkg_stat_add_aux(&to->avg_queue_size_samples,
+ &from->avg_queue_size_samples);
+ blkg_stat_add_aux(&to->dequeue, &from->dequeue); + blkg_stat_add_aux(&to->dequeue, &from->dequeue);
+ blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time); + blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
+ blkg_stat_add_aux(&to->idle_time, &from->idle_time); + blkg_stat_add_aux(&to->idle_time, &from->idle_time);
@ -471,9 +475,9 @@ index 0000000..8610cd6
+} +}
+ +
+static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd) +static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
+ { +{
+ return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL; + return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
+ } +}
+ +
+static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) +static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
+{ +{
@ -562,8 +566,8 @@ index 0000000..8610cd6
+} +}
+ +
+/* to be used by recursive prfill, sums live and dead rwstats recursively */ +/* to be used by recursive prfill, sums live and dead rwstats recursively */
+static struct blkg_rwstat bfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, +static struct blkg_rwstat
+ int off) +bfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
+{ +{
+ struct blkg_rwstat a, b; + struct blkg_rwstat a, b;
+ +
@ -776,7 +780,6 @@ index 0000000..8610cd6
+ +
+ BUG_ON(!bfqq); + BUG_ON(!bfqq);
+ bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group); + bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
+ return;
+} +}
+ +
+/** +/**
@ -804,8 +807,6 @@ index 0000000..8610cd6
+ if (bfqg->sched_data.in_service_entity) + if (bfqg->sched_data.in_service_entity)
+ bfq_reparent_leaf_entity(bfqd, + bfq_reparent_leaf_entity(bfqd,
+ bfqg->sched_data.in_service_entity); + bfqg->sched_data.in_service_entity);
+
+ return;
+} +}
+ +
+/** +/**
@ -930,6 +931,7 @@ index 0000000..8610cd6
+ bfqgd->weight = (unsigned short)val; + bfqgd->weight = (unsigned short)val;
+ hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { + hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
+ struct bfq_group *bfqg = blkg_to_bfqg(blkg); + struct bfq_group *bfqg = blkg_to_bfqg(blkg);
+
+ if (!bfqg) + if (!bfqg)
+ continue; + continue;
+ /* + /*
@ -1043,7 +1045,8 @@ index 0000000..8610cd6
+ return 0; + return 0;
+} +}
+ +
+static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) +static struct bfq_group *
+bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+{ +{
+ int ret; + int ret;
+ +
@ -1051,22 +1054,22 @@ index 0000000..8610cd6
+ if (ret) + if (ret)
+ return NULL; + return NULL;
+ +
+ return blkg_to_bfqg(bfqd->queue->root_blkg); + return blkg_to_bfqg(bfqd->queue->root_blkg);
+} +}
+ +
+static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) +static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
+{ +{
+ struct bfq_group_data *bgd; + struct bfq_group_data *bgd;
+ +
+ bgd = kzalloc(sizeof(*bgd), GFP_KERNEL); + bgd = kzalloc(sizeof(*bgd), GFP_KERNEL);
+ if (!bgd) + if (!bgd)
+ return NULL; + return NULL;
+ return &bgd->pd; + return &bgd->pd;
+} +}
+ +
+static void bfq_cpd_free(struct blkcg_policy_data *cpd) +static void bfq_cpd_free(struct blkcg_policy_data *cpd)
+{ +{
+ kfree(cpd_to_bfqgd(cpd)); + kfree(cpd_to_bfqgd(cpd));
+} +}
+ +
+static struct cftype bfqio_files_dfl[] = { +static struct cftype bfqio_files_dfl[] = {
@ -1201,20 +1204,19 @@ index 0000000..8610cd6
+}; +};
+ +
+static struct blkcg_policy blkcg_policy_bfq = { +static struct blkcg_policy blkcg_policy_bfq = {
+ .dfl_cftypes = bfqio_files_dfl, + .dfl_cftypes = bfqio_files_dfl,
+ .legacy_cftypes = bfqio_files, + .legacy_cftypes = bfqio_files,
+ +
+ .pd_alloc_fn = bfq_pd_alloc, + .pd_alloc_fn = bfq_pd_alloc,
+ .pd_init_fn = bfq_pd_init, + .pd_init_fn = bfq_pd_init,
+ .pd_offline_fn = bfq_pd_offline, + .pd_offline_fn = bfq_pd_offline,
+ .pd_free_fn = bfq_pd_free, + .pd_free_fn = bfq_pd_free,
+ .pd_reset_stats_fn = bfq_pd_reset_stats, + .pd_reset_stats_fn = bfq_pd_reset_stats,
+
+ .cpd_alloc_fn = bfq_cpd_alloc,
+ .cpd_init_fn = bfq_cpd_init,
+ .cpd_bind_fn = bfq_cpd_init,
+ .cpd_free_fn = bfq_cpd_free,
+ +
+ .cpd_alloc_fn = bfq_cpd_alloc,
+ .cpd_init_fn = bfq_cpd_init,
+ .cpd_bind_fn = bfq_cpd_init,
+ .cpd_free_fn = bfq_cpd_free,
+}; +};
+ +
+#else +#else
@ -1223,6 +1225,7 @@ index 0000000..8610cd6
+ struct bfq_group *bfqg) + struct bfq_group *bfqg)
+{ +{
+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
+
+ entity->weight = entity->new_weight; + entity->weight = entity->new_weight;
+ entity->orig_weight = entity->new_weight; + entity->orig_weight = entity->new_weight;
+ if (bfqq) { + if (bfqq) {
@ -1236,6 +1239,7 @@ index 0000000..8610cd6
+bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) +bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
+{ +{
+ struct bfq_data *bfqd = bic_to_bfqd(bic); + struct bfq_data *bfqd = bic_to_bfqd(bic);
+
+ return bfqd->root_group; + return bfqd->root_group;
+} +}
+ +
@ -1257,12 +1261,13 @@ index 0000000..8610cd6
+} +}
+ +
+static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, +static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
+ struct blkcg *blkcg) + struct blkcg *blkcg)
+{ +{
+ return bfqd->root_group; + return bfqd->root_group;
+} +}
+ +
+static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) +static struct bfq_group *
+bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
+{ +{
+ struct bfq_group *bfqg; + struct bfq_group *bfqg;
+ int i; + int i;
@ -1321,10 +1326,10 @@ index 0000000..fb7bb8f
+} +}
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
new file mode 100644 new file mode 100644
index 0000000..f9787a6 index 0000000..85e2169
--- /dev/null --- /dev/null
+++ b/block/bfq-iosched.c +++ b/block/bfq-iosched.c
@@ -0,0 +1,3754 @@ @@ -0,0 +1,3763 @@
+/* +/*
+ * Budget Fair Queueing (BFQ) disk scheduler. + * Budget Fair Queueing (BFQ) disk scheduler.
+ * + *
@ -1542,7 +1547,7 @@ index 0000000..f9787a6
+ unsigned long back_max; + unsigned long back_max;
+#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ +#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
+#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ +#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
+ unsigned wrap = 0; /* bit mask: requests behind the disk head? */ + unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
+ +
+ if (!rq1 || rq1 == rq2) + if (!rq1 || rq1 == rq2)
+ return rq2; + return rq2;
@ -1597,12 +1602,11 @@ index 0000000..f9787a6
+ return rq1; + return rq1;
+ else if (d2 < d1) + else if (d2 < d1)
+ return rq2; + return rq2;
+ else { +
+ if (s1 >= s2) + if (s1 >= s2)
+ return rq1; + return rq1;
+ else + else
+ return rq2; + return rq2;
+ }
+ +
+ case BFQ_RQ2_WRAP: + case BFQ_RQ2_WRAP:
+ return rq1; + return rq1;
@ -1889,7 +1893,7 @@ index 0000000..f9787a6
+ */ + */
+ hlist_for_each_entry(bfqq_item, &bfqd->burst_list, + hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
+ burst_list_node) + burst_list_node)
+ bfq_mark_bfqq_in_large_burst(bfqq_item); + bfq_mark_bfqq_in_large_burst(bfqq_item);
+ bfq_mark_bfqq_in_large_burst(bfqq); + bfq_mark_bfqq_in_large_burst(bfqq);
+ +
+ /* + /*
@ -2288,7 +2292,7 @@ index 0000000..f9787a6
+ bfqd->rq_in_driver++; + bfqd->rq_in_driver++;
+ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); + bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
+ bfq_log(bfqd, "activate_request: new bfqd->last_position %llu", + bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
+ (long long unsigned)bfqd->last_position); + (unsigned long long) bfqd->last_position);
+} +}
+ +
+static void bfq_deactivate_request(struct request_queue *q, struct request *rq) +static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
@ -2595,6 +2599,7 @@ index 0000000..f9787a6
+{ +{
+ struct bfq_queue *bfqq = bfqd->in_service_queue; + struct bfq_queue *bfqq = bfqd->in_service_queue;
+ unsigned int timeout_coeff; + unsigned int timeout_coeff;
+
+ if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) + if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
+ timeout_coeff = 1; + timeout_coeff = 1;
+ else + else
@ -2667,6 +2672,7 @@ index 0000000..f9787a6
+static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) +static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
+{ +{
+ struct bfq_entity *entity = &bfqq->entity; + struct bfq_entity *entity = &bfqq->entity;
+
+ return entity->budget - entity->service; + return entity->budget - entity->service;
+} +}
+ +
@ -2906,6 +2912,7 @@ index 0000000..f9787a6
+ if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES && + if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
+ update) { + update) {
+ int dev_type = blk_queue_nonrot(bfqd->queue); + int dev_type = blk_queue_nonrot(bfqd->queue);
+
+ if (bfqd->bfq_user_max_budget == 0) { + if (bfqd->bfq_user_max_budget == 0) {
+ bfqd->bfq_max_budget = + bfqd->bfq_max_budget =
+ bfq_calc_max_budget(bfqd->peak_rate, + bfq_calc_max_budget(bfqd->peak_rate,
@ -3065,6 +3072,7 @@ index 0000000..f9787a6
+ enum bfqq_expiration reason) + enum bfqq_expiration reason)
+{ +{
+ bool slow; + bool slow;
+
+ BUG_ON(bfqq != bfqd->in_service_queue); + BUG_ON(bfqq != bfqd->in_service_queue);
+ +
+ /* + /*
@ -3098,7 +3106,7 @@ index 0000000..f9787a6
+ } + }
+ +
+ if (reason == BFQ_BFQQ_TOO_IDLE && + if (reason == BFQ_BFQQ_TOO_IDLE &&
+ bfqq->entity.service <= 2 * bfqq->entity.budget / 10 ) + bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
+ bfq_clear_bfqq_IO_bound(bfqq); + bfq_clear_bfqq_IO_bound(bfqq);
+ +
+ if (bfqd->low_latency && bfqq->wr_coeff == 1) + if (bfqd->low_latency && bfqq->wr_coeff == 1)
@ -3244,7 +3252,7 @@ index 0000000..f9787a6
+ */ + */
+ idling_boosts_thr = !bfqd->hw_tag || + idling_boosts_thr = !bfqd->hw_tag ||
+ (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) && + (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
+ bfq_bfqq_idle_window(bfqq)) ; + bfq_bfqq_idle_window(bfqq));
+ +
+ /* + /*
+ * The value of the next variable, + * The value of the next variable,
@ -3356,7 +3364,7 @@ index 0000000..f9787a6
+ * (i) each of these processes must get the same throughput as + * (i) each of these processes must get the same throughput as
+ * the others; + * the others;
+ * (ii) all these processes have the same I/O pattern + * (ii) all these processes have the same I/O pattern
+ (either sequential or random). + * (either sequential or random).
+ * In fact, in such a scenario, the drive will tend to treat + * In fact, in such a scenario, the drive will tend to treat
+ * the requests of each of these processes in about the same + * the requests of each of these processes in about the same
+ * way as the requests of the others, and thus to provide + * way as the requests of the others, and thus to provide
@ -3553,6 +3561,7 @@ index 0000000..f9787a6
+static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) +static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
+{ +{
+ struct bfq_entity *entity = &bfqq->entity; + struct bfq_entity *entity = &bfqq->entity;
+
+ if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ + if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
+ bfq_log_bfqq(bfqd, bfqq, + bfq_log_bfqq(bfqd, bfqq,
+ "raising period dur %u/%u msec, old coeff %u, w %d(%d)", + "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
@ -3643,7 +3652,7 @@ index 0000000..f9787a6
+ bfq_log_bfqq(bfqd, bfqq, + bfq_log_bfqq(bfqd, bfqq,
+ "dispatched %u sec req (%llu), budg left %d", + "dispatched %u sec req (%llu), budg left %d",
+ blk_rq_sectors(rq), + blk_rq_sectors(rq),
+ (long long unsigned)blk_rq_pos(rq), + (unsigned long long) blk_rq_pos(rq),
+ bfq_bfqq_budget_left(bfqq)); + bfq_bfqq_budget_left(bfqq));
+ +
+ dispatched++; + dispatched++;
@ -3841,7 +3850,8 @@ index 0000000..f9787a6
+ * Update the entity prio values; note that the new values will not + * Update the entity prio values; note that the new values will not
+ * be used until the next (re)activation. + * be used until the next (re)activation.
+ */ + */
+static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +static void
+bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
+{ +{
+ struct task_struct *tsk = current; + struct task_struct *tsk = current;
+ int ioprio_class; + int ioprio_class;
@ -3874,8 +3884,8 @@ index 0000000..f9787a6
+ } + }
+ +
+ if (bfqq->new_ioprio < 0 || bfqq->new_ioprio >= IOPRIO_BE_NR) { + if (bfqq->new_ioprio < 0 || bfqq->new_ioprio >= IOPRIO_BE_NR) {
+ printk(KERN_CRIT "bfq_set_next_ioprio_data: new_ioprio %d\n", + pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
+ bfqq->new_ioprio); + bfqq->new_ioprio);
+ BUG(); + BUG();
+ } + }
+ +
@ -3999,7 +4009,7 @@ index 0000000..f9787a6
+ +
+ if (bfqq) { + if (bfqq) {
+ bfq_init_bfqq(bfqd, bfqq, bic, current->pid, + bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
+ is_sync); + is_sync);
+ bfq_init_entity(&bfqq->entity, bfqg); + bfq_init_entity(&bfqq->entity, bfqg);
+ bfq_log_bfqq(bfqd, bfqq, "allocated"); + bfq_log_bfqq(bfqd, bfqq, "allocated");
+ } else { + } else {
@ -4187,7 +4197,7 @@ index 0000000..f9787a6
+ bfq_log_bfqq(bfqd, bfqq, + bfq_log_bfqq(bfqd, bfqq,
+ "rq_enqueued: idle_window=%d (seeky %d, mean %llu)", + "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
+ bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq), + bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
+ (long long unsigned)bfqq->seek_mean); + (unsigned long long) bfqq->seek_mean);
+ +
+ bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); + bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
+ +
@ -4738,8 +4748,7 @@ index 0000000..f9787a6
+ +
+static void bfq_slab_kill(void) +static void bfq_slab_kill(void)
+{ +{
+ if (bfq_pool) + kmem_cache_destroy(bfq_pool);
+ kmem_cache_destroy(bfq_pool);
+} +}
+ +
+static int __init bfq_slab_setup(void) +static int __init bfq_slab_setup(void)
@ -4770,6 +4779,7 @@ index 0000000..f9787a6
+static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page) +static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
+{ +{
+ struct bfq_data *bfqd = e->elevator_data; + struct bfq_data *bfqd = e->elevator_data;
+
+ return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ? + return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
+ jiffies_to_msecs(bfqd->bfq_wr_max_time) : + jiffies_to_msecs(bfqd->bfq_wr_max_time) :
+ jiffies_to_msecs(bfq_wr_duration(bfqd))); + jiffies_to_msecs(bfq_wr_duration(bfqd)));
@ -4788,25 +4798,29 @@ index 0000000..f9787a6
+ +
+ num_char += sprintf(page + num_char, "Active:\n"); + num_char += sprintf(page + num_char, "Active:\n");
+ list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { + list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
+ num_char += sprintf(page + num_char, + num_char += sprintf(page + num_char,
+ "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n", + "pid%d: weight %hu, nr_queued %d %d, ",
+ bfqq->pid, + bfqq->pid,
+ bfqq->entity.weight, + bfqq->entity.weight,
+ bfqq->queued[0], + bfqq->queued[0],
+ bfqq->queued[1], + bfqq->queued[1]);
+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), + num_char += sprintf(page + num_char,
+ jiffies_to_msecs(bfqq->wr_cur_max_time)); + "dur %d/%u\n",
+ jiffies_to_msecs(
+ jiffies -
+ bfqq->last_wr_start_finish),
+ jiffies_to_msecs(bfqq->wr_cur_max_time));
+ } + }
+ +
+ num_char += sprintf(page + num_char, "Idle:\n"); + num_char += sprintf(page + num_char, "Idle:\n");
+ list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { + list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
+ num_char += sprintf(page + num_char, + num_char += sprintf(page + num_char,
+ "pid%d: weight %hu, dur %d/%u\n", + "pid%d: weight %hu, dur %d/%u\n",
+ bfqq->pid, + bfqq->pid,
+ bfqq->entity.weight, + bfqq->entity.weight,
+ jiffies_to_msecs(jiffies - + jiffies_to_msecs(jiffies -
+ bfqq->last_wr_start_finish), + bfqq->last_wr_start_finish),
+ jiffies_to_msecs(bfqq->wr_cur_max_time)); + jiffies_to_msecs(bfqq->wr_cur_max_time));
+ } + }
+ +
+ spin_unlock_irq(bfqd->queue->queue_lock); + spin_unlock_irq(bfqd->queue->queue_lock);
@ -5081,10 +5095,10 @@ index 0000000..f9787a6
+MODULE_LICENSE("GPL"); +MODULE_LICENSE("GPL");
diff --git a/block/bfq-sched.c b/block/bfq-sched.c diff --git a/block/bfq-sched.c b/block/bfq-sched.c
new file mode 100644 new file mode 100644
index 0000000..a64fec1 index 0000000..a5ed694
--- /dev/null --- /dev/null
+++ b/block/bfq-sched.c +++ b/block/bfq-sched.c
@@ -0,0 +1,1200 @@ @@ -0,0 +1,1199 @@
+/* +/*
+ * BFQ: Hierarchical B-WF2Q+ scheduler. + * BFQ: Hierarchical B-WF2Q+ scheduler.
+ * + *
@ -5715,8 +5729,7 @@ index 0000000..a64fec1
+ if (entity->new_weight != entity->orig_weight) { + if (entity->new_weight != entity->orig_weight) {
+ if (entity->new_weight < BFQ_MIN_WEIGHT || + if (entity->new_weight < BFQ_MIN_WEIGHT ||
+ entity->new_weight > BFQ_MAX_WEIGHT) { + entity->new_weight > BFQ_MAX_WEIGHT) {
+ printk(KERN_CRIT "update_weight_prio: " + pr_crit("update_weight_prio: new_weight %d\n",
+ "new_weight %d\n",
+ entity->new_weight); + entity->new_weight);
+ BUG(); + BUG();
+ } + }
@ -6287,7 +6300,7 @@ index 0000000..a64fec1
+} +}
diff --git a/block/bfq.h b/block/bfq.h diff --git a/block/bfq.h b/block/bfq.h
new file mode 100644 new file mode 100644
index 0000000..485d0c9 index 0000000..2bf54ae
--- /dev/null --- /dev/null
+++ b/block/bfq.h +++ b/block/bfq.h
@@ -0,0 +1,801 @@ @@ -0,0 +1,801 @@
@ -6722,10 +6735,10 @@ index 0000000..485d0c9
+ * @last_ins_in_burst. + * @last_ins_in_burst.
+ * @burst_size: number of queues in the current burst of queue activations. + * @burst_size: number of queues in the current burst of queue activations.
+ * @bfq_large_burst_thresh: maximum burst size above which the current + * @bfq_large_burst_thresh: maximum burst size above which the current
+ * queue-activation burst is deemed as 'large'. + * queue-activation burst is deemed as 'large'.
+ * @large_burst: true if a large queue-activation burst is in progress. + * @large_burst: true if a large queue-activation burst is in progress.
+ * @burst_list: head of the burst list (as for the above fields, more details + * @burst_list: head of the burst list (as for the above fields, more details
+ * in the comments to the function bfq_handle_burst). + * in the comments to the function bfq_handle_burst).
+ * @low_latency: if set to true, low-latency heuristics are enabled. + * @low_latency: if set to true, low-latency heuristics are enabled.
+ * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised + * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised
+ * queue is multiplied. + * queue is multiplied.
@ -7093,5 +7106,5 @@ index 0000000..485d0c9
+ +
+#endif /* _BFQ_H */ +#endif /* _BFQ_H */
-- --
1.9.1 2.7.4 (Apple Git-66)

View File

@ -1,8 +1,8 @@
From 47de1e46ef5f462e9694e5b0607aec6ad658f1e0 Mon Sep 17 00:00:00 2001 From 409e62551360d2802992b0175062237352793a2a Mon Sep 17 00:00:00 2001
From: Mauro Andreolini <mauro.andreolini@unimore.it> From: Mauro Andreolini <mauro.andreolini@unimore.it>
Date: Sun, 6 Sep 2015 16:09:05 +0200 Date: Sun, 6 Sep 2015 16:09:05 +0200
Subject: [PATCH 3/4] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r11 for Subject: [PATCH 3/4] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r11, to
4.7.0 port to 4.8.0
A set of processes may happen to perform interleaved reads, i.e.,requests A set of processes may happen to perform interleaved reads, i.e.,requests
whose union would give rise to a sequential read pattern. There are two whose union would give rise to a sequential read pattern. There are two
@ -35,16 +35,16 @@ Signed-off-by: Arianna Avanzini <avanzini@google.com>
Signed-off-by: Paolo Valente <paolo.valente@unimore.it> Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org> Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
--- ---
block/bfq-cgroup.c | 4 + block/bfq-cgroup.c | 5 +
block/bfq-iosched.c | 687 ++++++++++++++++++++++++++++++++++++++++++++++++++-- block/bfq-iosched.c | 685 +++++++++++++++++++++++++++++++++++++++++++++++++++-
block/bfq.h | 66 +++++ block/bfq.h | 66 +++++
3 files changed, 743 insertions(+), 14 deletions(-) 3 files changed, 743 insertions(+), 13 deletions(-)
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 8610cd6..5ee99ec 100644 index 8b08a57..0367996 100644
--- a/block/bfq-cgroup.c --- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c
@@ -437,6 +437,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd) @@ -440,6 +440,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
*/ */
bfqg->bfqd = bfqd; bfqg->bfqd = bfqd;
bfqg->active_entities = 0; bfqg->active_entities = 0;
@ -52,16 +52,17 @@ index 8610cd6..5ee99ec 100644
} }
static void bfq_pd_free(struct blkg_policy_data *pd) static void bfq_pd_free(struct blkg_policy_data *pd)
@@ -530,6 +531,8 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, @@ -533,6 +534,9 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
return bfqg; return bfqg;
} }
+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq); +static void bfq_pos_tree_add_move(struct bfq_data *bfqd,
+ struct bfq_queue *bfqq);
+ +
/** /**
* bfq_bfqq_move - migrate @bfqq to @bfqg. * bfq_bfqq_move - migrate @bfqq to @bfqg.
* @bfqd: queue descriptor. * @bfqd: queue descriptor.
@@ -577,6 +580,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, @@ -580,6 +584,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
bfqg_get(bfqg); bfqg_get(bfqg);
if (busy) { if (busy) {
@ -70,10 +71,10 @@ index 8610cd6..5ee99ec 100644
bfq_activate_bfqq(bfqd, bfqq); bfq_activate_bfqq(bfqd, bfqq);
} }
diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
index f9787a6..d1f648d 100644 index 85e2169..cf3e9b1 100644
--- a/block/bfq-iosched.c --- a/block/bfq-iosched.c
+++ b/block/bfq-iosched.c +++ b/block/bfq-iosched.c
@@ -296,6 +296,72 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd, @@ -295,6 +295,72 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
} }
} }
@ -112,7 +113,7 @@ index f9787a6..d1f648d 100644
+ *rb_link = p; + *rb_link = p;
+ +
+ bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", + bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
+ (long long unsigned)sector, + (unsigned long long) sector,
+ bfqq ? bfqq->pid : 0); + bfqq ? bfqq->pid : 0);
+ +
+ return bfqq; + return bfqq;
@ -146,11 +147,11 @@ index f9787a6..d1f648d 100644
/* /*
* Tell whether there are active queues or groups with differentiated weights. * Tell whether there are active queues or groups with differentiated weights.
*/ */
@@ -528,6 +594,57 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) @@ -527,6 +593,57 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
return dur; return dur;
} }
+static unsigned bfq_bfqq_cooperations(struct bfq_queue *bfqq) +static unsigned int bfq_bfqq_cooperations(struct bfq_queue *bfqq)
+{ +{
+ return bfqq->bic ? bfqq->bic->cooperations : 0; + return bfqq->bic ? bfqq->bic->cooperations : 0;
+} +}
@ -204,7 +205,7 @@ index f9787a6..d1f648d 100644
/* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */ /* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{ {
@@ -764,8 +881,14 @@ static void bfq_add_request(struct request *rq) @@ -763,8 +880,14 @@ static void bfq_add_request(struct request *rq)
BUG_ON(!next_rq); BUG_ON(!next_rq);
bfqq->next_rq = next_rq; bfqq->next_rq = next_rq;
@ -220,7 +221,7 @@ index f9787a6..d1f648d 100644
idle_for_long_time = time_is_before_jiffies( idle_for_long_time = time_is_before_jiffies(
bfqq->budget_timeout + bfqq->budget_timeout +
bfqd->bfq_wr_min_idle_time); bfqd->bfq_wr_min_idle_time);
@@ -793,11 +916,12 @@ static void bfq_add_request(struct request *rq) @@ -792,11 +915,12 @@ static void bfq_add_request(struct request *rq)
bfqd->last_ins_in_burst = jiffies; bfqd->last_ins_in_burst = jiffies;
} }
@ -236,7 +237,7 @@ index f9787a6..d1f648d 100644
entity->budget = max_t(unsigned long, bfqq->max_budget, entity->budget = max_t(unsigned long, bfqq->max_budget,
bfq_serv_to_charge(next_rq, bfqq)); bfq_serv_to_charge(next_rq, bfqq));
@@ -816,6 +940,9 @@ static void bfq_add_request(struct request *rq) @@ -815,6 +939,9 @@ static void bfq_add_request(struct request *rq)
if (!bfqd->low_latency) if (!bfqd->low_latency)
goto add_bfqq_busy; goto add_bfqq_busy;
@ -246,7 +247,7 @@ index f9787a6..d1f648d 100644
/* /*
* If the queue: * If the queue:
* - is not being boosted, * - is not being boosted,
@@ -840,7 +967,7 @@ static void bfq_add_request(struct request *rq) @@ -839,7 +966,7 @@ static void bfq_add_request(struct request *rq)
} else if (old_wr_coeff > 1) { } else if (old_wr_coeff > 1) {
if (interactive) if (interactive)
bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
@ -255,7 +256,7 @@ index f9787a6..d1f648d 100644
(bfqq->wr_cur_max_time == (bfqq->wr_cur_max_time ==
bfqd->bfq_wr_rt_max_time && bfqd->bfq_wr_rt_max_time &&
!soft_rt)) { !soft_rt)) {
@@ -905,6 +1032,7 @@ static void bfq_add_request(struct request *rq) @@ -904,6 +1031,7 @@ static void bfq_add_request(struct request *rq)
bfqd->bfq_wr_rt_max_time; bfqd->bfq_wr_rt_max_time;
} }
} }
@ -263,7 +264,7 @@ index f9787a6..d1f648d 100644
if (old_wr_coeff != bfqq->wr_coeff) if (old_wr_coeff != bfqq->wr_coeff)
entity->prio_changed = 1; entity->prio_changed = 1;
add_bfqq_busy: add_bfqq_busy:
@@ -1047,6 +1175,15 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, @@ -1046,6 +1174,15 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
bfqd->last_position); bfqd->last_position);
BUG_ON(!next_rq); BUG_ON(!next_rq);
bfqq->next_rq = next_rq; bfqq->next_rq = next_rq;
@ -279,7 +280,7 @@ index f9787a6..d1f648d 100644
} }
} }
@@ -1129,11 +1266,346 @@ static void bfq_end_wr(struct bfq_data *bfqd) @@ -1128,11 +1265,346 @@ static void bfq_end_wr(struct bfq_data *bfqd)
spin_unlock_irq(bfqd->queue->queue_lock); spin_unlock_irq(bfqd->queue->queue_lock);
} }
@ -572,7 +573,7 @@ index f9787a6..d1f648d 100644
+ struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) + struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
+{ +{
+ bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
+ (long unsigned)new_bfqq->pid); + (unsigned long) new_bfqq->pid);
+ /* Save weight raising and idle window of the merged queues */ + /* Save weight raising and idle window of the merged queues */
+ bfq_bfqq_save_state(bfqq); + bfq_bfqq_save_state(bfqq);
+ bfq_bfqq_save_state(new_bfqq); + bfq_bfqq_save_state(new_bfqq);
@ -626,7 +627,7 @@ index f9787a6..d1f648d 100644
/* /*
* Disallow merge of a sync bio into an async request. * Disallow merge of a sync bio into an async request.
@@ -1150,7 +1622,26 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, @@ -1149,7 +1621,26 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq,
if (!bic) if (!bic)
return 0; return 0;
@ -654,7 +655,7 @@ index f9787a6..d1f648d 100644
} }
static void __bfq_set_in_service_queue(struct bfq_data *bfqd, static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
@@ -1349,6 +1840,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -1350,6 +1841,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
__bfq_bfqd_reset_in_service(bfqd); __bfq_bfqd_reset_in_service(bfqd);
@ -670,7 +671,7 @@ index f9787a6..d1f648d 100644
if (RB_EMPTY_ROOT(&bfqq->sort_list)) { if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
/* /*
* Overloading budget_timeout field to store the time * Overloading budget_timeout field to store the time
@@ -1357,8 +1857,13 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -1358,8 +1858,13 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
*/ */
bfqq->budget_timeout = jiffies; bfqq->budget_timeout = jiffies;
bfq_del_bfqq_busy(bfqd, bfqq, 1); bfq_del_bfqq_busy(bfqd, bfqq, 1);
@ -685,7 +686,7 @@ index f9787a6..d1f648d 100644
} }
/** /**
@@ -2242,10 +2747,12 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -2246,10 +2751,12 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
/* /*
* If the queue was activated in a burst, or * If the queue was activated in a burst, or
* too much time has elapsed from the beginning * too much time has elapsed from the beginning
@ -700,7 +701,7 @@ index f9787a6..d1f648d 100644
time_is_before_jiffies(bfqq->last_wr_start_finish + time_is_before_jiffies(bfqq->last_wr_start_finish +
bfqq->wr_cur_max_time)) { bfqq->wr_cur_max_time)) {
bfqq->last_wr_start_finish = jiffies; bfqq->last_wr_start_finish = jiffies;
@@ -2474,6 +2981,25 @@ static void bfq_put_queue(struct bfq_queue *bfqq) @@ -2478,6 +2985,25 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
#endif #endif
} }
@ -726,7 +727,7 @@ index f9787a6..d1f648d 100644
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{ {
if (bfqq == bfqd->in_service_queue) { if (bfqq == bfqd->in_service_queue) {
@@ -2484,6 +3010,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -2488,6 +3014,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
atomic_read(&bfqq->ref)); atomic_read(&bfqq->ref));
@ -735,7 +736,7 @@ index f9787a6..d1f648d 100644
bfq_put_queue(bfqq); bfq_put_queue(bfqq);
} }
@@ -2492,6 +3020,25 @@ static void bfq_init_icq(struct io_cq *icq) @@ -2496,6 +3024,25 @@ static void bfq_init_icq(struct io_cq *icq)
struct bfq_io_cq *bic = icq_to_bic(icq); struct bfq_io_cq *bic = icq_to_bic(icq);
bic->ttime.last_end_request = jiffies; bic->ttime.last_end_request = jiffies;
@ -761,7 +762,7 @@ index f9787a6..d1f648d 100644
} }
static void bfq_exit_icq(struct io_cq *icq) static void bfq_exit_icq(struct io_cq *icq)
@@ -2505,6 +3052,13 @@ static void bfq_exit_icq(struct io_cq *icq) @@ -2509,6 +3056,13 @@ static void bfq_exit_icq(struct io_cq *icq)
} }
if (bic->bfqq[BLK_RW_SYNC]) { if (bic->bfqq[BLK_RW_SYNC]) {
@ -775,7 +776,7 @@ index f9787a6..d1f648d 100644
bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]); bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
bic->bfqq[BLK_RW_SYNC] = NULL; bic->bfqq[BLK_RW_SYNC] = NULL;
} }
@@ -2809,6 +3363,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, @@ -2814,6 +3368,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
return; return;
@ -786,7 +787,7 @@ index f9787a6..d1f648d 100644
enable_idle = bfq_bfqq_idle_window(bfqq); enable_idle = bfq_bfqq_idle_window(bfqq);
if (atomic_read(&bic->icq.ioc->active_ref) == 0 || if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
@@ -2856,6 +3414,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, @@ -2861,6 +3419,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
!BFQQ_SEEKY(bfqq)) !BFQQ_SEEKY(bfqq))
bfq_update_idle_window(bfqd, bfqq, bic); bfq_update_idle_window(bfqd, bfqq, bic);
@ -794,7 +795,7 @@ index f9787a6..d1f648d 100644
bfq_log_bfqq(bfqd, bfqq, bfq_log_bfqq(bfqd, bfqq,
"rq_enqueued: idle_window=%d (seeky %d, mean %llu)", "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
@@ -2920,12 +3479,47 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, @@ -2925,12 +3484,47 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
static void bfq_insert_request(struct request_queue *q, struct request *rq) static void bfq_insert_request(struct request_queue *q, struct request *rq)
{ {
struct bfq_data *bfqd = q->elevator->elevator_data; struct bfq_data *bfqd = q->elevator->elevator_data;
@ -843,7 +844,7 @@ index f9787a6..d1f648d 100644
rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &bfqq->fifo); list_add_tail(&rq->queuelist, &bfqq->fifo);
@@ -3094,6 +3688,32 @@ static void bfq_put_request(struct request *rq) @@ -3099,6 +3693,32 @@ static void bfq_put_request(struct request *rq)
} }
/* /*
@ -876,7 +877,7 @@ index f9787a6..d1f648d 100644
* Allocate bfq data structures associated with this request. * Allocate bfq data structures associated with this request.
*/ */
static int bfq_set_request(struct request_queue *q, struct request *rq, static int bfq_set_request(struct request_queue *q, struct request *rq,
@@ -3105,6 +3725,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, @@ -3110,6 +3730,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
const int is_sync = rq_is_sync(rq); const int is_sync = rq_is_sync(rq);
struct bfq_queue *bfqq; struct bfq_queue *bfqq;
unsigned long flags; unsigned long flags;
@ -884,7 +885,7 @@ index f9787a6..d1f648d 100644
might_sleep_if(gfpflags_allow_blocking(gfp_mask)); might_sleep_if(gfpflags_allow_blocking(gfp_mask));
@@ -3117,15 +3738,30 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, @@ -3122,15 +3743,30 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
bfq_bic_update_cgroup(bic, bio); bfq_bic_update_cgroup(bic, bio);
@ -900,12 +901,11 @@ index f9787a6..d1f648d 100644
+ bic->saved_in_large_burst) + bic->saved_in_large_burst)
bfq_mark_bfqq_in_large_burst(bfqq); bfq_mark_bfqq_in_large_burst(bfqq);
- else - else
- bfq_clear_bfqq_in_large_burst(bfqq);
+ else { + else {
+ bfq_clear_bfqq_in_large_burst(bfqq); bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list) + if (bic->was_in_burst_list)
+ hlist_add_head(&bfqq->burst_list_node, + hlist_add_head(&bfqq->burst_list_node,
+ &bfqd->burst_list); + &bfqd->burst_list);
+ } + }
+ } + }
+ } else { + } else {
@ -919,7 +919,7 @@ index f9787a6..d1f648d 100644
} }
} }
@@ -3137,6 +3773,26 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, @@ -3142,6 +3778,26 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
rq->elv.priv[0] = bic; rq->elv.priv[0] = bic;
rq->elv.priv[1] = bfqq; rq->elv.priv[1] = bfqq;
@ -946,7 +946,7 @@ index f9787a6..d1f648d 100644
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
return 0; return 0;
@@ -3290,6 +3946,7 @@ static void bfq_init_root_group(struct bfq_group *root_group, @@ -3295,6 +3951,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
root_group->my_entity = NULL; root_group->my_entity = NULL;
root_group->bfqd = bfqd; root_group->bfqd = bfqd;
#endif #endif
@ -954,7 +954,7 @@ index f9787a6..d1f648d 100644
for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
} }
@@ -3370,6 +4027,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) @@ -3375,6 +4032,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
@ -964,7 +964,7 @@ index f9787a6..d1f648d 100644
bfqd->bfq_large_burst_thresh = 11; bfqd->bfq_large_burst_thresh = 11;
diff --git a/block/bfq.h b/block/bfq.h diff --git a/block/bfq.h b/block/bfq.h
index 485d0c9..f73c942 100644 index 2bf54ae..fcce855 100644
--- a/block/bfq.h --- a/block/bfq.h
+++ b/block/bfq.h +++ b/block/bfq.h
@@ -183,6 +183,8 @@ struct bfq_group; @@ -183,6 +183,8 @@ struct bfq_group;
@ -1097,5 +1097,5 @@ index 485d0c9..f73c942 100644
static void bfq_put_queue(struct bfq_queue *bfqq); static void bfq_put_queue(struct bfq_queue *bfqq);
static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
-- --
1.9.1 2.7.4 (Apple Git-66)

View File

@ -347,15 +347,6 @@ index 97ee9ac..b2ddabc 100644
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
sched_clock_cpu(cpu); /* sync clocks x-cpu */ sched_clock_cpu(cpu); /* sync clocks x-cpu */
ttwu_queue_remote(p, cpu, wake_flags); ttwu_queue_remote(p, cpu, wake_flags);
@@ -2394,7 +2415,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
* Silence PROVE_RCU.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
- set_task_cpu(p, cpu);
+ __set_task_cpu(p, cpu);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#ifdef CONFIG_SCHED_INFO
@@ -2941,7 +2962,14 @@ void sched_exec(void) @@ -2941,7 +2962,14 @@ void sched_exec(void)
int dest_cpu; int dest_cpu;

View File

@ -24,9 +24,13 @@ processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family
Family 15h (Steamroller), Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 Family 15h (Steamroller), Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7
(Nehalem), Intel 1.5 Gen Core i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 (Nehalem), Intel 1.5 Gen Core i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7
(Sandybridge), Intel 3rd Gen Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core (Sandybridge), Intel 3rd Gen Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core
i3/i5/i7 (Haswell), Intel 5th Gen Core i3/i5/i7 (Broadwell), and the low power i3/i5/i7 (Haswell), Intel 5th Gen Core i3/i5/i7 (Broadwell), Intel 6th Gen Core
Silvermont series of Atom processors (Silvermont). It also offers the compiler i3/i5.i7 (Skylake), and the low power Silvermont series of Atom processors
the 'native' flag. (Silvermont). It also offers the compiler the 'native' flag.
A warning to atom users: it is not recommended for you to compile with the
native option based on https://github.com/graysky2/kernel_gcc_patch/issues/15
Instead, use the atom option.
Small but real speed increases are measurable using a make endpoint comparing Small but real speed increases are measurable using a make endpoint comparing
a generic kernel to one built with one of the respective microarchs. a generic kernel to one built with one of the respective microarchs.
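
A minimal sketch of how one of these microarchitecture options would be picked in the kernel config; the CONFIG_ symbol names are assumptions taken from the graysky patch and upstream Kconfig.cpu, so verify them in the patched arch/x86/Kconfig.cpu:

# .config fragment: select exactly one processor family
CONFIG_MNATIVE=y
# Atom users: per the warning above, prefer the dedicated Atom option instead
# CONFIG_MATOM=y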

View File

@ -0,0 +1,115 @@
diff -urN a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
--- a/drivers/hid/hid-apple.c 2016-12-05 17:27:09.777555651 +0900
+++ b/drivers/hid/hid-apple.c 2016-08-03 13:24:57.000000000 +0900
@@ -52,6 +52,22 @@
"(For people who want to keep Windows PC keyboard muscle memory. "
"[0] = as-is, Mac layout. 1 = swapped, Windows layout.)");
+static unsigned int swap_fn_leftctrl;
+module_param(swap_fn_leftctrl, uint, 0644);
+MODULE_PARM_DESC(swap_fn_leftctrl, "Swap the Fn and left Control keys. "
+ "(For people who want to keep PC keyboard muscle memory. "
+ "[0] = as-is, Mac layout, 1 = swapped, PC layout)");
+
+static unsigned int rightalt_as_rightctrl;
+module_param(rightalt_as_rightctrl, uint, 0644);
+MODULE_PARM_DESC(rightalt_as_rightctrl, "Use the right Alt key as a right Ctrl key. "
+ "[0] = as-is, Mac layout. 1 = Right Alt is right Ctrl");
+
+static unsigned int ejectcd_as_delete;
+module_param(ejectcd_as_delete, uint, 0644);
+MODULE_PARM_DESC(ejectcd_as_delete, "Use Eject-CD key as Delete key. "
+ "([0] = disabled, 1 = enabled)");
+
struct apple_sc {
unsigned long quirks;
unsigned int fn_on;
@@ -164,6 +180,21 @@
{ }
};
+static const struct apple_key_translation swapped_fn_leftctrl_keys[] = {
+ { KEY_FN, KEY_LEFTCTRL },
+ { }
+};
+
+static const struct apple_key_translation rightalt_as_rightctrl_keys[] = {
+ { KEY_RIGHTALT, KEY_RIGHTCTRL },
+ { }
+};
+
+static const struct apple_key_translation ejectcd_as_delete_keys[] = {
+ { KEY_EJECTCD, KEY_DELETE },
+ { }
+};
+
static const struct apple_key_translation *apple_find_translation(
const struct apple_key_translation *table, u16 from)
{
@@ -183,9 +214,11 @@
struct apple_sc *asc = hid_get_drvdata(hid);
const struct apple_key_translation *trans, *table;
- if (usage->code == KEY_FN) {
+ u16 fn_keycode = (swap_fn_leftctrl) ? (KEY_LEFTCTRL) : (KEY_FN);
+
+ if (usage->code == fn_keycode) {
asc->fn_on = !!value;
- input_event(input, usage->type, usage->code, value);
+ input_event(input, usage->type, KEY_FN, value);
return 1;
}
@@ -264,6 +297,30 @@
}
}
+ if (swap_fn_leftctrl) {
+ trans = apple_find_translation(swapped_fn_leftctrl_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
+ if (ejectcd_as_delete) {
+ trans = apple_find_translation(ejectcd_as_delete_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
+ if (rightalt_as_rightctrl) {
+ trans = apple_find_translation(rightalt_as_rightctrl_keys, usage->code);
+ if (trans) {
+ input_event(input, usage->type, trans->to, value);
+ return 1;
+ }
+ }
+
return 0;
}
@@ -327,6 +384,21 @@
for (trans = apple_iso_keyboard; trans->from; trans++)
set_bit(trans->to, input->keybit);
+
+ if (swap_fn_leftctrl) {
+ for (trans = swapped_fn_leftctrl_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+ }
+
+ if (ejectcd_as_delete) {
+ for (trans = ejectcd_as_delete_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+ }
+
+ if (rightalt_as_rightctrl) {
+ for (trans = rightalt_as_rightctrl_keys; trans->from; trans++)
+ set_bit(trans->to, input->keybit);
+ }
}
static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
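
A minimal usage sketch for the three module parameters added above; the parameter names come straight from the patch, while the modprobe.d file name is the usual convention rather than something this patch installs:

# persist the PC-style layout across reboots
cat > /etc/modprobe.d/hid_apple.conf <<'EOF'
options hid_apple swap_fn_leftctrl=1 rightalt_as_rightctrl=1 ejectcd_as_delete=1
EOF
# or toggle a parameter at runtime (they are declared with mode 0644)
echo 1 > /sys/module/hid_apple/parameters/swap_fn_leftctrl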

View File

@ -78,7 +78,7 @@ index 0000000..8fce86f
+2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger anonying warnings. +2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger anonying warnings.
+2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation. +2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation.
diff --git a/fs/exec.c b/fs/exec.c diff --git a/fs/exec.c b/fs/exec.c
index 887c1c9..2bee16e 100644 index 6fcfb3f..ef87e0f 100644
--- a/fs/exec.c --- a/fs/exec.c
+++ b/fs/exec.c +++ b/fs/exec.c
@@ -19,7 +19,7 @@ @@ -19,7 +19,7 @@
@ -98,7 +98,7 @@ index 887c1c9..2bee16e 100644
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
@@ -1273,6 +1274,7 @@ void setup_new_exec(struct linux_binprm * bprm) @@ -1309,6 +1310,7 @@ void setup_new_exec(struct linux_binprm * bprm)
/* An exec changes our domain. We are no longer part of the thread /* An exec changes our domain. We are no longer part of the thread
group */ group */
current->self_exec_id++; current->self_exec_id++;
@ -107,7 +107,7 @@ index 887c1c9..2bee16e 100644
do_close_on_exec(current->files); do_close_on_exec(current->files);
} }
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8372046..82aa2f4 100644 index b9a8c81..9765269 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -89,6 +89,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
@@ -120,9 +120,9 @@ index 8372046..82aa2f4 100644
#ifdef CONFIG_QUICKLIST
"Quicklists: %8lu kB\n"
#endif
@@ -147,6 +150,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
@@ -149,6 +152,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024,
global_page_state(NR_KERNEL_STACK_KB),
K(global_page_state(NR_PAGETABLE)),
+#ifdef CONFIG_UKSM
+ K(global_page_state(NR_UKSM_ZERO_PAGES)),
@@ -171,7 +171,7 @@ index d4458b6..172ceb9 100644
static inline unsigned long my_zero_pfn(unsigned long addr)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 7ae216a..06861d8 100644
index 481c8c4..5329b23 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -19,21 +19,6 @@ struct mem_cgroup;
@@ -196,7 +196,7 @@ index 7ae216a..06861d8 100644
static inline struct stable_node *page_stable_node(struct page *page)
{
@@ -64,6 +49,33 @@ struct page *ksm_might_need_to_copy(struct page *page,
@@ -63,6 +48,33 @@ struct page *ksm_might_need_to_copy(struct page *page,
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
@@ -230,7 +230,7 @@ index 7ae216a..06861d8 100644
#else /* !CONFIG_KSM */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -106,4 +118,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
@@ -105,4 +117,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
#endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */
@@ -238,10 +238,10 @@ index 7ae216a..06861d8 100644
+
#endif /* __LINUX_KSM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ca3e517..ae62e7d1 100644
index 903200f..6c7d900 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -357,6 +357,9 @@ struct vm_area_struct {
@@ -358,6 +358,9 @@ struct vm_area_struct {
struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
@@ -252,20 +252,20 @@ index ca3e517..ae62e7d1 100644
struct core_thread {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 02069c2..f7cce50 100644
index 7f2ae99..89f7dd8 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -153,6 +153,9 @@ enum zone_stat_item {
@@ -138,6 +138,9 @@ enum zone_stat_item {
WORKINGSET_NODERECLAIM,
NUMA_OTHER, /* allocation from other node */
NR_ANON_TRANSPARENT_HUGEPAGES,
#endif
NR_FREE_CMA_PAGES,
+#ifdef CONFIG_UKSM
+ NR_UKSM_ZERO_PAGES,
+#endif
NR_VM_ZONE_STAT_ITEMS };
/*
enum node_stat_item {
@@ -817,7 +820,7 @@ static inline int is_highmem_idx(enum zone_type idx)
@@ -869,7 +872,7 @@ static inline int is_highmem_idx(enum zone_type idx)
}
/**
@@ -513,10 +513,10 @@ index 0000000..825f05e
+#endif /* !CONFIG_UKSM */
+#endif /* __LINUX_UKSM_H */
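A note on the counter introduced above: NR_UKSM_ZERO_PAGES is a new zone stat item (mmzone.h hunk), exported through /proc/meminfo (meminfo.c hunk) and listed as "nr_uksm_zero_pages" in the mm/vmstat.c hunk at the end of this patch. A minimal userspace sketch for reading it on a kernel built with this patch; it assumes only that the /proc/vmstat line exists:

/* uksm_zero_pages.c - print the nr_uksm_zero_pages counter from /proc/vmstat.
 * Illustration only, not part of the patch. Build: cc -o uksm_zero_pages uksm_zero_pages.c */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[256];
    FILE *f = fopen("/proc/vmstat", "r");

    if (!f) {
        perror("/proc/vmstat");
        return 1;
    }
    while (fgets(line, sizeof(line), f)) {
        /* each /proc/vmstat line is "<name> <value>" */
        if (strncmp(line, "nr_uksm_zero_pages ", 19) == 0)
            fputs(line, stdout);
    }
    fclose(f);
    return 0;
}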
diff --git a/kernel/fork.c b/kernel/fork.c
index aea4f4d..f93e114 100644
index beb3172..569893a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -459,7 +459,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -457,7 +457,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
goto fail_nomem;
charge = len;
}
@@ -525,7 +525,7 @@ index aea4f4d..f93e114 100644
if (!tmp)
goto fail_nomem;
*tmp = *mpnt;
@@ -512,7 +512,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -510,7 +510,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
__vma_link_rb(mm, tmp, rb_link, rb_parent);
rb_link = &tmp->vm_rb.rb_right;
rb_parent = &tmp->vm_rb;
@@ -535,17 +535,17 @@ index aea4f4d..f93e114 100644
retval = copy_page_range(mm, oldmm, mpnt);
diff --git a/lib/Makefile b/lib/Makefile
index ff6a7a6..ac0bb55 100644
index 5dc77a8..b63a823 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -20,7 +20,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n
@@ -17,7 +17,7 @@ KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_hweight.o := n
KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \
- rbtree.o radix-tree.o dump_stack.o timerqueue.o\
+ rbtree.o radix-tree.o sradix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o \
sha1.o md5.o irq_regs.o argv_split.o \
sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
diff --git a/lib/sradix-tree.c b/lib/sradix-tree.c
new file mode 100644
@@ -1030,10 +1030,10 @@ index 0000000..8d06329
+ return 0;
+}
diff --git a/mm/Kconfig b/mm/Kconfig
index 3e2daef..165b60e 100644
index be0ee11..64fd3bc 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -332,6 +332,32 @@ config KSM
@@ -340,6 +340,32 @@ config KSM
See Documentation/vm/ksm.txt for more information: KSM is inactive
until a program has madvised that an area is MADV_MERGEABLE, and
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
@@ -1067,10 +1067,10 @@ index 3e2daef..165b60e 100644
config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation"
diff --git a/mm/Makefile b/mm/Makefile
index 78c6f7d..7e7cd8a 100644
index 2ca1faf..980c8dd 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -63,7 +63,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o
@@ -66,7 +66,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
@@ -1081,10 +1081,10 @@ index 78c6f7d..7e7cd8a 100644
obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o
diff --git a/mm/memory.c b/mm/memory.c
index 9e04681..02200d3 100644
index 793fe0f..0464507 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -124,6 +124,28 @@ unsigned long highest_memmap_pfn __read_mostly;
@@ -124,6 +124,25 @@ unsigned long highest_memmap_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn);
@@ -1095,14 +1095,11 @@ index 9e04681..02200d3 100644
+
+static int __init setup_uksm_zero_page(void)
+{
+ unsigned long addr;
+ empty_uksm_zero_page = alloc_pages(__GFP_ZERO & ~__GFP_MOVABLE, 0);
+ addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0);
+ if (!empty_uksm_zero_page)
+ if (!addr)
+ panic("Oh boy, that early out of memory?");
+
+ empty_uksm_zero_page = virt_to_page((void *) addr);
+ SetPageReserved(empty_uksm_zero_page);
+
+ uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page);
+
+ return 0;
@@ -1113,7 +1110,7 @@ index 9e04681..02200d3 100644
/*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/
@@ -135,6 +157,7 @@ static int __init init_zero_pfn(void)
@@ -135,6 +154,7 @@ static int __init init_zero_pfn(void)
core_initcall(init_zero_pfn);
@@ -1121,7 +1118,7 @@ index 9e04681..02200d3 100644
#if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm)
@@ -905,6 +928,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
@@ -914,6 +934,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
get_page(page);
page_dup_rmap(page, false);
rss[mm_counter(page)]++;
@@ -1133,7 +1130,7 @@ index 9e04681..02200d3 100644
}
out_set_pte:
@@ -1138,8 +1166,10 @@ again:
@@ -1148,8 +1173,10 @@ again:
ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr);
@@ -1145,7 +1142,7 @@ index 9e04681..02200d3 100644
if (!PageAnon(page)) {
if (pte_dirty(ptent)) {
@@ -1995,8 +2025,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
@@ -2010,8 +2037,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
clear_page(kaddr);
kunmap_atomic(kaddr);
flush_dcache_page(dst);
@@ -1157,15 +1154,15 @@ index 9e04681..02200d3 100644
}
static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
@@ -2141,6 +2173,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2154,6 +2183,7 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
new_page = alloc_zeroed_user_highpage_movable(vma, address);
new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);
if (!new_page)
goto oom;
+ uksm_cow_pte(vma, orig_pte);
} else {
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
if (!new_page)
fe->address);
@@ -2166,7 +2199,9 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2180,7 +2210,9 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
mm_counter_file(old_page));
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
@@ -1174,12 +1171,12 @@ index 9e04681..02200d3 100644
+ uksm_unmap_zero_page(orig_pte);
inc_mm_counter_fast(mm, MM_ANONPAGES);
}
flush_cache_page(vma, address, pte_pfn(orig_pte));
flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
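For context on the setup_uksm_zero_page() hunk above: the pattern is to allocate one dedicated zeroed page at init, mark it reserved so it never enters reclaim, and record its pfn (uksm_zero_pfn) so that later pte checks are a cheap pfn compare. A stand-alone, module-style sketch of the same allocate/reserve/record pattern, illustration only and not the patch's code:

/* reserved_zero_page.c - allocate one reserved, zeroed page and record its pfn. */
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *zero_page;
static unsigned long zero_pfn_demo;

static int __init zero_page_demo_init(void)
{
    zero_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
    if (!zero_page)
        return -ENOMEM;

    SetPageReserved(zero_page);          /* keep it out of reclaim */
    zero_pfn_demo = page_to_pfn(zero_page);
    pr_info("demo zero page at pfn %lu\n", zero_pfn_demo);
    return 0;
}

static void __exit zero_page_demo_exit(void)
{
    ClearPageReserved(zero_page);
    __free_page(zero_page);
}

module_init(zero_page_demo_init);
module_exit(zero_page_demo_exit);
MODULE_LICENSE("GPL");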
diff --git a/mm/mmap.c b/mm/mmap.c
index de2c176..ce60715 100644
index ca9d91b..cf565b7 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -43,6 +43,7 @@
@@ -44,6 +44,7 @@
#include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h>
#include <linux/pkeys.h>
@@ -1187,7 +1184,7 @@ index de2c176..ce60715 100644
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
@@ -164,6 +165,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
@@ -165,6 +166,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_file)
fput(vma->vm_file);
mpol_put(vma_policy(vma));
@@ -1206,13 +1203,13 @@ index de2c176..ce60715 100644
+ uksm_remove_vma(vma);
+
if (next && !insert) {
struct vm_area_struct *exporter = NULL;
struct vm_area_struct *exporter = NULL, *importer = NULL;
+ uksm_remove_vma(next);
if (end >= next->vm_end) {
/*
* vma expands, overlapping all the next, and
@@ -725,6 +734,7 @@ again: remove_next = 1 + (end > next->vm_end);
@@ -733,6 +742,7 @@ again:
end_changed = true;
}
vma->vm_pgoff = pgoff;
@@ -1220,14 +1217,13 @@ index de2c176..ce60715 100644
if (adjust_next) {
next->vm_start += adjust_next << PAGE_SHIFT;
next->vm_pgoff += adjust_next;
@@ -795,16 +805,22 @@ again: remove_next = 1 + (end > next->vm_end);
@@ -806,16 +816,21 @@ again:
* up the code too much to do both in one go.
if (remove_next == 2) {
*/
remove_next = 1;
next = vma->vm_next;
end = next->vm_end;
- if (remove_next == 2)
+ if (remove_next == 2) {
+ uksm_remove_vma(next);
goto again;
- }
- else if (next)
+ } else if (next) {
vma_gap_update(next);
@@ -1246,7 +1242,7 @@ index de2c176..ce60715 100644
validate_mm(mm);
return 0;
@@ -1196,6 +1212,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
@@ -1207,6 +1222,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@@ -1256,7 +1252,7 @@ index de2c176..ce60715 100644
if (flags & MAP_LOCKED)
if (!can_do_mlock())
return -EPERM;
@@ -1534,6 +1553,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
@@ -1545,6 +1563,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
allow_write_access(file);
}
file = vma->vm_file;
@@ -1264,7 +1260,7 @@ index de2c176..ce60715 100644
out:
perf_event_mmap(vma);
@@ -1575,6 +1595,7 @@ allow_write_and_free_vma:
@@ -1586,6 +1605,7 @@ allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma:
@@ -1272,7 +1268,7 @@ index de2c176..ce60715 100644
kmem_cache_free(vm_area_cachep, vma);
unacct_error:
if (charged)
@@ -2369,6 +2390,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
@@ -2391,6 +2411,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@@ -1281,7 +1277,7 @@ index de2c176..ce60715 100644
/* Success. */
if (!err)
return 0;
@@ -2639,6 +2662,7 @@ static int do_brk(unsigned long addr, unsigned long len)
@@ -2669,6 +2691,7 @@ static int do_brk(unsigned long addr, unsigned long request)
return 0;
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@@ -1289,7 +1285,7 @@ index de2c176..ce60715 100644
error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
if (offset_in_page(error))
@@ -2696,6 +2720,7 @@ static int do_brk(unsigned long addr, unsigned long len)
@@ -2726,6 +2749,7 @@ static int do_brk(unsigned long addr, unsigned long request)
vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -1297,7 +1293,7 @@ index de2c176..ce60715 100644
out:
perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT;
@@ -2734,6 +2759,12 @@ void exit_mmap(struct mm_struct *mm)
@@ -2764,6 +2788,12 @@ void exit_mmap(struct mm_struct *mm)
/* mm's last user has gone, and its about to be pulled down */
mmu_notifier_release(mm);
@@ -1310,7 +1306,7 @@ index de2c176..ce60715 100644
if (mm->locked_vm) {
vma = mm->mmap;
while (vma) {
@@ -2769,6 +2800,11 @@ void exit_mmap(struct mm_struct *mm)
@@ -2799,6 +2829,11 @@ void exit_mmap(struct mm_struct *mm)
vma = remove_vma(vma);
}
vm_unacct_memory(nr_accounted);
@@ -1322,7 +1318,7 @@ index de2c176..ce60715 100644
}
/* Insert vm structure into process list sorted by address
@@ -2878,6 +2914,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
@@ -2908,6 +2943,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
new_vma->vm_ops->open(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent);
*need_rmap_locks = false;
@@ -1330,7 +1326,7 @@ index de2c176..ce60715 100644
}
return new_vma;
@@ -3015,6 +3052,7 @@ static struct vm_area_struct *__install_special_mapping(
@@ -3055,6 +3091,7 @@ static struct vm_area_struct *__install_special_mapping(
vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
perf_event_mmap(vma);
@@ -1339,7 +1335,7 @@ index de2c176..ce60715 100644
return vma;
diff --git a/mm/rmap.c b/mm/rmap.c
index 701b93f..64ba784 100644
index 1ef3640..1c40463 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1110,9 +1110,9 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
@@ -1356,10 +1352,10 @@ index 701b93f..64ba784 100644
static void __page_set_anon_rmap(struct page *page,
diff --git a/mm/uksm.c b/mm/uksm.c
new file mode 100644
index 0000000..039192f
index 0000000..56852a5
--- /dev/null
+++ b/mm/uksm.c
@@ -0,0 +1,5518 @@
@@ -0,0 +1,5524 @@
+/*
+ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
+ *
@@ -1558,7 +1554,8 @@ index 0000000..039192f
+static struct sradix_tree_node *slot_tree_node_alloc(void)
+{
+ struct slot_tree_node *p;
+ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL);
+ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (!p)
+ return NULL;
+
@@ -2044,7 +2041,8 @@ index 0000000..039192f
+static inline struct node_vma *alloc_node_vma(void)
+{
+ struct node_vma *node_vma;
+ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL);
+ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (node_vma) {
+ INIT_HLIST_HEAD(&node_vma->rmap_hlist);
+ INIT_HLIST_NODE(&node_vma->hlist);
@@ -2069,7 +2067,8 @@ index 0000000..039192f
+ if (!vma_slot_cache)
+ return NULL;
+
+ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL);
+ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (slot) {
+ INIT_LIST_HEAD(&slot->slot_list);
+ INIT_LIST_HEAD(&slot->dedup_list);
@@ -2089,7 +2088,8 @@ index 0000000..039192f
+{
+ struct rmap_item *rmap_item;
+
+ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
+ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (rmap_item) {
+ /* bug on lowest bit is not clear for flag use */
+ BUG_ON(is_addr(rmap_item));
@@ -2106,7 +2106,8 @@ index 0000000..039192f
+static inline struct stable_node *alloc_stable_node(void)
+{
+ struct stable_node *node;
+ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL | GFP_ATOMIC);
+ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (!node)
+ return NULL;
+
@@ -2124,7 +2125,8 @@ index 0000000..039192f
+static inline struct tree_node *alloc_tree_node(struct list_head *list)
+{
+ struct tree_node *node;
+ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL | GFP_ATOMIC);
+ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (!node)
+ return NULL;
+
@@ -2241,8 +2243,8 @@ index 0000000..039192f
+ void *expected_mapping;
+
+ page = pfn_to_page(stable_node->kpfn);
+ expected_mapping = (void *)stable_node +
+ expected_mapping = (void *)((unsigned long)stable_node |
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ PAGE_MAPPING_KSM);
+ rcu_read_lock();
+ if (page->mapping != expected_mapping)
+ goto stale;
@@ -2919,6 +2921,7 @@ index 0000000..039192f
+ (page_to_pfn(kpage) == zero_pfn)) {
+ entry = pte_mkspecial(entry);
+ dec_mm_counter(mm, MM_ANONPAGES);
+ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
+ } else {
+ get_page(kpage);
+ page_add_anon_rmap(kpage, vma, addr, false);
@@ -3986,7 +3989,7 @@ index 0000000..039192f
+ if (IS_ERR_OR_NULL(page))
+ break;
+ if (PageKsm(page)) {
+ ret = handle_mm_fault(vma->vm_mm, vma, addr,
+ ret = handle_mm_fault(vma, addr,
+ FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
+ } else
+ ret = VM_FAULT_WRITE;
@@ -4634,7 +4637,6 @@ index 0000000..039192f
+ if (find_zero_page_hash(hash_strength, *hash)) {
+ if (!cmp_and_merge_zero_page(slot->vma, page)) {
+ slot->pages_merged++;
+ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
+
+ /* For full-zero pages, no need to create rmap item */
+ goto putpage;
@@ -6879,12 +6881,12 @@ index 0000000..039192f
+#endif
+
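The mm/uksm.c hunks above all make one change: the metadata allocators move from plain GFP_KERNEL (and, in two spots, the contradictory GFP_KERNEL | GFP_ATOMIC) to GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, so an allocation that fails under memory pressure returns NULL quietly instead of retrying hard or spamming the log; deduplication bookkeeping can simply be skipped in that case. A minimal module-style sketch of that allocation pattern; every name in it is made up for illustration:

/* opportunistic_slab.c - slab allocation that may fail quietly under pressure. */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_item {
    unsigned long key;
};

static struct kmem_cache *demo_cache;

static int __init opportunistic_init(void)
{
    struct demo_item *item;

    demo_cache = kmem_cache_create("demo_item", sizeof(struct demo_item),
                                   0, 0, NULL);
    if (!demo_cache)
        return -ENOMEM;

    /* __GFP_NORETRY: do not loop or trigger OOM; __GFP_NOWARN: no splat on failure */
    item = kmem_cache_zalloc(demo_cache, GFP_KERNEL |
                             __GFP_NORETRY | __GFP_NOWARN);
    if (!item) {
        pr_info("allocation skipped under memory pressure\n");
        return 0;   /* the caller is expected to cope and retry later */
    }
    kmem_cache_free(demo_cache, item);
    return 0;
}

static void __exit opportunistic_exit(void)
{
    kmem_cache_destroy(demo_cache);
}

module_init(opportunistic_init);
module_exit(opportunistic_exit);
MODULE_LICENSE("GPL");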
diff --git a/mm/vmstat.c b/mm/vmstat.c
index cb2a67b..912b86f 100644
index 89cec42..188ce43 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -733,6 +733,9 @@ const char * const vmstat_text[] = {
@@ -974,6 +974,9 @@ const char * const vmstat_text[] = {
"nr_anon_transparent_hugepages",
"nr_dirtied",
"nr_free_cma",
"nr_written",
+#ifdef CONFIG_UKSM
+ "nr_uksm_zero_pages",