diff --git a/PKGBUILD b/PKGBUILD index f522bb0..3d3b485 100644 --- a/PKGBUILD +++ b/PKGBUILD @@ -7,9 +7,9 @@ pkgname=$pkgbase # comment the following line to build a single package containing the kernel and the headers (( 1 )) && pkgname=("$pkgbase" "$pkgbase-headers" "$pkgbase-docs") pkgdesc="The Linux Kernel and modules from Linus' git tree" -depends=('coreutils' 'linux-firmware-git' 'mkinitcpio') +depends=('coreutils' 'linux-firmware' 'module-init-tools' 'mkinitcpio') -pkgver=4.8.rc8 +pkgver=4.11.rc5 pkgrel=1 url="http://www.kernel.org/" arch=(i686 x86_64) @@ -24,7 +24,7 @@ sha256sums=('SKIP') # set _gitrev to a git revision (man gitrevisions) like a tag, a commit sha1 # hash or a branch name to build from this tree instead of master -_gitrev="v4.7.5" +_gitrev="v4.10.8" #################################################################### # KERNEL CONFIG FILES @@ -32,9 +32,9 @@ _gitrev="v4.7.5" # This PKGBUILD searches for config files in the current directory # and will use the first one it finds from the following # list as base configuration: -# config.local -# config.saved.$CARCH -# config.$CARCH +# config.local +# config.saved.$CARCH +# config.$CARCH # #################################################################### @@ -61,7 +61,15 @@ _gitrev="v4.7.5" # # Uncomment desired options ############################# -#_make_modules=0 +#_make_modules=1 + + +####### +# Skip the merge of Linus's kernel tree +# +# _skip_merge=1 + + MAKEFLAGS="-j $(expr $(cat /proc/cpuinfo |grep processor |wc -l) \* 2)" ####### @@ -87,12 +95,29 @@ _config_cmd="${_config_cmd:-menuconfig}" # _configure_only=1 +####### +# The directory where the kernel should be built +# +# Can be useful, for example, if you want to compile on a +# tmpfs mount, which can speed up the compilation process +# +#_build_dir="${_build_dir:-$srcdir}" + + ####### # Append the date to the localversion # -# e.g. -ARCH -> -ARCH-20090422 +# e.g. -ARCH -> -ARCH-20090422 # -# _date_localversion=1 +#_date_localversion=0 + + + +####### +# Set the pkgver to the kernel version +# rather than the build date +# +# _kernel_pkgver=1 ####### @@ -105,7 +130,7 @@ _save_config=1 ####### # Do not compress kernel modules # -# _no_modules_compression=1 +_no_modules_compression=0 ####### @@ -117,245 +142,250 @@ _save_config=1 # internal variables (( 1 )) && _kernel_src="$pkgname" #(( 1 )) && _kernel_src="$BUILDDIR/$(find . -maxdepth 1 -type d -name "linux-*" -printf "%f\n" | head -1)" +#(( 1 )) && _kernel_src="$_build_dir/$pkgname_$" + ####### # define required functions pkgver() { - cd "$_kernel_src" - git describe --always | sed 's/^v//;s/-/./g' + cd "$_kernel_src" + git describe --always | sed 's/^v//;s/-/./g' } # single package package() { - eval package_$pkgbase-headers - eval package_$pkgbase + eval package_$pkgbase-headers + eval package_$pkgbase } # split package functions -eval "package_$pkgbase() { _generic_package_linux; }" -eval "package_$pkgbase-headers() { _generic_package_linux-headers; }" -eval "package_$pkgbase-docs() { _generic_package_linux-docs; }" +eval "package_$pkgbase() { _generic_package_kernel; }" +eval "package_$pkgbase-headers() { _generic_package_kernel-headers; }" +eval "package_$pkgbase-docs() { _generic_package_kernel-docs; }" ############################## # where the magic happens... ############################## build() { - cd "$_kernel_src" - msg "Sanitizing source tree.." - [[ -n $_gitrev ]] && git reset --hard "$_gitrev" - # cleaning source trees - git clean -f + cd "$_kernel_src" + msg "Sanitizing source tree.." 
+ [[ -n $_gitrev ]] && git reset --hard "$_gitrev" + # cleaning source trees + git clean -f - ################# - # Apply patches - ################# - msg "Applying patches..." - local i patches - for i in "${source[@]}"; do - i=${i##*/} - [[ $i =~ .*\.patch$ ]] && patches=("${patches[@]}" "$srcdir/$i") - [[ ${i%.*} =~ .*\.patch$ ]] && patches=("${patches[@]}" "$srcdir/${i%.*}") - done + ################# + # Apply patches + ################# + msg "Applying patches..." + local i patches + for i in "${source[@]}"; do + i=${i##*/} + [[ $i =~ .*\.patch$ ]] && patches=("${patches[@]}" "$srcdir/$i") + [[ ${i%.*} =~ .*\.patch$ ]] && patches=("${patches[@]}" "$srcdir/${i%.*}") + done - shopt -s nullglob - for i in "${patches[@]}" "$startdir/patches/"*; do - msg2 "Applying ${i##*/}..." - patch -Np1 -i "$i" || (error "Applying ${i##*/} failed" && return 1) - done - shopt -u nullglob + shopt -s nullglob + for i in "${patches[@]}" "$startdir/patches/"*; do + msg2 "Applying ${i##*/}..." + patch -Np1 -i "$i" || (error "Applying ${i##*/} failed" && return 1) + done + shopt -u nullglob - ################# - # CONFIGURATION - ################# + ################# + # CONFIGURATION + ################# - ######################### - # Loading configuration - ######################### - msg "Loading configuration..." - for i in local "saved.$CARCH" "$CARCH"; do - if [[ -e $startdir/config.$i ]]; then - msg2 "Using kernel config file config.$i..." - cp -f "$startdir/config.$i" .config - break - fi - done + ######################### + # Loading configuration + ######################### + msg "Loading configuration..." + for i in local "saved.$CARCH" "$CARCH"; do + if [[ -e $startdir/config.$i ]]; then + msg2 "Using kernel config file config.$i..." + cp -f "$startdir/config.$i" .config + break + fi + done - [[ ! -e .config ]] && - warning "No suitable kernel config file was found. You'll have to configure the kernel from scratch." + [[ ! -e .config ]] && + warning "No suitable kernel config file was found. You'll have to configure the kernel from scratch." - ########################### - # Start the configuration - ########################### - msg "Updating configuration..." - yes "" | make config > /dev/null + ########################### + # Start the configuration + ########################### + msg "Updating configuration..." + yes "" | make config > /dev/null - if [[ -f "$startdir/config.saved.$CARCH" ]]; then - msg2 "migrating previous config..." - cp "$startdir/config.saved.$CARCH" .config - make oldconfig - else - msg2 "migrating default config..." - cp "$startdir/config.$CARCH" .config - make oldconfig - fi - if [[ -n ${_config_cmd// /} ]]; then - msg2 "Running make $_config_cmd..." - make $_config_cmd - else - warning "Unknown config command: $_config_cmd" - fi + if [[ -f "$startdir/config.saved.$CARCH" ]]; then + msg2 "migrating previous config..." + cp "$startdir/config.saved.$CARCH" .config + make oldconfig + else + msg2 "migrating default config..." + cp "$startdir/config.$CARCH" .config + make oldconfig + fi + if [[ -n ${_config_cmd// /} ]]; then + msg2 "Running make $_config_cmd..." + make $_config_cmd + else + warning "Unknown config command: $_config_cmd" + fi - ############################################## - # Save the config file the package directory - ############################################## - if [[ -n $_save_config || -n $_configure_only ]]; then - msg "Saving configuration..." 
- msg2 "Saving $_kernel_src/.config as $startdir/config.saved.$CARCH" - cp .config "$startdir/config.saved.$CARCH" - fi + ############################################## + # Save the config file the package directory + ############################################## + if [[ -n $_save_config || -n $_configure_only ]]; then + msg "Saving configuration..." + msg2 "Saving $_kernel_src/.config as $startdir/config.saved.$CARCH" + cp .config "$startdir/config.saved.$CARCH" + fi - ####################################### - # Stop after configuration if desired - ####################################### - if [[ -n $_configure_only ]]; then - rm -rf "$srcdir" "$pkgdir" - return 1 - fi + ####################################### + # Stop after configuration if desired + ####################################### + if [[ -n $_configure_only ]]; then + rm -rf "$_kernel_src" "$srcdir" "$pkgdir" + return 1 + fi - ############################### - # Append date to localversion - ############################### - if [[ -n $_date_localversion ]]; then - local _localversion="$(sed -rn 's/^CONFIG_LOCALVERSION="([^"]*)"$/\1/p' .config)" - [[ -n $_localversion ]] && msg2 "CONFIG_LOCALVERSION is set to: $_localversion" + ############################### + # Append date to localversion + ############################### + if [[ -n $_date_localversion ]]; then + local _localversion="$(sed -rn 's/^CONFIG_LOCALVERSION="([^"]*)"$/\1/p' .config)" + [[ -n $_localversion ]] && msg2 "CONFIG_LOCALVERSION is set to: $_localversion" - # since this is a git package, the $pkgver is equal to $(date +%Y%m%d) - msg2 "Appending $pkgver to CONFIG_LOCALVERSION..." - sed -ri "s/^(CONFIG_LOCALVERSION=).*$/\1\"$_localversion-$pkgver\"/" .config - fi + # since this is a git package, the $pkgver is equal to $(date +%Y%m%d) + msg2 "Appending $pkgver to CONFIG_LOCALVERSION..." + sed -ri "s/^(CONFIG_LOCALVERSION=).*$/\1\"$_localversion-$pkgver\"/" .config + fi - #################################### - # Append pkgrel to kernel version - #################################### - sed -ri "s/^(EXTRAVERSION =).*$/\1 -$pkgrel/" Makefile + ################# + # BUILD PROCESS + ################# - #################################### - # don't run depmod on 'make install' - #################################### - sed -i '2iexit 0' scripts/depmod.sh - git update-index --assume-unchanged scripts/depmod.sh + ################################ + # Build the kernel and modules + ################################ + msg "Building kernel and modules..." + if [[ -n $_make_modules ]]; then + make $MAKEFLAGS V="$_verbose" bzImage modules + else + make $MAKEFLAGS V="$_verbose" bzImage + fi + ############ + # CLEANUP + ############ - ################# - # BUILD PROCESS - ################# - - ################################ - # Build the kernel and modules - ################################ - msg "Building kernel and modules..." -if [[ -n $_make_modules ]]; then - make $MAKEFLAGS V="$_verbose" bzImage modules -else - make $MAKEFLAGS V="$_verbose" bzImage -fi + ################################### + # Copy files from build directory + #################################### +# if (( ! CLEANUP )) && [[ $_build_dir != $srcdir ]]; then +# msg "Saving $_kernel_src to $srcdir/${_kernel_src##*/}..." 
+# mv "$_kernel_src" "$srcdir" +# rm -rf "$_kernel_src" +# fi } _generic_package_initialization() { - cd "$srcdir/${_kernel_src##*/}" + cd "$_kernel_src" - _karch="x86" + _karch="x86" + + ###################### + # Get kernel version + ###################### + _kernver=$(make kernelrelease) + _basekernel=${_kernver%%-*} + + ############################################################ + # Use kernel version instead of the current date as pkgver + ############################################################ + if [[ -n $_kernel_pkgver ]]; then + pkgver=${_kernver//-/_} + msg "Setting pkgver to kernel version: $pkgver" + fi - ###################### - # Get kernel version - ###################### - _kernver=$(make kernelrelease) - _basekernver=${_kernver%%-*} } -_generic_package_linux() { - pkgdesc="The Linux Kernel and modules from Linus' git tree" - backup=(etc/mkinitcpio.d/$pkgname.preset) - install=$pkgname.install - changelog=$pkgname.changelog +_generic_package_kernel() { + pkgdesc="The Linux Kernel and modules from Linus' git tree" + depends=('coreutils' 'linux-firmware' 'module-init-tools' 'mkinitcpio') + backup=(etc/mkinitcpio.d/$pkgname.preset) + install=$pkgname.install + changelog=$pkgname.changelog - # set required variables - _generic_package_initialization + # set required variables + _generic_package_initialization - ############################################################# - # Provide linux - # (probably someone wants to use this kernel exclusively?) - ############################################################# - provides=("${provides[@]}" "linux=${_kernver//-/_}") + ############################################################# + # Provide linux + # (probably someone wants to use this kernel exclusively?) + ############################################################# + provides=("${provides[@]}" "linux=${_kernver//-/_}") - ################ - # INSTALLATION - ################ + ################ + # INSTALLATION + ################ - ##################### - # Install the image - ##################### - msg "Installing kernel image..." - install -Dm644 arch/$_karch/boot/bzImage "$pkgdir/boot/vmlinuz-$pkgname" + ##################### + # Install the image + ##################### + msg "Installing kernel image..." + install -Dm644 arch/$_karch/boot/bzImage "$pkgdir/boot/vmlinuz-$pkgname" - ########################## - # Install kernel modules - ########################## - msg "Installing kernel modules..." - if [[ -n $_make_modules ]]; then - # force -j1 to work around make 3.82 bug - make -j1 INSTALL_MOD_PATH="$pkgdir/usr" modules_install - [[ -z $_no_modules_compression ]] && find "$pkgdir" -name "*.ko" -exec gzip -9 {} + + ########################## + # Install kernel modules + ########################## + if [[ -n $_make_modules ]]; then + msg "Installing kernel modules..." + make INSTALL_MOD_PATH="$pkgdir" modules_install + [[ -z $_no_modules_compression ]] && find "$pkgdir" -name "*.ko" +-exec gzip -9 {} + - ######################################################### - # Set up extramodules directory (for external modules) - ######################################################### - local extramodules="$pkgdir/usr/lib/modules/extramodules-$(cut -d. 
-f1,2 <<<$_basekernver)" - local modversion=$(grep '^CONFIG_LOCALVERSION=' .config | cut -d'"' -f2) - [[ -n $modversion ]] && extramodules+=$modversion - install -dm755 "${extramodules}${_pkgext}" - echo $_kernver > "${extramodules}${_pkgext}/version" - ln -s "../${extramodules##*/}${_pkgext}" "$pkgdir/usr/lib/modules/$_kernver/extramodules" - - ################################## - # Create important symlinks - ################################## - msg "Creating important symlinks..." + ################################## + # Create important symlinks + ################################## + msg "Creating important symlinks..." - # Create generic modules symlink - if [[ $_kernver != ${_basekernver}${_pkgext} ]]; then - cd "$pkgdir/usr/lib/modules" - ln -s "$_kernver" "${_basekernver}${_pkgext}" - cd "$OLDPWD" + # Create generic modules symlink + + if [[ $_kernver != ${_basekernel}${_pkgext} ]]; then + cd "$pkgdir/lib/modules" + ln -s "$_kernver" "${_basekernel}${_pkgext}" + cd "$OLDPWD" + + + # remove header symlinks + cd "$pkgdir/lib/modules/$_kernver" + rm -rf source build + cd "$OLDPWD" + fi fi - # remove header symlinks - cd "$pkgdir/usr/lib/modules/$_kernver" - rm -rf source build - cd "$OLDPWD" - fi + ############################ + # Install mkinitcpio files + ############################ + install -d "$pkgdir/etc/mkinitcpio.d" + msg "Generating $pkgname.preset..." + cat > "$pkgdir/etc/mkinitcpio.d/$pkgname.preset" < "$pkgdir/etc/mkinitcpio.d/$pkgname.preset" < "$pkgdir/etc/mkinitcpio.d/$pkgname.kver" - ####################### - # Remove the firmware - ####################### - rm -rf "$pkgdir/usr/lib/firmware" + + ####################### + # Update install file + ####################### + msg "Updating install file..." + sed -ri "s/^(pkgname=).*$/\1$pkgname/" "$startdir/$pkgname.install" + sed -ri "s/^(kernver=).*$/\1$_kernver/" "$startdir/$pkgname.install" - ####################### - # Run depmod - ####################### - if [[ -n $_make_modules ]]; then - depmod -a "$pkgdir/usr" - depmod -b "$pkgdir/usr" -F System.map "$_kernver" + + ####################### + # Remove the firmware + ####################### + + # remove the firmware + rm -rf "${pkgdir}/lib/firmware" + if [[ -n $_make_modules ]]; then + # Now we call depmod... + depmod -b "${pkgdir}" -F System.map "${_kernver}" + + # move module tree /lib -> /usr/lib + mkdir -p "${pkgdir}/usr" + mv "${pkgdir}/lib" "${pkgdir}/usr/" fi + } +_generic_package_kernel-headers() { + pkgdesc="Header files and scripts for building modules for $pkgbase" + depends=("$pkgbase") -_generic_package_linux-headers() { - pkgdesc="Header files and scripts for building modules for $pkgbase" - depends=("$pkgbase") + # set required variables + _generic_package_initialization - # set required variables - _generic_package_initialization - - ############################################################# - # Provide linux-headers - # (probably someone wants to use this kernel exclusively?) - ############################################################# - provides=("${provides[@]}" "linux-headers=${_kernver//-/_}") + ############################################################# + # Provide linux-headers + # (probably someone wants to use this kernel exclusively?) 
+ ############################################################# + provides=("${provides[@]}" "linux-headers=${_kernver//-/_}") - ############################## - # Install fake kernel source - ############################## - install -Dm644 Module.symvers "$pkgdir/usr/src/linux-$_kernver/Module.symvers" - install -Dm644 Makefile "$pkgdir/usr/src/linux-$_kernver/Makefile" - install -Dm644 kernel/Makefile "$pkgdir/usr/src/linux-$_kernver/kernel/Makefile" - install -Dm644 .config "$pkgdir/usr/src/linux-$_kernver/.config" - install -Dm644 .config "$pkgdir/usr/lib/modules/$_kernver/.config" + ############################## + # Install fake kernel source + ############################## + install -Dm644 Module.symvers "$pkgdir/usr/src/linux-$_kernver/Module.symvers" + install -Dm644 Makefile "$pkgdir/usr/src/linux-$_kernver/Makefile" + install -Dm644 kernel/Makefile "$pkgdir/usr/src/linux-$_kernver/kernel/Makefile" + install -Dm644 .config "$pkgdir/usr/lib/modules/$_kernver/.config" - ####################################################### - # Install scripts directory and fix permissions on it - ####################################################### - cp -a scripts "$pkgdir/usr/src/linux-$_kernver" + + ####################################################### + # Install scripts directory and fix permissions on it + ####################################################### + cp -a scripts "$pkgdir/usr/src/linux-$_kernver" - ########################## - # Install header files - ########################## - msg "Installing header files..." + ########################## + # Install header files + ########################## + msg "Installing header files..." - for i in net/ipv4/netfilter/ipt_CLUSTERIP.c \ - $(find include/ net/mac80211/ drivers/md -iname "*.h") \ - $(find include/config/ -type f) \ - $(find . -name "Kconfig*") - do - mkdir -p "$pkgdir/usr/src/linux-$_kernver/${i%/*}" - cp -af "$i" "$pkgdir/usr/src/linux-$_kernver/$i" - done + for i in net/ipv4/netfilter/ipt_CLUSTERIP.c \ + $(find include/ net/mac80211/ drivers/{md,media/video/} -iname "*.h") \ + $(find include/config/ -type f) \ + $(find . -name "Kconfig*") + do + mkdir -p "$pkgdir/usr/src/linux-$_kernver/${i%/*}" + cp -af "$i" "$pkgdir/usr/src/linux-$_kernver/$i" + done - # required by virtualbox and probably others - ln -s "../generated/autoconf.h" "$pkgdir/usr/src/linux-$_kernver/include/linux/" + # required by virtualbox and probably others + ln -s "../generated/autoconf.h" "$pkgdir/usr/src/linux-$_kernver/include/linux/" - ######################################## - # Install architecture dependent files - ######################################## - msg "Installing architecture files..." - mkdir -p "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/kernel" - cp -a arch/$_karch/kernel/asm-offsets.s "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/kernel/" + ######################################## + # Install architecture dependent files + ######################################## + msg "Installing architecture files..." 
+ mkdir -p "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/kernel" + cp -a arch/$_karch/kernel/asm-offsets.s "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/kernel/" - cp -a arch/$_karch/Makefile* "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/" - cp -a arch/$_karch/configs "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/" + cp -a arch/$_karch/Makefile* "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/" + cp -a arch/$_karch/configs "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/" - # copy arch includes for external modules and fix the nVidia issue - mkdir -p "$pkgdir/usr/src/linux-$_kernver/arch/$_karch" - cp -a "arch/$_karch/include" "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/" + # copy arch includes for external modules and fix the nVidia issue + mkdir -p "$pkgdir/usr/src/linux-$_kernver/arch/$_karch" + cp -a "arch/$_karch/include" "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/" - # create a necessary symlink to the arch folder - cd "$pkgdir/usr/src/linux-$_kernver/arch" + # create a necessary symlink to the arch folder + cd "$pkgdir/usr/src/linux-$_kernver/arch" - if [[ $CARCH = "x86_64" ]]; then - ln -s $_karch x86_64 - else - ln -s $_karch i386 + if [[ $CARCH = "x86_64" ]]; then + ln -s $_karch x86_64 + else + ln -s $_karch i386 + fi + + cd "$OLDPWD" + + + ################################ + # Remove unneeded architecures + ################################ + msg "Removing unneeded architectures..." + for i in "$pkgdir/usr/src/linux-$_kernver/arch/"*; do + [[ ${i##*/} =~ ($_karch|Kconfig) ]] || rm -rf "$i" + done + + + ############################ + # Remove .gitignore files + ############################ + msg "Removing .gitignore files from kernel source..." + find "$pkgdir/usr/src/linux-$_kernver/" -name ".gitignore" -delete + + + ################################## + # Create important symlinks + ################################## + msg "Creating important symlinks..." + + # the build symlink needs to be relative + if [[ -n $_make_modules ]]; then + cd "$pkgdir/usr/lib/modules/$_kernver" + rm -rf source build + ln -s "/usr/src/linux-$_kernver" build + cd "$OLDPWD" fi - cd "$OLDPWD" - - - ################################ - # Remove unneeded architecures - ################################ - msg "Removing unneeded architectures..." - for i in "$pkgdir/usr/src/linux-$_kernver/arch/"*; do - [[ ${i##*/} =~ ($_karch|Kconfig) ]] || rm -rf "$i" - done - - - ############################ - # Remove .gitignore files - ############################ - msg "Removing .gitignore files from kernel source..." - find "$pkgdir/usr/src/linux-$_kernver/" -name ".gitignore" -delete - - - ################################## - # Create important symlinks - ################################## - msg "Creating important symlinks..." - - # the build symlink needs to be relative - cd "$pkgdir/usr/lib/modules/$_kernver" - rm -rf source build - ln -s "/usr/src/linux-$_kernver" build - cd "$OLDPWD" - - if [[ $_kernver != ${_basekernver}${_pkgext} ]]; then - cd "$pkgdir/usr/src" - ln -s "linux-$_kernver" "linux-${_basekernver}${_pkgext}" - cd "$OLDPWD" - fi + if [[ $_kernver != ${_basekernver}${_pkgext} ]]; then + cd "$pkgdir/usr/src" + ln -s "linux-$_kernver" "linux-${_basekernel}${_pkgext}" + cd "$OLDPWD" + fi } -_generic_package_linux-docs() { - pkgdesc="Kernel hackers manual - HTML documentation that comes with the Linux kernel." - depends=("$pkgbase") +_generic_package_kernel-docs() { + pkgdesc="Kernel hackers manual - HTML documentation that comes with the Linux kernel." 
+ depends=("$pkgbase") - # set required variables - _generic_package_initialization + # set required variables + _generic_package_initialization - mkdir -p "$pkgdir/usr/src/linux-$_kernver" - cp -aL Documentation "$pkgdir/usr/src/linux-$_kernver/" + mkdir -p "$pkgdir/usr/src/linux-$_kernver" + cp -a Documentation "$pkgdir/usr/src/linux-$_kernver/" } # vim: set fenc=utf-8 ts=2 sw=2 noet: + diff --git a/config.saved.x86_64 b/config.saved.x86_64 index 198fda5..d2fca51 100644 --- a/config.saved.x86_64 +++ b/config.saved.x86_64 @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/x86 4.7.5 Kernel Configuration +# Linux/x86 4.10.8 Kernel Configuration # CONFIG_64BIT=y CONFIG_X86_64=y @@ -39,7 +39,6 @@ CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y CONFIG_HAVE_INTEL_TXT=y CONFIG_X86_64_SMP=y -CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" CONFIG_ARCH_SUPPORTS_UPROBES=y CONFIG_FIX_EARLYCON_MEM=y CONFIG_DEBUG_RODATA=y @@ -47,11 +46,11 @@ CONFIG_PGTABLE_LEVELS=4 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_IRQ_WORK=y CONFIG_BUILDTIME_EXTABLE_SORT=y +CONFIG_THREAD_INFO_IN_TASK=y # # General setup # -CONFIG_BLD=y CONFIG_INIT_ENV_ARG_LIMIT=32 CONFIG_CROSS_COMPILE="" # CONFIG_COMPILE_TEST is not set @@ -92,6 +91,7 @@ CONFIG_IRQ_DOMAIN_HIERARCHY=y CONFIG_GENERIC_MSI_IRQ=y CONFIG_GENERIC_MSI_IRQ_DOMAIN=y CONFIG_IRQ_FORCED_THREADING=y +# CONFIG_FORCE_IRQ_THREADING is not set CONFIG_SPARSE_IRQ=y CONFIG_CLOCKSOURCE_WATCHDOG=y CONFIG_ARCH_CLOCKSOURCE_DATA=y @@ -121,20 +121,22 @@ CONFIG_TICK_CPU_ACCOUNTING=y # CONFIG_IRQ_TIME_ACCOUNTING is not set CONFIG_BSD_PROCESS_ACCT=y CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_TASKSTATS is not set # # RCU Subsystem # CONFIG_TREE_RCU=y -# CONFIG_RCU_EXPERT is not set +CONFIG_RCU_EXPERT=y CONFIG_SRCU=y # CONFIG_TASKS_RCU is not set CONFIG_RCU_STALL_COMMON=y +CONFIG_RCU_FANOUT=64 +CONFIG_RCU_FANOUT_LEAF=16 +CONFIG_RCU_FAST_NO_HZ=y # CONFIG_TREE_RCU_TRACE is not set +CONFIG_RCU_KTHREAD_PRIO=0 +# CONFIG_RCU_NOCB_CPU is not set # CONFIG_RCU_EXPEDITE_BOOT is not set CONFIG_BUILD_BIN2C=y CONFIG_IKCONFIG=y @@ -165,7 +167,9 @@ CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y # CONFIG_CGROUP_PERF is not set +CONFIG_CGROUP_BPF=y # CONFIG_CGROUP_DEBUG is not set +CONFIG_SOCK_CGROUP_DATA=y CONFIG_CHECKPOINT_RESTORE=y CONFIG_NAMESPACES=y CONFIG_UTS_NS=y @@ -184,6 +188,7 @@ CONFIG_RD_GZIP=y # CONFIG_RD_XZ is not set # CONFIG_RD_LZO is not set CONFIG_RD_LZ4=y +CONFIG_INITRAMFS_COMPRESSION=".gz" CONFIG_CC_OPTIMIZE_FOR_PERFORMANCE=y # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y @@ -196,6 +201,7 @@ CONFIG_MULTIUSER=y # CONFIG_SGETMASK_SYSCALL is not set # CONFIG_SYSFS_SYSCALL is not set # CONFIG_SYSCTL_SYSCALL is not set +CONFIG_POSIX_TIMERS=y # CONFIG_KALLSYMS is not set CONFIG_PRINTK=y CONFIG_PRINTK_NMI=y @@ -228,6 +234,7 @@ CONFIG_VM_EVENT_COUNTERS=y # CONFIG_SLAB is not set CONFIG_SLUB=y # CONFIG_SLOB is not set +CONFIG_SLAB_FREELIST_RANDOM=y CONFIG_SLUB_CPU_PARTIAL=y # CONFIG_SYSTEM_DATA_VERIFICATION is not set # CONFIG_PROFILING is not set @@ -265,11 +272,14 @@ CONFIG_HAVE_CMPXCHG_LOCAL=y CONFIG_HAVE_CMPXCHG_DOUBLE=y CONFIG_HAVE_ARCH_SECCOMP_FILTER=y CONFIG_SECCOMP_FILTER=y +CONFIG_HAVE_GCC_PLUGINS=y +# CONFIG_GCC_PLUGINS is not set CONFIG_HAVE_CC_STACKPROTECTOR=y 
CONFIG_CC_STACKPROTECTOR=y # CONFIG_CC_STACKPROTECTOR_NONE is not set # CONFIG_CC_STACKPROTECTOR_REGULAR is not set CONFIG_CC_STACKPROTECTOR_STRONG=y +CONFIG_HAVE_ARCH_WITHIN_STACK_FRAMES=y CONFIG_HAVE_CONTEXT_TRACKING=y CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y @@ -287,6 +297,8 @@ CONFIG_HAVE_STACK_VALIDATION=y # CONFIG_HAVE_ARCH_HASH is not set # CONFIG_ISA_BUS_API is not set # CONFIG_CPU_NO_EFFICIENT_FFS is not set +CONFIG_HAVE_ARCH_VMAP_STACK=y +CONFIG_VMAP_STACK=y # # GCOV-based kernel profiling @@ -301,8 +313,12 @@ CONFIG_BLOCK=y CONFIG_BLK_DEV_BSG=y CONFIG_BLK_DEV_BSGLIB=y CONFIG_BLK_DEV_INTEGRITY=y +CONFIG_BLK_DEV_ZONED=y CONFIG_BLK_DEV_THROTTLING=y # CONFIG_BLK_CMDLINE_PARSER is not set +CONFIG_BLK_WBT=y +CONFIG_BLK_WBT_SQ=y +CONFIG_BLK_WBT_MQ=y # # Partition Types @@ -327,18 +343,16 @@ CONFIG_MSDOS_PARTITION=y CONFIG_EFI_PARTITION=y # CONFIG_SYSV68_PARTITION is not set # CONFIG_CMDLINE_PARTITION is not set +CONFIG_BLK_MQ_PCI=y # # IO Schedulers # CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y +# CONFIG_IOSCHED_DEADLINE is not set +# CONFIG_IOSCHED_CFQ is not set CONFIG_IOSCHED_BFQ=y CONFIG_BFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -# CONFIG_DEFAULT_CFQ is not set CONFIG_DEFAULT_BFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="bfq" @@ -367,8 +381,9 @@ CONFIG_SMP=y CONFIG_X86_FEATURE_NAMES=y CONFIG_X86_FAST_FEATURE_TESTS=y CONFIG_X86_X2APIC=y -CONFIG_X86_MPPARSE=y +# CONFIG_X86_MPPARSE is not set # CONFIG_GOLDFISH is not set +CONFIG_INTEL_RDT_A=y # CONFIG_X86_EXTENDED_PLATFORM is not set CONFIG_X86_INTEL_LPSS=y # CONFIG_X86_AMD_PLATFORM_DEVICE is not set @@ -382,10 +397,12 @@ CONFIG_NO_BOOTMEM=y # CONFIG_MK10 is not set # CONFIG_MBARCELONA is not set # CONFIG_MBOBCAT is not set +# CONFIG_MJAGUAR is not set # CONFIG_MBULLDOZER is not set # CONFIG_MPILEDRIVER is not set # CONFIG_MSTEAMROLLER is not set -# CONFIG_MJAGUAR is not set +# CONFIG_MEXCAVATOR is not set +# CONFIG_MZEN is not set # CONFIG_MPSC is not set # CONFIG_MATOM is not set # CONFIG_MCORE2 is not set @@ -403,6 +420,7 @@ CONFIG_X86_INTERNODE_CACHE_SHIFT=6 CONFIG_X86_L1_CACHE_SHIFT=6 CONFIG_X86_INTEL_USERCOPY=y CONFIG_X86_USE_PPRO_CHECKSUM=y +CONFIG_X86_P6_NOP=y CONFIG_X86_TSC=y CONFIG_X86_CMPXCHG64=y CONFIG_X86_CMOV=y @@ -418,10 +436,11 @@ CONFIG_DMI=y # CONFIG_CALGARY_IOMMU is not set CONFIG_SWIOTLB=y CONFIG_IOMMU_HELPER=y -# CONFIG_MAXSMP is not set -CONFIG_NR_CPUS=16 +CONFIG_MAXSMP=y +CONFIG_NR_CPUS=8192 CONFIG_SCHED_SMT=y CONFIG_SCHED_MC=y +CONFIG_SCHED_MC_PRIO=y CONFIG_PREEMPT_NONE=y # CONFIG_PREEMPT_VOLUNTARY is not set # CONFIG_PREEMPT is not set @@ -430,7 +449,6 @@ CONFIG_X86_IO_APIC=y CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y CONFIG_X86_MCE=y CONFIG_X86_MCE_INTEL=y -# CONFIG_X86_MCE_AMD is not set CONFIG_X86_MCE_THRESHOLD=y # CONFIG_X86_MCE_INJECT is not set CONFIG_X86_THERMAL_VECTOR=y @@ -492,6 +510,7 @@ CONFIG_HWPOISON_INJECT=y CONFIG_TRANSPARENT_HUGEPAGE=y CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y # CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set +CONFIG_TRANSPARENT_HUGE_PAGECACHE=y CONFIG_CLEANCACHE=y CONFIG_FRONTSWAP=y # CONFIG_CMA is not set @@ -540,6 +559,8 @@ CONFIG_RELOCATABLE=y CONFIG_RANDOMIZE_BASE=y CONFIG_X86_NEED_RELOCS=y CONFIG_PHYSICAL_ALIGN=0x1000000 +CONFIG_RANDOMIZE_MEMORY=y +CONFIG_RANDOMIZE_MEMORY_PHYSICAL_PADDING=0x0 CONFIG_HOTPLUG_CPU=y # CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set # CONFIG_DEBUG_HOTPLUG_CPU0 is not set @@ -583,19 +604,23 @@ CONFIG_ACPI_BUTTON=y CONFIG_ACPI_FAN=y 
CONFIG_ACPI_DOCK=y CONFIG_ACPI_CPU_FREQ_PSS=y +CONFIG_ACPI_PROCESSOR_CSTATE=y CONFIG_ACPI_PROCESSOR_IDLE=y +CONFIG_ACPI_CPPC_LIB=y CONFIG_ACPI_PROCESSOR=y +CONFIG_ACPI_IPMI=y CONFIG_ACPI_HOTPLUG_CPU=y CONFIG_ACPI_PROCESSOR_AGGREGATOR=y CONFIG_ACPI_THERMAL=y # CONFIG_ACPI_CUSTOM_DSDT is not set +CONFIG_ARCH_HAS_ACPI_TABLE_UPGRADE=y CONFIG_ACPI_TABLE_UPGRADE=y # CONFIG_ACPI_DEBUG is not set CONFIG_ACPI_PCI_SLOT=y CONFIG_X86_PM_TIMER=y CONFIG_ACPI_CONTAINER=y CONFIG_ACPI_HOTPLUG_IOAPIC=y -CONFIG_ACPI_SBS=y +# CONFIG_ACPI_SBS is not set CONFIG_ACPI_HED=y CONFIG_ACPI_BGRT=y CONFIG_ACPI_REDUCED_HARDWARE_ONLY=y @@ -606,9 +631,11 @@ CONFIG_ACPI_APEI=y CONFIG_ACPI_APEI_GHES=y CONFIG_ACPI_APEI_PCIEAER=y CONFIG_ACPI_APEI_MEMORY_FAILURE=y -# CONFIG_ACPI_APEI_ERST_DEBUG is not set +CONFIG_ACPI_APEI_ERST_DEBUG=y +# CONFIG_DPTF_POWER is not set CONFIG_ACPI_EXTLOG=y -CONFIG_PMIC_OPREGION=y +# CONFIG_PMIC_OPREGION is not set +CONFIG_ACPI_CONFIGFS=y # CONFIG_SFI is not set # @@ -635,9 +662,8 @@ CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y # CPU frequency scaling drivers # CONFIG_X86_INTEL_PSTATE=y -CONFIG_X86_PCC_CPUFREQ=y -CONFIG_X86_ACPI_CPUFREQ=y -# CONFIG_X86_POWERNOW_K8 is not set +# CONFIG_X86_PCC_CPUFREQ is not set +# CONFIG_X86_ACPI_CPUFREQ is not set # CONFIG_X86_SPEEDSTEP_CENTRINO is not set # CONFIG_X86_P4_CLOCKMOD is not set @@ -655,12 +681,6 @@ CONFIG_CPU_IDLE_GOV_MENU=y # CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set CONFIG_INTEL_IDLE=y -# -# Memory power savings -# -CONFIG_I7300_IDLE_IOAT_CHANNEL=y -CONFIG_I7300_IDLE=y - # # Bus options (PCI etc.) # @@ -676,18 +696,19 @@ CONFIG_PCIE_ECRC=y CONFIG_PCIEAER_INJECT=y CONFIG_PCIEASPM=y # CONFIG_PCIEASPM_DEBUG is not set -# CONFIG_PCIEASPM_DEFAULT is not set -CONFIG_PCIEASPM_POWERSAVE=y +CONFIG_PCIEASPM_DEFAULT=y +# CONFIG_PCIEASPM_POWERSAVE is not set # CONFIG_PCIEASPM_PERFORMANCE is not set CONFIG_PCIE_PME=y # CONFIG_PCIE_DPC is not set +# CONFIG_PCIE_PTM is not set CONFIG_PCI_BUS_ADDR_T_64BIT=y CONFIG_PCI_MSI=y CONFIG_PCI_MSI_IRQ_DOMAIN=y # CONFIG_PCI_DEBUG is not set CONFIG_PCI_REALLOC_ENABLE_AUTO=y -CONFIG_PCI_STUB=y -CONFIG_HT_IRQ=y +# CONFIG_PCI_STUB is not set +# CONFIG_HT_IRQ is not set CONFIG_PCI_ATS=y CONFIG_PCI_IOV=y CONFIG_PCI_PRI=y @@ -697,12 +718,13 @@ CONFIG_HOTPLUG_PCI=y CONFIG_HOTPLUG_PCI_ACPI=y # CONFIG_HOTPLUG_PCI_ACPI_IBM is not set # CONFIG_HOTPLUG_PCI_CPCI is not set -# CONFIG_HOTPLUG_PCI_SHPC is not set +CONFIG_HOTPLUG_PCI_SHPC=y # # PCI host controller drivers # # CONFIG_PCIE_DW_PLAT is not set +# CONFIG_VMD is not set # CONFIG_ISA_BUS is not set CONFIG_ISA_DMA_API=y # CONFIG_PCCARD is not set @@ -722,7 +744,6 @@ CONFIG_BINFMT_MISC=y # CONFIG_X86_X32 is not set CONFIG_X86_DEV_DMA_OPS=y CONFIG_PMC_ATOM=y -# CONFIG_VMD is not set CONFIG_NET=y CONFIG_NET_INGRESS=y CONFIG_NET_EGRESS=y @@ -781,6 +802,7 @@ CONFIG_TCP_CONG_CUBIC=y # CONFIG_TCP_CONG_HSTCP is not set # CONFIG_TCP_CONG_HYBLA is not set # CONFIG_TCP_CONG_VEGAS is not set +# CONFIG_TCP_CONG_NV is not set # CONFIG_TCP_CONG_SCALABLE is not set CONFIG_TCP_CONG_LP=y # CONFIG_TCP_CONG_VENO is not set @@ -788,6 +810,7 @@ CONFIG_TCP_CONG_LP=y # CONFIG_TCP_CONG_ILLINOIS is not set # CONFIG_TCP_CONG_DCTCP is not set # CONFIG_TCP_CONG_CDG is not set +# CONFIG_TCP_CONG_BBR is not set CONFIG_DEFAULT_CUBIC=y # CONFIG_DEFAULT_RENO is not set CONFIG_DEFAULT_TCP_CONG="cubic" @@ -800,7 +823,7 @@ CONFIG_INET6_AH=y CONFIG_INET6_ESP=y CONFIG_INET6_IPCOMP=y CONFIG_IPV6_MIP6=y -# CONFIG_IPV6_ILA is not set +CONFIG_IPV6_ILA=y CONFIG_INET6_XFRM_TUNNEL=y CONFIG_INET6_TUNNEL=y 
CONFIG_INET6_XFRM_MODE_TRANSPORT=y @@ -817,8 +840,12 @@ CONFIG_IPV6_SUBTREES=y CONFIG_IPV6_MROUTE=y CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y CONFIG_IPV6_PIMSM_V2=y +CONFIG_IPV6_SEG6_LWTUNNEL=y +# CONFIG_IPV6_SEG6_INLINE is not set +# CONFIG_IPV6_SEG6_HMAC is not set +# CONFIG_NETLABEL is not set # CONFIG_NETWORK_SECMARK is not set -CONFIG_NET_PTP_CLASSIFY=y +# CONFIG_NET_PTP_CLASSIFY is not set # CONFIG_NETWORK_PHY_TIMESTAMPING is not set CONFIG_NETFILTER=y # CONFIG_NETFILTER_DEBUG is not set @@ -835,6 +862,7 @@ CONFIG_NETFILTER_NETLINK=y # CONFIG_NETFILTER_NETLINK_LOG is not set # CONFIG_NF_CONNTRACK is not set CONFIG_NF_LOG_COMMON=y +# CONFIG_NF_LOG_NETDEV is not set # CONFIG_NF_TABLES is not set CONFIG_NETFILTER_XTABLES=y @@ -853,6 +881,7 @@ CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y CONFIG_NETFILTER_XT_TARGET_HL=y # CONFIG_NETFILTER_XT_TARGET_HMARK is not set # CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +# CONFIG_NETFILTER_XT_TARGET_LED is not set CONFIG_NETFILTER_XT_TARGET_LOG=y # CONFIG_NETFILTER_XT_TARGET_MARK is not set # CONFIG_NETFILTER_XT_TARGET_NFLOG is not set @@ -897,7 +926,6 @@ CONFIG_NETFILTER_XT_MATCH_RATEEST=y # CONFIG_NETFILTER_XT_MATCH_REALM is not set CONFIG_NETFILTER_XT_MATCH_RECENT=y # CONFIG_NETFILTER_XT_MATCH_SCTP is not set -CONFIG_NETFILTER_XT_MATCH_SOCKET=y # CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set CONFIG_NETFILTER_XT_MATCH_STRING=y CONFIG_NETFILTER_XT_MATCH_TCPMSS=y @@ -913,6 +941,7 @@ CONFIG_IP_SET_HASH_IPMARK=y CONFIG_IP_SET_HASH_IPPORT=y CONFIG_IP_SET_HASH_IPPORTIP=y CONFIG_IP_SET_HASH_IPPORTNET=y +CONFIG_IP_SET_HASH_IPMAC=y CONFIG_IP_SET_HASH_MAC=y CONFIG_IP_SET_HASH_NETPORTNET=y CONFIG_IP_SET_HASH_NET=y @@ -925,7 +954,8 @@ CONFIG_IP_SET_LIST_SET=y # # IP: Netfilter Configuration # -CONFIG_NF_DEFRAG_IPV4=y +# CONFIG_NF_DEFRAG_IPV4 is not set +# CONFIG_NF_SOCKET_IPV4 is not set # CONFIG_NF_DUP_IPV4 is not set CONFIG_NF_LOG_ARP=y CONFIG_NF_LOG_IPV4=y @@ -941,12 +971,14 @@ CONFIG_IP_NF_MANGLE=y # CONFIG_IP_NF_TARGET_ECN is not set CONFIG_IP_NF_TARGET_TTL=y # CONFIG_IP_NF_RAW is not set +# CONFIG_IP_NF_SECURITY is not set # CONFIG_IP_NF_ARPTABLES is not set # # IPv6: Netfilter Configuration # -CONFIG_NF_DEFRAG_IPV6=y +# CONFIG_NF_DEFRAG_IPV6 is not set +# CONFIG_NF_SOCKET_IPV6 is not set # CONFIG_NF_DUP_IPV6 is not set CONFIG_NF_REJECT_IPV6=y CONFIG_NF_LOG_IPV6=y @@ -965,15 +997,10 @@ CONFIG_IP6_NF_FILTER=y CONFIG_IP6_NF_TARGET_REJECT=y CONFIG_IP6_NF_MANGLE=y # CONFIG_IP6_NF_RAW is not set +# CONFIG_IP6_NF_SECURITY is not set # CONFIG_BRIDGE_NF_EBTABLES is not set # CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=y -# CONFIG_SCTP_DBG_OBJCNT is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set +# CONFIG_IP_SCTP is not set # CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set @@ -985,7 +1012,6 @@ CONFIG_BRIDGE=y CONFIG_BRIDGE_IGMP_SNOOPING=y CONFIG_BRIDGE_VLAN_FILTERING=y CONFIG_HAVE_NET_DSA=y -# CONFIG_NET_DSA is not set CONFIG_VLAN_8021Q=y CONFIG_VLAN_8021Q_GVRP=y CONFIG_VLAN_8021Q_MVRP=y @@ -1046,6 +1072,7 @@ CONFIG_NET_CLS_FLOW=y CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=y CONFIG_NET_CLS_FLOWER=y +# CONFIG_NET_CLS_MATCHALL is not set CONFIG_NET_EMATCH=y CONFIG_NET_EMATCH_STACK=32 CONFIG_NET_EMATCH_CMP=y @@ -1067,23 +1094,25 @@ CONFIG_NET_ACT_SKBEDIT=y CONFIG_NET_ACT_CSUM=y CONFIG_NET_ACT_VLAN=y CONFIG_NET_ACT_BPF=y +# CONFIG_NET_ACT_SKBMOD is not set # CONFIG_NET_ACT_IFE 
is not set +# CONFIG_NET_ACT_TUNNEL_KEY is not set CONFIG_NET_CLS_IND=y CONFIG_NET_SCH_FIFO=y # CONFIG_DCB is not set CONFIG_DNS_RESOLVER=y # CONFIG_BATMAN_ADV is not set # CONFIG_OPENVSWITCH is not set -CONFIG_VSOCKETS=y +# CONFIG_VSOCKETS is not set CONFIG_NETLINK_DIAG=y # CONFIG_MPLS is not set # CONFIG_HSR is not set -CONFIG_NET_SWITCHDEV=y -# CONFIG_NET_L3_MASTER_DEV is not set +# CONFIG_NET_SWITCHDEV is not set +CONFIG_NET_L3_MASTER_DEV=y +# CONFIG_NET_NCSI is not set CONFIG_RPS=y CONFIG_RFS_ACCEL=y CONFIG_XPS=y -CONFIG_SOCK_CGROUP_DATA=y CONFIG_CGROUP_NET_PRIO=y CONFIG_CGROUP_NET_CLASSID=y CONFIG_NET_RX_BUSY_POLL=y @@ -1100,6 +1129,7 @@ CONFIG_NET_FLOW_LIMIT=y # CONFIG_BT is not set # CONFIG_AF_RXRPC is not set CONFIG_AF_KCM=y +CONFIG_STREAM_PARSER=y CONFIG_FIB_RULES=y # CONFIG_WIRELESS is not set # CONFIG_WIMAX is not set @@ -1109,6 +1139,7 @@ CONFIG_FIB_RULES=y # CONFIG_CEPH_LIB is not set # CONFIG_NFC is not set CONFIG_LWTUNNEL=y +CONFIG_LWTUNNEL_BPF=y CONFIG_DST_CACHE=y CONFIG_NET_DEVLINK=y CONFIG_MAY_USE_DEVLINK=y @@ -1133,13 +1164,14 @@ CONFIG_EXTRA_FIRMWARE="" # CONFIG_ALLOW_DEV_COREDUMP is not set # CONFIG_DEBUG_DRIVER is not set # CONFIG_DEBUG_DEVRES is not set +# CONFIG_DEBUG_TEST_DRIVER_REMOVE is not set # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_GENERIC_CPU_AUTOPROBE=y CONFIG_REGMAP=y CONFIG_REGMAP_I2C=y CONFIG_DMA_SHARED_BUFFER=y -# CONFIG_FENCE_TRACE is not set +# CONFIG_DMA_FENCE_TRACE is not set # # Bus devices @@ -1180,6 +1212,8 @@ CONFIG_BLK_DEV_NBD=y # CONFIG_BLK_DEV_RBD is not set # CONFIG_BLK_DEV_RSXX is not set # CONFIG_BLK_DEV_NVME is not set +# CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set # # Misc devices @@ -1198,12 +1232,10 @@ CONFIG_BLK_DEV_NBD=y # CONFIG_ISL29003 is not set # CONFIG_ISL29020 is not set # CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1780 is not set # CONFIG_SENSORS_BH1770 is not set # CONFIG_SENSORS_APDS990X is not set # CONFIG_HMC6352 is not set # CONFIG_DS1682 is not set -# CONFIG_BMP085_I2C is not set # CONFIG_USB_SWITCH_FSA9480 is not set # CONFIG_SRAM is not set # CONFIG_C2PORT is not set @@ -1220,7 +1252,6 @@ CONFIG_BLK_DEV_NBD=y # # Texas Instruments shared transport line discipline # -# CONFIG_TI_ST is not set # CONFIG_SENSORS_LIS3_I2C is not set # @@ -1269,8 +1300,7 @@ CONFIG_INTEL_MEI_TXE=y # CONFIG_GENWQE is not set # CONFIG_ECHO is not set # CONFIG_CXL_BASE is not set -# CONFIG_CXL_KERNEL_API is not set -# CONFIG_CXL_EEH is not set +# CONFIG_CXL_AFU_DRIVER_OPS is not set CONFIG_HAVE_IDE=y # CONFIG_IDE is not set @@ -1291,7 +1321,8 @@ CONFIG_SCSI_MQ_DEFAULT=y CONFIG_BLK_DEV_SD=y # CONFIG_CHR_DEV_ST is not set # CONFIG_CHR_DEV_OSST is not set -# CONFIG_BLK_DEV_SR is not set +CONFIG_BLK_DEV_SR=y +# CONFIG_BLK_DEV_SR_VENDOR is not set CONFIG_CHR_DEV_SG=y # CONFIG_CHR_DEV_SCH is not set CONFIG_SCSI_CONSTANTS=y @@ -1315,7 +1346,7 @@ CONFIG_ATA=y CONFIG_ATA_VERBOSE_ERROR=y CONFIG_ATA_ACPI=y # CONFIG_SATA_ZPODD is not set -CONFIG_SATA_PMP=y +# CONFIG_SATA_PMP is not set # # Controllers with non-SFF native interface @@ -1336,7 +1367,6 @@ CONFIG_MD_RAID10=y CONFIG_MD_RAID456=y # CONFIG_MD_MULTIPATH is not set # CONFIG_MD_FAULTY is not set -CONFIG_MD_CLUSTER=y CONFIG_BCACHE=y # CONFIG_BCACHE_DEBUG is not set # CONFIG_BCACHE_CLOSURES_DEBUG is not set @@ -1345,28 +1375,22 @@ CONFIG_BLK_DEV_DM=y CONFIG_DM_MQ_DEFAULT=y # CONFIG_DM_DEBUG is not set CONFIG_DM_BUFIO=y -# CONFIG_DM_DEBUG_BLOCK_STACK_TRACING is not set -CONFIG_DM_BIO_PRISON=y -CONFIG_DM_PERSISTENT_DATA=y +# 
CONFIG_DM_DEBUG_BLOCK_MANAGER_LOCKING is not set CONFIG_DM_CRYPT=y CONFIG_DM_SNAPSHOT=y -CONFIG_DM_THIN_PROVISIONING=y -CONFIG_DM_CACHE=y -CONFIG_DM_CACHE_SMQ=y -CONFIG_DM_CACHE_CLEANER=y -CONFIG_DM_ERA=y -CONFIG_DM_MIRROR=y -CONFIG_DM_LOG_USERSPACE=y +# CONFIG_DM_THIN_PROVISIONING is not set +# CONFIG_DM_CACHE is not set +# CONFIG_DM_ERA is not set +# CONFIG_DM_MIRROR is not set CONFIG_DM_RAID=y -CONFIG_DM_ZERO=y +# CONFIG_DM_ZERO is not set # CONFIG_DM_MULTIPATH is not set # CONFIG_DM_DELAY is not set CONFIG_DM_UEVENT=y # CONFIG_DM_FLAKEY is not set -CONFIG_DM_VERITY=y -# CONFIG_DM_VERITY_FEC is not set -CONFIG_DM_SWITCH=y -CONFIG_DM_LOG_WRITES=y +# CONFIG_DM_VERITY is not set +# CONFIG_DM_SWITCH is not set +# CONFIG_DM_LOG_WRITES is not set # CONFIG_TARGET_CORE is not set # CONFIG_FUSION is not set @@ -1385,8 +1409,7 @@ CONFIG_NET_CORE=y # CONFIG_NET_FC is not set # CONFIG_IFB is not set # CONFIG_NET_TEAM is not set -CONFIG_MACVLAN=y -CONFIG_MACVTAP=y +# CONFIG_MACVLAN is not set CONFIG_IPVLAN=y # CONFIG_VXLAN is not set # CONFIG_GENEVE is not set @@ -1398,7 +1421,8 @@ CONFIG_MACSEC=y # CONFIG_TUN is not set # CONFIG_TUN_VNET_CROSS_LE is not set # CONFIG_VETH is not set -# CONFIG_NLMON is not set +CONFIG_NLMON=y +# CONFIG_NET_VRF is not set # CONFIG_ARCNET is not set # @@ -1412,8 +1436,10 @@ CONFIG_ETHERNET=y # CONFIG_NET_VENDOR_3COM is not set # CONFIG_NET_VENDOR_ADAPTEC is not set # CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_ALTERA_TSE is not set +# CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AMD is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_VENDOR_ATHEROS is not set @@ -1439,6 +1465,7 @@ CONFIG_E1000E=y CONFIG_E1000E_HWTS=y CONFIG_IGB=y CONFIG_IGB_HWMON=y +CONFIG_IGB_DCA=y # CONFIG_IGBVF is not set # CONFIG_IXGB is not set # CONFIG_IXGBE is not set @@ -1472,7 +1499,7 @@ CONFIG_R8169=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SILAN is not set # CONFIG_NET_VENDOR_SIS is not set -# CONFIG_SFC is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set @@ -1484,7 +1511,47 @@ CONFIG_R8169=y # CONFIG_FDDI is not set # CONFIG_HIPPI is not set # CONFIG_NET_SB1000 is not set -# CONFIG_PHYLIB is not set +CONFIG_PHYLIB=y +# CONFIG_LED_TRIGGER_PHY is not set + +# +# MDIO bus device drivers +# +# CONFIG_MDIO_BCM_UNIMAC is not set +# CONFIG_MDIO_BITBANG is not set +# CONFIG_MDIO_OCTEON is not set +# CONFIG_MDIO_THUNDER is not set + +# +# MII PHY device drivers +# +# CONFIG_AMD_PHY is not set +# CONFIG_AQUANTIA_PHY is not set +# CONFIG_AT803X_PHY is not set +# CONFIG_BCM7XXX_PHY is not set +# CONFIG_BCM87XX_PHY is not set +# CONFIG_BROADCOM_PHY is not set +# CONFIG_CICADA_PHY is not set +# CONFIG_DAVICOM_PHY is not set +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +# CONFIG_FIXED_PHY is not set +# CONFIG_ICPLUS_PHY is not set +# CONFIG_INTEL_XWAY_PHY is not set +# CONFIG_LSI_ET1011C_PHY is not set +# CONFIG_LXT_PHY is not set +# CONFIG_MARVELL_PHY is not set +# CONFIG_MICREL_PHY is not set +# CONFIG_MICROCHIP_PHY is not set +# CONFIG_MICROSEMI_PHY is not set +# CONFIG_NATIONAL_PHY is not set +# CONFIG_QSEMI_PHY is not set +# CONFIG_REALTEK_PHY is not set +# CONFIG_SMSC_PHY is not set +# CONFIG_STE10XP is not set +# CONFIG_TERANETICS_PHY is not set +# CONFIG_VITESSE_PHY is not set +# CONFIG_XILINX_GMII2RGMII is not set # CONFIG_PPP is not set # CONFIG_SLIP 
is not set # CONFIG_USB_NET_DRIVERS is not set @@ -1503,10 +1570,11 @@ CONFIG_R8169=y # Input device support # CONFIG_INPUT=y +CONFIG_INPUT_LEDS=y # CONFIG_INPUT_FF_MEMLESS is not set # CONFIG_INPUT_POLLDEV is not set # CONFIG_INPUT_SPARSEKMAP is not set -# CONFIG_INPUT_MATRIXKMAP is not set +CONFIG_INPUT_MATRIXKMAP=y # # Userland interfaces @@ -1529,11 +1597,9 @@ CONFIG_KEYBOARD_ATKBD=y # CONFIG_KEYBOARD_QT1070 is not set # CONFIG_KEYBOARD_QT2160 is not set # CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_GPIO is not set -# CONFIG_KEYBOARD_GPIO_POLLED is not set # CONFIG_KEYBOARD_TCA6416 is not set # CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set +# CONFIG_KEYBOARD_LM8323 is not set # CONFIG_KEYBOARD_LM8333 is not set # CONFIG_KEYBOARD_MAX7359 is not set # CONFIG_KEYBOARD_MCS is not set @@ -1544,28 +1610,7 @@ CONFIG_KEYBOARD_ATKBD=y # CONFIG_KEYBOARD_STOWAWAY is not set # CONFIG_KEYBOARD_SUNKBD is not set # CONFIG_KEYBOARD_XTKBD is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=y -# CONFIG_MOUSE_PS2_ALPS is not set -# CONFIG_MOUSE_PS2_BYD is not set -# CONFIG_MOUSE_PS2_LOGIPS2PP is not set -# CONFIG_MOUSE_PS2_SYNAPTICS is not set -# CONFIG_MOUSE_PS2_CYPRESS is not set -# CONFIG_MOUSE_PS2_LIFEBOOK is not set -# CONFIG_MOUSE_PS2_TRACKPOINT is not set -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_PS2_FOCALTECH is not set -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_CYAPA is not set -# CONFIG_MOUSE_ELAN_I2C is not set -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_GPIO is not set -# CONFIG_MOUSE_SYNAPTICS_I2C is not set -# CONFIG_MOUSE_SYNAPTICS_USB is not set +# CONFIG_INPUT_MOUSE is not set # CONFIG_INPUT_JOYSTICK is not set # CONFIG_INPUT_TABLET is not set # CONFIG_INPUT_TOUCHSCREEN is not set @@ -1576,9 +1621,7 @@ CONFIG_INPUT_MISC=y CONFIG_INPUT_PCSPKR=y # CONFIG_INPUT_MMA8450 is not set # CONFIG_INPUT_MPU3050 is not set -# CONFIG_INPUT_GP2A is not set -# CONFIG_INPUT_GPIO_BEEPER is not set -# CONFIG_INPUT_GPIO_TILT_POLLED is not set +# CONFIG_INPUT_APANEL is not set # CONFIG_INPUT_ATLAS_BTNS is not set # CONFIG_INPUT_ATI_REMOTE2 is not set # CONFIG_INPUT_KEYSPAN_REMOTE is not set @@ -1588,11 +1631,10 @@ CONFIG_INPUT_PCSPKR=y # CONFIG_INPUT_CM109 is not set CONFIG_INPUT_UINPUT=y # CONFIG_INPUT_PCF8574 is not set -# CONFIG_INPUT_GPIO_ROTARY_ENCODER is not set # CONFIG_INPUT_ADXL34X is not set +# CONFIG_INPUT_IMS_PCU is not set # CONFIG_INPUT_CMA3000 is not set # CONFIG_INPUT_IDEAPAD_SLIDEBAR is not set -# CONFIG_INPUT_DRV260X_HAPTICS is not set # CONFIG_INPUT_DRV2665_HAPTICS is not set # CONFIG_INPUT_DRV2667_HAPTICS is not set # CONFIG_RMI4_CORE is not set @@ -1642,6 +1684,7 @@ CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y CONFIG_SERIAL_8250_PNP=y # CONFIG_SERIAL_8250_FINTEK is not set CONFIG_SERIAL_8250_CONSOLE=y +CONFIG_SERIAL_8250_DMA=y CONFIG_SERIAL_8250_PCI=y CONFIG_SERIAL_8250_NR_UARTS=4 CONFIG_SERIAL_8250_RUNTIME_UARTS=4 @@ -1649,6 +1692,7 @@ CONFIG_SERIAL_8250_RUNTIME_UARTS=4 # CONFIG_SERIAL_8250_FSL is not set # CONFIG_SERIAL_8250_DW is not set # CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_LPSS is not set # CONFIG_SERIAL_8250_MID is not set # CONFIG_SERIAL_8250_MOXA is not set @@ -1671,11 +1715,15 @@ CONFIG_IPMI_HANDLER=y CONFIG_IPMI_PANIC_EVENT=y CONFIG_IPMI_PANIC_STRING=y CONFIG_IPMI_DEVICE_INTERFACE=y -# CONFIG_IPMI_SI is not set +CONFIG_IPMI_SI=y CONFIG_IPMI_SSIF=y 
-CONFIG_IPMI_WATCHDOG=y +# CONFIG_IPMI_WATCHDOG is not set CONFIG_IPMI_POWEROFF=y -# CONFIG_HW_RANDOM is not set +CONFIG_HW_RANDOM=y +# CONFIG_HW_RANDOM_TIMERIOMEM is not set +CONFIG_HW_RANDOM_INTEL=y +# CONFIG_HW_RANDOM_AMD is not set +# CONFIG_HW_RANDOM_VIA is not set CONFIG_NVRAM=y # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set @@ -1701,6 +1749,7 @@ CONFIG_I2C_COMPAT=y CONFIG_I2C_CHARDEV=y # CONFIG_I2C_MUX is not set CONFIG_I2C_HELPER_AUTO=y +CONFIG_I2C_SMBUS=y CONFIG_I2C_ALGOBIT=y # @@ -1734,11 +1783,9 @@ CONFIG_I2C_SCMI=y # # I2C system bus drivers (mostly embedded / system-on-chip) # -# CONFIG_I2C_CBUS_GPIO is not set # CONFIG_I2C_DESIGNWARE_PLATFORM is not set # CONFIG_I2C_DESIGNWARE_PCI is not set # CONFIG_I2C_EMEV2 is not set -# CONFIG_I2C_GPIO is not set # CONFIG_I2C_OCORES is not set # CONFIG_I2C_PCA_PLATFORM is not set # CONFIG_I2C_PXA_PCI is not set @@ -1757,6 +1804,7 @@ CONFIG_I2C_SCMI=y # # Other I2C/SMBus bus drivers # +# CONFIG_I2C_MLXCPLD is not set # CONFIG_I2C_SLAVE is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set @@ -1770,7 +1818,6 @@ CONFIG_I2C_SCMI=y # CONFIG_PPS=y # CONFIG_PPS_DEBUG is not set -CONFIG_NTP_PPS=y # # PPS clients support @@ -1786,7 +1833,7 @@ CONFIG_PPS_CLIENT_LDISC=y # # PTP clock support # -CONFIG_PTP_1588_CLOCK=y +# CONFIG_PTP_1588_CLOCK is not set # # Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. @@ -1797,72 +1844,14 @@ CONFIG_PINCTRL=y # Pin controllers # # CONFIG_DEBUG_PINCTRL is not set -# CONFIG_PINCTRL_AMD is not set -# CONFIG_PINCTRL_BAYTRAIL is not set # CONFIG_PINCTRL_CHERRYVIEW is not set # CONFIG_PINCTRL_BROXTON is not set # CONFIG_PINCTRL_SUNRISEPOINT is not set -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_GPIOLIB=y -CONFIG_GPIO_DEVRES=y -CONFIG_GPIO_ACPI=y -CONFIG_GPIOLIB_IRQCHIP=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_GENERIC=y - -# -# Memory mapped GPIO drivers -# -# CONFIG_GPIO_AMDPT is not set -# CONFIG_GPIO_DWAPB is not set -CONFIG_GPIO_GENERIC_PLATFORM=y -# CONFIG_GPIO_ICH is not set -CONFIG_GPIO_LYNXPOINT=y -# CONFIG_GPIO_VX855 is not set -# CONFIG_GPIO_ZX is not set - -# -# Port-mapped I/O GPIO drivers -# -# CONFIG_GPIO_F7188X is not set -# CONFIG_GPIO_IT87 is not set -# CONFIG_GPIO_SCH is not set -# CONFIG_GPIO_SCH311X is not set - -# -# I2C GPIO expanders -# -# CONFIG_GPIO_ADP5588 is not set -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -# CONFIG_GPIO_PCA953X is not set -# CONFIG_GPIO_PCF857X is not set -# CONFIG_GPIO_SX150X is not set -# CONFIG_GPIO_TPIC2810 is not set - -# -# MFD GPIO expanders -# - -# -# PCI GPIO expanders -# -# CONFIG_GPIO_AMD8111 is not set -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_INTEL_MID is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_RDC321X is not set - -# -# SPI or I2C GPIO expanders -# -# CONFIG_GPIO_MCP23S08 is not set - -# -# USB GPIO expanders -# +# CONFIG_GPIOLIB is not set # CONFIG_W1 is not set +# CONFIG_POWER_AVS is not set +CONFIG_POWER_RESET=y +# CONFIG_POWER_RESET_RESTART is not set CONFIG_POWER_SUPPLY=y # CONFIG_POWER_SUPPLY_DEBUG is not set # CONFIG_PDA_POWER is not set @@ -1876,18 +1865,9 @@ CONFIG_POWER_SUPPLY=y # CONFIG_BATTERY_MAX17042 is not set # CONFIG_CHARGER_MAX8903 is not set # CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set # CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24257 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_BQ25890 is not set # 
CONFIG_CHARGER_SMB347 is not set # CONFIG_BATTERY_GAUGE_LTC2941 is not set -# CONFIG_CHARGER_RT9455 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_RESTART is not set -# CONFIG_POWER_AVS is not set CONFIG_HWMON=y # CONFIG_HWMON_VID is not set # CONFIG_HWMON_DEBUG_CHIP is not set @@ -1924,11 +1904,11 @@ CONFIG_HWMON=y # CONFIG_SENSORS_F71882FG is not set # CONFIG_SENSORS_F75375S is not set # CONFIG_SENSORS_FSCHMD is not set +# CONFIG_SENSORS_FTSTEUTATES is not set # CONFIG_SENSORS_GL518SM is not set # CONFIG_SENSORS_GL520SM is not set # CONFIG_SENSORS_G760A is not set # CONFIG_SENSORS_G762 is not set -# CONFIG_SENSORS_GPIO_FAN is not set # CONFIG_SENSORS_HIH6130 is not set # CONFIG_SENSORS_IBMAEM is not set # CONFIG_SENSORS_IBMPEX is not set @@ -1956,6 +1936,7 @@ CONFIG_SENSORS_JC42=y # CONFIG_SENSORS_MAX6697 is not set # CONFIG_SENSORS_MAX31790 is not set # CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_TC654 is not set # CONFIG_SENSORS_LM63 is not set # CONFIG_SENSORS_LM73 is not set # CONFIG_SENSORS_LM75 is not set @@ -1980,8 +1961,8 @@ CONFIG_SENSORS_JC42=y # CONFIG_SENSORS_NCT7904 is not set # CONFIG_SENSORS_PCF8591 is not set # CONFIG_PMBUS is not set -# CONFIG_SENSORS_SHT15 is not set # CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHT3x is not set # CONFIG_SENSORS_SHTC1 is not set # CONFIG_SENSORS_SIS5595 is not set # CONFIG_SENSORS_DME1737 is not set @@ -2001,10 +1982,12 @@ CONFIG_SENSORS_JC42=y # CONFIG_SENSORS_AMC6821 is not set # CONFIG_SENSORS_INA209 is not set # CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_INA3221 is not set # CONFIG_SENSORS_TC74 is not set # CONFIG_SENSORS_THMC50 is not set # CONFIG_SENSORS_TMP102 is not set # CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP108 is not set # CONFIG_SENSORS_TMP401 is not set # CONFIG_SENSORS_TMP421 is not set # CONFIG_SENSORS_VIA_CPUTEMP is not set @@ -2020,6 +2003,7 @@ CONFIG_SENSORS_JC42=y # CONFIG_SENSORS_W83L786NG is not set # CONFIG_SENSORS_W83627HF is not set # CONFIG_SENSORS_W83627EHF is not set +# CONFIG_SENSORS_XGENE is not set # # ACPI drivers @@ -2059,6 +2043,7 @@ CONFIG_WATCHDOG_SYSFS=y # Watchdog Device Drivers # # CONFIG_SOFT_WATCHDOG is not set +# CONFIG_WDAT_WDT is not set # CONFIG_XILINX_WATCHDOG is not set # CONFIG_ZIIRAVE_WATCHDOG is not set # CONFIG_CADENCE_WATCHDOG is not set @@ -2095,9 +2080,8 @@ CONFIG_ITCO_VENDOR_SUPPORT=y # CONFIG_W83977F_WDT is not set # CONFIG_MACHZ_WDT is not set # CONFIG_SBC_EPX_C3_WATCHDOG is not set -CONFIG_INTEL_MEI_WDT=y +# CONFIG_INTEL_MEI_WDT is not set # CONFIG_NI903X_WDT is not set -# CONFIG_MEN_A21_WDT is not set # # PCI-based Watchdog Cards @@ -2109,6 +2093,11 @@ CONFIG_INTEL_MEI_WDT=y # USB-based Watchdog Cards # # CONFIG_USBPCWATCHDOG is not set + +# +# Watchdog Pretimeout Governors +# +# CONFIG_WATCHDOG_PRETIMEOUT_GOV is not set CONFIG_SSB_POSSIBLE=y # @@ -2128,7 +2117,6 @@ CONFIG_BCMA_POSSIBLE=y CONFIG_MFD_CORE=y # CONFIG_MFD_AS3711 is not set # CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set # CONFIG_MFD_BCM590XX is not set # CONFIG_MFD_AXP20X_I2C is not set # CONFIG_MFD_CROS_EC is not set @@ -2141,11 +2129,9 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_DLN2 is not set # CONFIG_MFD_MC13XXX_I2C is not set # CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set # CONFIG_MFD_INTEL_QUARK_I2C_GPIO is not set CONFIG_LPC_ICH=y # CONFIG_LPC_SCH is not set -# CONFIG_INTEL_SOC_PMIC is not set # CONFIG_MFD_INTEL_LPSS_ACPI is not set # CONFIG_MFD_INTEL_LPSS_PCI is not set # CONFIG_MFD_JANZ_CMODIO is not set @@ -2170,7 +2156,6 @@ 
CONFIG_LPC_ICH=y # CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RTSX_USB is not set # CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RN5T618 is not set # CONFIG_MFD_SEC_CORE is not set # CONFIG_MFD_SI476X_CORE is not set # CONFIG_MFD_SM501 is not set @@ -2183,14 +2168,13 @@ CONFIG_LPC_ICH=y # CONFIG_MFD_LP8788 is not set # CONFIG_MFD_PALMAS is not set # CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set # CONFIG_TPS6507X is not set # CONFIG_MFD_TPS65086 is not set # CONFIG_MFD_TPS65090 is not set # CONFIG_MFD_TPS65217 is not set +# CONFIG_MFD_TI_LP873X is not set # CONFIG_MFD_TPS65218 is not set # CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set # CONFIG_MFD_TPS65912_I2C is not set # CONFIG_MFD_TPS80031 is not set # CONFIG_TWL4030_CORE is not set @@ -2211,10 +2195,12 @@ CONFIG_LPC_ICH=y # Graphics support # # CONFIG_AGP is not set -# CONFIG_VGA_ARB is not set +CONFIG_VGA_ARB=y +CONFIG_VGA_ARB_MAX_GPUS=16 # CONFIG_VGA_SWITCHEROO is not set CONFIG_DRM=y CONFIG_DRM_DP_AUX_CHARDEV=y +# CONFIG_DRM_DEBUG_MM is not set CONFIG_DRM_KMS_HELPER=y CONFIG_DRM_KMS_FB_HELPER=y CONFIG_DRM_FBDEV_EMULATION=y @@ -2224,12 +2210,9 @@ CONFIG_DRM_TTM=y # # I2C encoder or helper chips # -# CONFIG_DRM_I2C_ADV7511 is not set # CONFIG_DRM_I2C_CH7006 is not set # CONFIG_DRM_I2C_SIL164 is not set # CONFIG_DRM_I2C_NXP_TDA998X is not set -# CONFIG_DRM_TDFX is not set -# CONFIG_DRM_R128 is not set # CONFIG_DRM_RADEON is not set # CONFIG_DRM_AMDGPU is not set @@ -2238,9 +2221,6 @@ CONFIG_DRM_TTM=y # # CONFIG_DRM_NOUVEAU is not set # CONFIG_DRM_I915 is not set -# CONFIG_DRM_MGA is not set -# CONFIG_DRM_VIA is not set -# CONFIG_DRM_SAVAGE is not set # CONFIG_DRM_VGEM is not set # CONFIG_DRM_VMWGFX is not set # CONFIG_DRM_GMA500 is not set @@ -2256,6 +2236,8 @@ CONFIG_DRM_BRIDGE=y # Display Interface Bridges # # CONFIG_DRM_ANALOGIX_ANX78XX is not set +# CONFIG_DRM_HISI_HIBMC is not set +# CONFIG_DRM_LEGACY is not set # # Frame buffer Devices @@ -2310,7 +2292,6 @@ CONFIG_FB_EFI=y # CONFIG_FB_S3 is not set # CONFIG_FB_SAVAGE is not set # CONFIG_FB_SIS is not set -# CONFIG_FB_VIA is not set # CONFIG_FB_NEOMAGIC is not set # CONFIG_FB_KYRO is not set # CONFIG_FB_3DFX is not set @@ -2370,8 +2351,8 @@ CONFIG_HID_GENERIC=y # CONFIG_HID_BETOP_FF is not set # CONFIG_HID_CHERRY is not set # CONFIG_HID_CHICONY is not set +# CONFIG_HID_CORSAIR is not set # CONFIG_HID_CMEDIA is not set -# CONFIG_HID_CP2112 is not set # CONFIG_HID_CYPRESS is not set # CONFIG_HID_DRAGONRISE is not set # CONFIG_HID_EMS_FF is not set @@ -2381,6 +2362,7 @@ CONFIG_HID_GENERIC=y # CONFIG_HID_GEMBIRD is not set # CONFIG_HID_GFRM is not set # CONFIG_HID_HOLTEK is not set +# CONFIG_HID_GT683R is not set # CONFIG_HID_KEYTOUCH is not set # CONFIG_HID_KYE is not set # CONFIG_HID_UCLOGIC is not set @@ -2390,9 +2372,11 @@ CONFIG_HID_GENERIC=y # CONFIG_HID_TWINHAN is not set # CONFIG_HID_KENSINGTON is not set # CONFIG_HID_LCPOWER is not set +# CONFIG_HID_LED is not set # CONFIG_HID_LENOVO is not set # CONFIG_HID_LOGITECH is not set # CONFIG_HID_MAGICMOUSE is not set +# CONFIG_HID_MAYFLASH is not set # CONFIG_HID_MICROSOFT is not set # CONFIG_HID_MONTEREY is not set # CONFIG_HID_MULTITOUCH is not set @@ -2407,6 +2391,7 @@ CONFIG_HID_GENERIC=y # CONFIG_HID_ROCCAT is not set # CONFIG_HID_SAITEK is not set # CONFIG_HID_SAMSUNG is not set +# CONFIG_HID_SONY is not set # CONFIG_HID_SPEEDLINK is not set # CONFIG_HID_STEELSERIES is not set # CONFIG_HID_SUNPLUS is not set @@ -2415,12 +2400,16 @@ CONFIG_HID_GENERIC=y # CONFIG_HID_SMARTJOYPLUS is not set # 
CONFIG_HID_TIVO is not set # CONFIG_HID_TOPSEED is not set +# CONFIG_HID_THINGM is not set # CONFIG_HID_THRUSTMASTER is not set +# CONFIG_HID_UDRAW_PS3 is not set # CONFIG_HID_WACOM is not set +# CONFIG_HID_WIIMOTE is not set # CONFIG_HID_XINMO is not set # CONFIG_HID_ZEROPLUS is not set # CONFIG_HID_ZYDACRON is not set # CONFIG_HID_SENSOR_HUB is not set +# CONFIG_HID_ALPS is not set # # USB HID support @@ -2433,6 +2422,11 @@ CONFIG_USB_HIDDEV=y # I2C HID support # # CONFIG_I2C_HID is not set + +# +# Intel ISH HID support +# +# CONFIG_INTEL_ISH_HID is not set CONFIG_USB_OHCI_LITTLE_ENDIAN=y CONFIG_USB_SUPPORT=y CONFIG_USB_COMMON=y @@ -2445,11 +2439,10 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y # CONFIG_USB_DEFAULT_PERSIST=y CONFIG_USB_DYNAMIC_MINORS=y -CONFIG_USB_OTG=y -CONFIG_USB_OTG_WHITELIST=y +# CONFIG_USB_OTG is not set +# CONFIG_USB_OTG_WHITELIST is not set # CONFIG_USB_OTG_BLACKLIST_HUB is not set -# CONFIG_USB_OTG_FSM is not set -CONFIG_USB_ULPI_BUS=y +# CONFIG_USB_LEDS_TRIGGER_USBPORT is not set # CONFIG_USB_MON is not set # CONFIG_USB_WUSB_CBAF is not set @@ -2460,11 +2453,7 @@ CONFIG_USB_ULPI_BUS=y CONFIG_USB_XHCI_HCD=y CONFIG_USB_XHCI_PCI=y # CONFIG_USB_XHCI_PLATFORM is not set -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_HCD_PLATFORM is not set +# CONFIG_USB_EHCI_HCD is not set # CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set # CONFIG_USB_ISP1362_HCD is not set @@ -2505,7 +2494,7 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_KARMA is not set # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set # CONFIG_USB_STORAGE_ENE_UB6250 is not set -# CONFIG_USB_UAS is not set +CONFIG_USB_UAS=y # # USB Imaging devices @@ -2516,7 +2505,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_MUSB_HDRC is not set # CONFIG_USB_DWC3 is not set # CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set # CONFIG_USB_ISP1760 is not set # @@ -2534,13 +2522,11 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_RIO500 is not set # CONFIG_USB_LEGOTOWER is not set # CONFIG_USB_LCD is not set -# CONFIG_USB_LED is not set # CONFIG_USB_CYPRESS_CY7C63 is not set # CONFIG_USB_CYTHERM is not set # CONFIG_USB_IDMOUSE is not set # CONFIG_USB_FTDI_ELAN is not set # CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_SISUSBVGA is not set # CONFIG_USB_LD is not set # CONFIG_USB_TRANCEVIBRATOR is not set # CONFIG_USB_IOWARRIOR is not set @@ -2550,7 +2536,9 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_YUREX is not set # CONFIG_USB_EZUSB_FX2 is not set # CONFIG_USB_HSIC_USB3503 is not set +# CONFIG_USB_HSIC_USB4604 is not set # CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set # CONFIG_UCSI is not set # @@ -2558,13 +2546,64 @@ CONFIG_USB_STORAGE=y # # CONFIG_USB_PHY is not set # CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set # CONFIG_USB_ISP1301 is not set # CONFIG_USB_GADGET is not set +# CONFIG_USB_LED_TRIG is not set +# CONFIG_USB_ULPI_BUS is not set # CONFIG_UWB is not set # CONFIG_MMC is not set # CONFIG_MEMSTICK is not set -# CONFIG_NEW_LEDS is not set +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y +CONFIG_LEDS_CLASS_FLASH=y + +# +# LED drivers +# +# CONFIG_LEDS_LM3530 is not set +# CONFIG_LEDS_LM3642 is not set +# CONFIG_LEDS_PCA9532 is not set +# CONFIG_LEDS_LP3944 is not set +# CONFIG_LEDS_LP5521 is not set +# CONFIG_LEDS_LP5523 is not set +# CONFIG_LEDS_LP5562 is not set +# CONFIG_LEDS_LP8501 is not set +# CONFIG_LEDS_LP8860 is not set +# CONFIG_LEDS_CLEVO_MAIL is not set +# CONFIG_LEDS_PCA955X is not set +# 
CONFIG_LEDS_PCA963X is not set +# CONFIG_LEDS_BD2802 is not set +# CONFIG_LEDS_INTEL_SS4200 is not set +# CONFIG_LEDS_TCA6507 is not set +# CONFIG_LEDS_TLC591XX is not set +# CONFIG_LEDS_LM355x is not set + +# +# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) +# +# CONFIG_LEDS_BLINKM is not set +# CONFIG_LEDS_MLXCPLD is not set +# CONFIG_LEDS_USER is not set +# CONFIG_LEDS_NIC78BX is not set + +# +# LED Triggers +# +CONFIG_LEDS_TRIGGERS=y +CONFIG_LEDS_TRIGGER_TIMER=y +CONFIG_LEDS_TRIGGER_ONESHOT=y +CONFIG_LEDS_TRIGGER_DISK=y +CONFIG_LEDS_TRIGGER_HEARTBEAT=y +# CONFIG_LEDS_TRIGGER_BACKLIGHT is not set +CONFIG_LEDS_TRIGGER_CPU=y +CONFIG_LEDS_TRIGGER_DEFAULT_ON=y + +# +# iptables trigger is under Netfilter config (LED target) +# +CONFIG_LEDS_TRIGGER_TRANSIENT=y +# CONFIG_LEDS_TRIGGER_CAMERA is not set +CONFIG_LEDS_TRIGGER_PANIC=y # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set CONFIG_EDAC_ATOMIC_SCRUB=y @@ -2578,7 +2617,7 @@ CONFIG_EDAC_MM_EDAC=y # CONFIG_EDAC_I82975X is not set # CONFIG_EDAC_I3000 is not set # CONFIG_EDAC_I3200 is not set -CONFIG_EDAC_IE31200=y +# CONFIG_EDAC_IE31200 is not set # CONFIG_EDAC_X38 is not set # CONFIG_EDAC_I5400 is not set # CONFIG_EDAC_I7CORE is not set @@ -2586,7 +2625,9 @@ CONFIG_EDAC_IE31200=y # CONFIG_EDAC_I5100 is not set # CONFIG_EDAC_I7300 is not set CONFIG_EDAC_SBRIDGE=y +# CONFIG_EDAC_SKX is not set CONFIG_RTC_LIB=y +CONFIG_RTC_MC146818_LIB=y CONFIG_RTC_CLASS=y CONFIG_RTC_HCTOSYS=y CONFIG_RTC_HCTOSYS_DEVICE="rtc0" @@ -2615,7 +2656,6 @@ CONFIG_RTC_INTF_DEV=y # CONFIG_RTC_DRV_RS5C372 is not set # CONFIG_RTC_DRV_ISL1208 is not set # CONFIG_RTC_DRV_ISL12022 is not set -# CONFIG_RTC_DRV_ISL12057 is not set # CONFIG_RTC_DRV_X1205 is not set # CONFIG_RTC_DRV_PCF8523 is not set # CONFIG_RTC_DRV_PCF85063 is not set @@ -2670,12 +2710,33 @@ CONFIG_RTC_DRV_CMOS=y # HID Sensor RTC drivers # # CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -# CONFIG_DMADEVICES is not set +CONFIG_DMADEVICES=y +# CONFIG_DMADEVICES_DEBUG is not set + +# +# DMA Devices +# +CONFIG_DMA_ENGINE=y +CONFIG_DMA_ACPI=y +# CONFIG_INTEL_IDMA64 is not set +CONFIG_INTEL_IOATDMA=y +# CONFIG_QCOM_HIDMA_MGMT is not set +# CONFIG_QCOM_HIDMA is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set + +# +# DMA Clients +# +CONFIG_ASYNC_TX_DMA=y +# CONFIG_DMATEST is not set +CONFIG_DMA_ENGINE_RAID=y # # DMABUF options # CONFIG_SYNC_FILE=y +CONFIG_DCA=y # CONFIG_AUXDISPLAY is not set # CONFIG_UIO is not set # CONFIG_VFIO is not set @@ -2691,7 +2752,6 @@ CONFIG_SYNC_FILE=y # Microsoft Hyper-V guest support # CONFIG_STAGING=y -# CONFIG_SLICOSS is not set # CONFIG_RTS5208 is not set # CONFIG_FB_SM750 is not set # CONFIG_FB_XGI is not set @@ -2710,8 +2770,10 @@ CONFIG_STAGING=y # CONFIG_CRYPTO_SKEIN is not set # CONFIG_UNISYSSPAR is not set # CONFIG_MOST is not set +# CONFIG_GREYBUS is not set CONFIG_X86_PLATFORM_DEVICES=y # CONFIG_ACERHDF is not set +# CONFIG_ALIENWARE_WMI is not set # CONFIG_DELL_WMI_AIO is not set # CONFIG_DELL_SMO8800 is not set # CONFIG_FUJITSU_TABLET is not set @@ -2728,6 +2790,7 @@ CONFIG_ACPI_WMI=y # CONFIG_TOSHIBA_WMI is not set # CONFIG_ACPI_CMPC is not set # CONFIG_INTEL_HID_EVENT is not set +# CONFIG_INTEL_VBTN is not set CONFIG_INTEL_IPS=y CONFIG_INTEL_PMC_CORE=y # CONFIG_IBM_RTL is not set @@ -2736,9 +2799,12 @@ CONFIG_INTEL_PMC_CORE=y CONFIG_INTEL_RST=y CONFIG_INTEL_SMARTCONNECT=y # CONFIG_PVPANIC is not set -# CONFIG_INTEL_PMC_IPC is not set +CONFIG_INTEL_PMC_IPC=y # CONFIG_SURFACE_PRO3_BUTTON is not set 
CONFIG_INTEL_PUNIT_IPC=y +# CONFIG_INTEL_TELEMETRY is not set +# CONFIG_MLX_PLATFORM is not set +# CONFIG_MLX_CPLD_PLATFORM is not set # CONFIG_CHROME_PLATFORMS is not set CONFIG_CLKDEV_LOOKUP=y CONFIG_HAVE_CLK_PREPARE=y @@ -2753,7 +2819,13 @@ CONFIG_COMMON_CLK=y # CONFIG_COMMON_CLK_NXP is not set # CONFIG_COMMON_CLK_PXA is not set # CONFIG_COMMON_CLK_PIC32 is not set -# CONFIG_COMMON_CLK_OXNAS is not set +# CONFIG_COMMON_CLK_MT2701 is not set +# CONFIG_COMMON_CLK_MT2701_MMSYS is not set +# CONFIG_COMMON_CLK_MT2701_IMGSYS is not set +# CONFIG_COMMON_CLK_MT2701_VDECSYS is not set +# CONFIG_COMMON_CLK_MT2701_HIFSYS is not set +# CONFIG_COMMON_CLK_MT2701_ETHSYS is not set +# CONFIG_COMMON_CLK_MT2701_BDPSYS is not set # # Hardware Spinlock drivers @@ -2770,7 +2842,9 @@ CONFIG_CLKBLD_I8253=y # CONFIG_SH_TIMER_MTU2 is not set # CONFIG_SH_TIMER_TMU is not set # CONFIG_EM_TIMER_STI is not set -# CONFIG_MAILBOX is not set +CONFIG_MAILBOX=y +CONFIG_PCC=y +# CONFIG_ALTERA_MBOX is not set CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y @@ -2789,7 +2863,7 @@ CONFIG_IRQ_REMAP=y # # Remoteproc drivers # -# CONFIG_STE_MODEM_RPROC is not set +# CONFIG_REMOTEPROC is not set # # Rpmsg drivers @@ -2798,6 +2872,10 @@ CONFIG_IRQ_REMAP=y # # SOC (System On Chip) specific Drivers # + +# +# Broadcom SoC drivers +# # CONFIG_SUNXI_SRAM is not set # CONFIG_SOC_TI is not set # CONFIG_PM_DEVFREQ is not set @@ -2819,7 +2897,6 @@ CONFIG_ARM_GIC_MAX_NR=1 # CONFIG_PHY_PXA_28NM_HSIC is not set # CONFIG_PHY_PXA_28NM_USB2 is not set # CONFIG_BCM_KONA_USB2_PHY is not set -# CONFIG_PHY_TUSB1210 is not set CONFIG_POWERCAP=y CONFIG_INTEL_RAPL=y # CONFIG_MCB is not set @@ -2840,7 +2917,7 @@ CONFIG_ND_BLK=y CONFIG_ND_CLAIM=y CONFIG_ND_BTT=y CONFIG_BTT=y -CONFIG_DEV_DAX=y +# CONFIG_DEV_DAX is not set # CONFIG_NVMEM is not set # CONFIG_STM is not set # CONFIG_INTEL_TH is not set @@ -2853,6 +2930,7 @@ CONFIG_DEV_DAX=y # # Firmware Drivers # +# CONFIG_ARM_SCPI_PROTOCOL is not set # CONFIG_EDD is not set CONFIG_FIRMWARE_MEMMAP=y # CONFIG_DELL_RBU is not set @@ -2875,7 +2953,14 @@ CONFIG_EFI_VARS_PSTORE=y CONFIG_EFI_RUNTIME_WRAPPERS=y CONFIG_EFI_BOOTLOADER_CONTROL=y CONFIG_EFI_CAPSULE_LOADER=y +# CONFIG_EFI_TEST is not set +# CONFIG_APPLE_PROPERTIES is not set CONFIG_UEFI_CPER=y +# CONFIG_EFI_DEV_PATH_PARSER is not set + +# +# Tegra firmware driver +# # # File systems @@ -2915,6 +3000,7 @@ CONFIG_F2FS_FS_ENCRYPTION=y # CONFIG_FS_DAX is not set CONFIG_FS_POSIX_ACL=y CONFIG_EXPORTFS=y +# CONFIG_EXPORTFS_BLOCK_OPS is not set CONFIG_FILE_LOCKING=y # CONFIG_MANDATORY_FILE_LOCKING is not set CONFIG_FS_ENCRYPTION=y @@ -2922,11 +3008,13 @@ CONFIG_FSNOTIFY=y CONFIG_DNOTIFY=y CONFIG_INOTIFY_USER=y CONFIG_FANOTIFY=y +# CONFIG_FANOTIFY_ACCESS_PERMISSIONS is not set # CONFIG_QUOTA is not set # CONFIG_QUOTACTL is not set CONFIG_AUTOFS4_FS=y # CONFIG_FUSE_FS is not set CONFIG_OVERLAY_FS=y +CONFIG_OVERLAY_FS_REDIRECT_DIR=y # # Caches @@ -2977,6 +3065,7 @@ CONFIG_TMPFS_POSIX_ACL=y CONFIG_TMPFS_XATTR=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y +CONFIG_ARCH_HAS_GIGANTIC_PAGE=y CONFIG_CONFIGFS_FS=y CONFIG_EFIVAR_FS=y CONFIG_MISC_FILESYSTEMS=y @@ -2990,7 +3079,6 @@ CONFIG_HFSPLUS_FS_POSIX_ACL=y # CONFIG_BEFS_FS is not set # CONFIG_BFS_FS is not set # CONFIG_EFS_FS is not set -# CONFIG_LOGFS is not set # CONFIG_CRAMFS is not set # CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set @@ -3001,16 +3089,30 @@ CONFIG_HFSPLUS_FS_POSIX_ACL=y # CONFIG_QNX6FS_FS is not set # CONFIG_ROMFS_FS is not set CONFIG_PSTORE=y +# CONFIG_PSTORE_ZLIB_COMPRESS is not set +# 
CONFIG_PSTORE_LZO_COMPRESS is not set +CONFIG_PSTORE_LZ4_COMPRESS=y # CONFIG_PSTORE_CONSOLE is not set # CONFIG_PSTORE_PMSG is not set -# CONFIG_PSTORE_RAM is not set +CONFIG_PSTORE_RAM=y # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set CONFIG_NETWORK_FILESYSTEMS=y # CONFIG_NFS_FS is not set # CONFIG_NFSD is not set # CONFIG_CEPH_FS is not set -# CONFIG_CIFS is not set +CONFIG_CIFS=y +# CONFIG_CIFS_STATS is not set +CONFIG_CIFS_WEAK_PW_HASH=y +CONFIG_CIFS_UPCALL=y +CONFIG_CIFS_XATTR=y +CONFIG_CIFS_POSIX=y +CONFIG_CIFS_ACL=y +# CONFIG_CIFS_DEBUG is not set +CONFIG_CIFS_DFS_UPCALL=y +CONFIG_CIFS_SMB2=y +CONFIG_CIFS_SMB311=y +CONFIG_CIFS_FSCACHE=y # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set @@ -3065,8 +3167,7 @@ CONFIG_NLS_CODEPAGE_949=y # CONFIG_NLS_MAC_ROMANIAN is not set # CONFIG_NLS_MAC_TURKISH is not set CONFIG_NLS_UTF8=y -CONFIG_DLM=y -# CONFIG_DLM_DEBUG is not set +# CONFIG_DLM is not set # # Kernel hacking @@ -3077,6 +3178,7 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y # printk and dmesg options # CONFIG_PRINTK_TIME=y +CONFIG_CONSOLE_LOGLEVEL_DEFAULT=7 CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # CONFIG_BOOT_PRINTK_DELAY is not set @@ -3084,7 +3186,7 @@ CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # Compile-time checks and compiler options # # CONFIG_DEBUG_INFO is not set -CONFIG_ENABLE_WARN_DEPRECATED=y +# CONFIG_ENABLE_WARN_DEPRECATED is not set # CONFIG_ENABLE_MUST_CHECK is not set CONFIG_FRAME_WARN=2048 CONFIG_STRIP_ASM_SYMS=y @@ -3123,6 +3225,7 @@ CONFIG_HAVE_DEBUG_STACKOVERFLOW=y CONFIG_HAVE_ARCH_KMEMCHECK=y # CONFIG_KMEMCHECK is not set CONFIG_HAVE_ARCH_KASAN=y +# CONFIG_KASAN is not set CONFIG_ARCH_HAS_KCOV=y # CONFIG_KCOV is not set # CONFIG_DEBUG_SHIRQ is not set @@ -3142,7 +3245,7 @@ CONFIG_WQ_WATCHDOG=y CONFIG_PANIC_ON_OOPS_VALUE=0 CONFIG_PANIC_TIMEOUT=15 # CONFIG_SCHED_DEBUG is not set -CONFIG_SCHED_INFO=y +# CONFIG_SCHED_INFO is not set # CONFIG_SCHEDSTATS is not set # CONFIG_SCHED_STACK_END_CHECK is not set # CONFIG_DEBUG_TIMEKEEPING is not set @@ -3187,12 +3290,9 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=21 # CONFIG_NOTIFIER_ERROR_INJECTION is not set # CONFIG_FAULT_INJECTION is not set # CONFIG_LATENCYTOP is not set -CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y -# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set CONFIG_USER_STACKTRACE_SUPPORT=y CONFIG_HAVE_FUNCTION_TRACER=y CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y CONFIG_HAVE_DYNAMIC_FTRACE=y CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y @@ -3223,21 +3323,21 @@ CONFIG_TRACING_SUPPORT=y # CONFIG_TEST_FIRMWARE is not set # CONFIG_TEST_UDELAY is not set # CONFIG_MEMTEST is not set +# CONFIG_BUG_ON_DATA_CORRUPTION is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set CONFIG_ARCH_HAS_UBSAN_SANITIZE_ALL=y +# CONFIG_ARCH_WANTS_UBSAN_NO_NULL is not set # CONFIG_UBSAN is not set CONFIG_ARCH_HAS_DEVMEM_IS_ALLOWED=y -CONFIG_STRICT_DEVMEM=y -CONFIG_IO_STRICT_DEVMEM=y -# CONFIG_X86_VERBOSE_BOOTUP is not set +CONFIG_X86_VERBOSE_BOOTUP=y # CONFIG_EARLY_PRINTK is not set -# CONFIG_X86_PTDUMP_CORE is not set +CONFIG_X86_PTDUMP_CORE=y # CONFIG_X86_PTDUMP is not set # CONFIG_EFI_PGT_DUMP is not set # CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_WX is not set +CONFIG_DEBUG_WX=y # CONFIG_DOUBLEFAULT is not set # CONFIG_DEBUG_TLBFLUSH is not set # CONFIG_IOMMU_STRESS is not set @@ -3265,11 +3365,31 @@ CONFIG_KEYS=y CONFIG_PERSISTENT_KEYRINGS=y CONFIG_BIG_KEYS=y CONFIG_ENCRYPTED_KEYS=y -# CONFIG_KEY_DH_OPERATIONS is not set 
+CONFIG_KEY_DH_OPERATIONS=y CONFIG_SECURITY_DMESG_RESTRICT=y -# CONFIG_SECURITY is not set +CONFIG_SECURITY=y CONFIG_SECURITYFS=y -# CONFIG_INTEL_TXT is not set +# CONFIG_SECURITY_NETWORK is not set +# CONFIG_SECURITY_PATH is not set +CONFIG_INTEL_TXT=y +CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y +CONFIG_HAVE_ARCH_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY=y +CONFIG_HARDENED_USERCOPY_PAGESPAN=y +# CONFIG_SECURITY_SMACK is not set +# CONFIG_SECURITY_TOMOYO is not set +# CONFIG_SECURITY_APPARMOR is not set +# CONFIG_SECURITY_LOADPIN is not set +CONFIG_SECURITY_YAMA=y +CONFIG_INTEGRITY=y +CONFIG_INTEGRITY_SIGNATURE=y +CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y +CONFIG_INTEGRITY_TRUSTED_KEYRING=y +# CONFIG_IMA is not set +# CONFIG_IMA_KEYRINGS_PERMIT_SIGNED_BY_BUILTIN_OR_SECONDARY is not set +CONFIG_EVM=y +CONFIG_EVM_ATTR_FSUUID=y +# CONFIG_EVM_LOAD_X509 is not set CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_DEFAULT_SECURITY="" CONFIG_XOR_BLOCKS=y @@ -3283,6 +3403,7 @@ CONFIG_CRYPTO=y # # Crypto core or helper # +CONFIG_CRYPTO_FIPS=y CONFIG_CRYPTO_ALGAPI=y CONFIG_CRYPTO_ALGAPI2=y CONFIG_CRYPTO_AEAD=y @@ -3296,7 +3417,12 @@ CONFIG_CRYPTO_RNG2=y CONFIG_CRYPTO_RNG_DEFAULT=y CONFIG_CRYPTO_AKCIPHER2=y CONFIG_CRYPTO_AKCIPHER=y -# CONFIG_CRYPTO_RSA is not set +CONFIG_CRYPTO_KPP2=y +CONFIG_CRYPTO_KPP=y +CONFIG_CRYPTO_ACOMP2=y +CONFIG_CRYPTO_RSA=y +CONFIG_CRYPTO_DH=y +CONFIG_CRYPTO_ECDH=y CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y CONFIG_CRYPTO_USER=y @@ -3310,6 +3436,7 @@ CONFIG_CRYPTO_CRYPTD=y CONFIG_CRYPTO_MCRYPTD=y CONFIG_CRYPTO_AUTHENC=y CONFIG_CRYPTO_ABLK_HELPER=y +CONFIG_CRYPTO_SIMD=y CONFIG_CRYPTO_GLUE_HELPER_X86=y # @@ -3331,7 +3458,7 @@ CONFIG_CRYPTO_ECB=y CONFIG_CRYPTO_LRW=y CONFIG_CRYPTO_PCBC=y CONFIG_CRYPTO_XTS=y -# CONFIG_CRYPTO_KEYWRAP is not set +CONFIG_CRYPTO_KEYWRAP=y # # Hash modes @@ -3352,7 +3479,7 @@ CONFIG_CRYPTO_CRCT10DIF=y CONFIG_CRYPTO_CRCT10DIF_PCLMUL=y CONFIG_CRYPTO_GHASH=y CONFIG_CRYPTO_POLY1305=y -# CONFIG_CRYPTO_POLY1305_X86_64 is not set +CONFIG_CRYPTO_POLY1305_X86_64=y CONFIG_CRYPTO_MD4=y CONFIG_CRYPTO_MD5=y CONFIG_CRYPTO_MICHAEL_MIC=y @@ -3365,8 +3492,11 @@ CONFIG_CRYPTO_SHA1_SSSE3=y CONFIG_CRYPTO_SHA256_SSSE3=y CONFIG_CRYPTO_SHA512_SSSE3=y CONFIG_CRYPTO_SHA1_MB=y +CONFIG_CRYPTO_SHA256_MB=y +CONFIG_CRYPTO_SHA512_MB=y CONFIG_CRYPTO_SHA256=y CONFIG_CRYPTO_SHA512=y +CONFIG_CRYPTO_SHA3=y CONFIG_CRYPTO_TGR192=y CONFIG_CRYPTO_WP512=y CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y @@ -3398,7 +3528,7 @@ CONFIG_CRYPTO_KHAZAD=y CONFIG_CRYPTO_SALSA20=y CONFIG_CRYPTO_SALSA20_X86_64=y CONFIG_CRYPTO_CHACHA20=y -# CONFIG_CRYPTO_CHACHA20_X86_64 is not set +CONFIG_CRYPTO_CHACHA20_X86_64=y CONFIG_CRYPTO_SEED=y CONFIG_CRYPTO_SERPENT=y CONFIG_CRYPTO_SERPENT_SSE2_X86_64=y @@ -3434,21 +3564,30 @@ CONFIG_CRYPTO_JITTERENTROPY=y # CONFIG_CRYPTO_USER_API_SKCIPHER is not set # CONFIG_CRYPTO_USER_API_RNG is not set # CONFIG_CRYPTO_USER_API_AEAD is not set +CONFIG_CRYPTO_HASH_INFO=y CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_PADLOCK is not set +# CONFIG_CRYPTO_DEV_FSL_CAAM_CRYPTO_API_DESC is not set # CONFIG_CRYPTO_DEV_CCP is not set CONFIG_CRYPTO_DEV_QAT=y -CONFIG_CRYPTO_DEV_QAT_DH895xCC=y +# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set # CONFIG_CRYPTO_DEV_QAT_C3XXX is not set -# CONFIG_CRYPTO_DEV_QAT_C62X is not set -CONFIG_CRYPTO_DEV_QAT_DH895xCCVF=y +CONFIG_CRYPTO_DEV_QAT_C62X=y +# CONFIG_CRYPTO_DEV_QAT_DH895xCCVF is not set # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set -# CONFIG_CRYPTO_DEV_QAT_C62XVF is not set -# CONFIG_ASYMMETRIC_KEY_TYPE is not set +CONFIG_CRYPTO_DEV_QAT_C62XVF=y +CONFIG_ASYMMETRIC_KEY_TYPE=y 
+CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=y +CONFIG_X509_CERTIFICATE_PARSER=y +CONFIG_PKCS7_MESSAGE_PARSER=y # # Certificates for signature checking # +CONFIG_SYSTEM_TRUSTED_KEYRING=y +CONFIG_SYSTEM_TRUSTED_KEYS="" +# CONFIG_SYSTEM_EXTRA_CERTIFICATE is not set +CONFIG_SECONDARY_TRUSTED_KEYRING=y CONFIG_HAVE_KVM=y # CONFIG_VIRTUALIZATION is not set # CONFIG_BINARY_PRINTF is not set @@ -3505,6 +3644,9 @@ CONFIG_XZ_DEC_BCJ=y CONFIG_DECOMPRESS_GZIP=y CONFIG_DECOMPRESS_LZ4=y CONFIG_GENERIC_ALLOCATOR=y +CONFIG_REED_SOLOMON=y +CONFIG_REED_SOLOMON_ENC8=y +CONFIG_REED_SOLOMON_DEC8=y CONFIG_TEXTSEARCH=y CONFIG_TEXTSEARCH_KMP=y CONFIG_TEXTSEARCH_BM=y @@ -3515,15 +3657,19 @@ CONFIG_HAS_IOMEM=y CONFIG_HAS_IOPORT_MAP=y CONFIG_HAS_DMA=y CONFIG_CHECK_SIGNATURE=y +CONFIG_CPUMASK_OFFSTACK=y CONFIG_CPU_RMAP=y CONFIG_DQL=y CONFIG_GLOB=y # CONFIG_GLOB_SELFTEST is not set CONFIG_NLATTR=y -CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y +CONFIG_CLZ_TAB=y CONFIG_CORDIC=y CONFIG_DDR=y # CONFIG_IRQ_POLL is not set +CONFIG_MPILIB=y +CONFIG_SIGNATURE=y +CONFIG_OID_REGISTRY=y CONFIG_UCS2_STRING=y CONFIG_FONT_SUPPORT=y # CONFIG_FONTS is not set @@ -3534,3 +3680,4 @@ CONFIG_SG_POOL=y CONFIG_ARCH_HAS_SG_CHAIN=y CONFIG_ARCH_HAS_PMEM_API=y CONFIG_ARCH_HAS_MMIO_FLUSH=y +CONFIG_SBITMAP=y diff --git a/linux-spica.install b/linux-spica.install index 60e8431..fc68f4f 100644 --- a/linux-spica.install +++ b/linux-spica.install @@ -1,5 +1,5 @@ pkgname=linux-spica -kernver=4.7.5-1spica-dirty +kernver=4.10.8spica-dirty #bootdevice="BOOT_IMAGE=/boot/vmlinuz-$pkgname root=UUID=d670564f-2cb3-4981-9d51-6ed9c1327d47" #option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd intel_iommu=on pci-stub.ids=1002:683f,1002:aab0 vfio_iommu_type1.allow_unsafe_interrupts=1,kvm.ignore_msrs=1" #option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd quiet intremap=no_x2apic_optout zswap.enabled=1 zswap.max_pool_percent=25 zswap.compressor=lz4" @@ -12,19 +12,19 @@ post_install () { echo "> Generating initramfs, using mkinitcpio. Please wait..." echo ">" mkinitcpio -p $pkgname - echo "> Modifing efibootmgr..." - efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){printf "efibootmgr -b %s -B;echo \">> remove entry : %s\";",m[1],m[2]}'|sh - echo "> Copy efistub from boot" - cp -fv "boot/vmlinuz-$pkgname" "boot/efi/EFI/spi-ca/kernel.efi" - cp -fv "boot/initramfs-$pkgname.img" "boot/efi/EFI/spi-ca/initrd" - echo "> Registering efistub " +# echo "> Modifing efibootmgr..." +# efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){printf "efibootmgr -b %s -B;echo \">> remove entry : %s\";",m[1],m[2]}'|sh +# echo "> Copy efistub from boot" +# cp -fv "boot/vmlinuz-$pkgname" "boot/efi/EFI/spi-ca/kernel.efi" +# cp -fv "boot/initramfs-$pkgname.img" "boot/efi/EFI/spi-ca/initrd" +# echo "> Registering efistub " #echo 'efibootmgr -c -g -d /dev/sda -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel.efi" #-u "$bootdevice $option"' - efibootmgr -c -g -d /dev/sde -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel" # -u "$bootdevice $option" - echo "> Reordering Bootorder..." - newentry=`efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){print m[1]}'` - prebootorder=`efibootmgr |grep BootOrder |cut -d : -f 2 |tr -d ' '` - efibootmgr -O - efibootmgr -o ${newentry},${prebootorder} +# efibootmgr -c -g -d /dev/sde -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel" # -u "$bootdevice $option" +# echo "> Reordering Bootorder..." 
+# newentry=`efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){print m[1]}'` +# prebootorder=`efibootmgr |grep BootOrder |cut -d : -f 2 |tr -d ' '` +# efibootmgr -O +# efibootmgr -o ${newentry},${prebootorder} echo "> OK!" } diff --git a/patches/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.7.0.patch b/patches/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.10..patch similarity index 94% rename from patches/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.7.0.patch rename to patches/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.10..patch index ff75a8b..45f4fd2 100644 --- a/patches/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.7.0.patch +++ b/patches/0001-block-cgroups-kconfig-build-bits-for-BFQ-v7r11-4.10..patch @@ -1,7 +1,7 @@ -From 22ee35ec82fa543b65c1b6d516a086a21f723846 Mon Sep 17 00:00:00 2001 +From 8500f47272575b4616beb487c483019248d8c501 Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Tue, 7 Apr 2015 13:39:12 +0200 -Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.7.0 +Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.10.0 Update Kconfig.iosched and do the related Makefile changes to include kernel configuration options for BFQ. Also increase the number of @@ -74,7 +74,7 @@ index 421bef9..0ee5f0f 100644 endmenu diff --git a/block/Makefile b/block/Makefile -index 9eda232..4a36683 100644 +index a827f98..3b14703 100644 --- a/block/Makefile +++ b/block/Makefile @@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o @@ -86,18 +86,18 @@ index 9eda232..4a36683 100644 obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h -index 3d9cf32..8d862a0 100644 +index 1ca8e8f..8e2d6ed 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h -@@ -45,7 +45,7 @@ struct pr_ops; +@@ -47,7 +47,7 @@ struct rq_wb; * Maximum number of blkcg policies allowed to be registered concurrently. * Defined here to simplify include dependency. */ -#define BLKCG_MAX_POLS 2 +#define BLKCG_MAX_POLS 3 - struct request; typedef void (rq_end_io_fn)(struct request *, int); + -- -1.9.1 +2.10.0 diff --git a/patches/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.7.0.patch b/patches/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.10.0.patch similarity index 98% rename from patches/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.7.0.patch rename to patches/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.10.0.patch index 368a4ff..0812a57 100644 --- a/patches/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.7.0.patch +++ b/patches/0002-block-introduce-the-BFQ-v7r11-I-O-sched-for-4.10.0.patch @@ -1,7 +1,7 @@ -From 2aae32be2a18a7d0da104ae42c08cb9bce9d9c7c Mon Sep 17 00:00:00 2001 +From 2f56e91506b329ffc29d0f184924ad0123c9ba9e Mon Sep 17 00:00:00 2001 From: Paolo Valente Date: Thu, 9 May 2013 19:10:02 +0200 -Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched for 4.7.0 +Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched for 4.10.0 The general structure is borrowed from CFQ, as much of the code for handling I/O contexts. 
Over time, several useful features have been @@ -56,12 +56,12 @@ Signed-off-by: Paolo Valente Signed-off-by: Arianna Avanzini --- block/Kconfig.iosched | 6 +- - block/bfq-cgroup.c | 1182 ++++++++++++++++ + block/bfq-cgroup.c | 1186 ++++++++++++++++ block/bfq-ioc.c | 36 + - block/bfq-iosched.c | 3754 +++++++++++++++++++++++++++++++++++++++++++++++++ - block/bfq-sched.c | 1200 ++++++++++++++++ + block/bfq-iosched.c | 3763 +++++++++++++++++++++++++++++++++++++++++++++++++ + block/bfq-sched.c | 1199 ++++++++++++++++ block/bfq.h | 801 +++++++++++ - 6 files changed, 6975 insertions(+), 4 deletions(-) + 6 files changed, 6987 insertions(+), 4 deletions(-) create mode 100644 block/bfq-cgroup.c create mode 100644 block/bfq-ioc.c create mode 100644 block/bfq-iosched.c @@ -91,10 +91,10 @@ index 0ee5f0f..f78cd1a 100644 prompt "Default I/O scheduler" diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c new file mode 100644 -index 0000000..8610cd6 +index 0000000..8b08a57 --- /dev/null +++ b/block/bfq-cgroup.c -@@ -0,0 +1,1182 @@ +@@ -0,0 +1,1186 @@ +/* + * BFQ: CGROUPS support. + * @@ -259,7 +259,9 @@ index 0000000..8610cd6 +static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) +{ + struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq); ++ + BUG_ON(!pd); ++ + return pd_to_bfqg(pd); +} + @@ -379,7 +381,8 @@ index 0000000..8610cd6 + blkg_stat_add_aux(&from->time, &from->time); + blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time); + blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); -+ blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples); ++ blkg_stat_add_aux(&to->avg_queue_size_samples, ++ &from->avg_queue_size_samples); + blkg_stat_add_aux(&to->dequeue, &from->dequeue); + blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time); + blkg_stat_add_aux(&to->idle_time, &from->idle_time); @@ -471,9 +474,9 @@ index 0000000..8610cd6 +} + +static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd) -+ { ++{ + return cpd ? 
container_of(cpd, struct bfq_group_data, pd) : NULL; -+ } ++} + +static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) +{ @@ -562,8 +565,8 @@ index 0000000..8610cd6 +} + +/* to be used by recursive prfill, sums live and dead rwstats recursively */ -+static struct blkg_rwstat bfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, -+ int off) ++static struct blkg_rwstat ++bfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, int off) +{ + struct blkg_rwstat a, b; + @@ -776,7 +779,6 @@ index 0000000..8610cd6 + + BUG_ON(!bfqq); + bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group); -+ return; +} + +/** @@ -804,8 +806,6 @@ index 0000000..8610cd6 + if (bfqg->sched_data.in_service_entity) + bfq_reparent_leaf_entity(bfqd, + bfqg->sched_data.in_service_entity); -+ -+ return; +} + +/** @@ -930,6 +930,7 @@ index 0000000..8610cd6 + bfqgd->weight = (unsigned short)val; + hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { + struct bfq_group *bfqg = blkg_to_bfqg(blkg); ++ + if (!bfqg) + continue; + /* @@ -1043,7 +1044,8 @@ index 0000000..8610cd6 + return 0; +} + -+static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) ++static struct bfq_group * ++bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) +{ + int ret; + @@ -1051,22 +1053,22 @@ index 0000000..8610cd6 + if (ret) + return NULL; + -+ return blkg_to_bfqg(bfqd->queue->root_blkg); ++ return blkg_to_bfqg(bfqd->queue->root_blkg); +} + +static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) +{ -+ struct bfq_group_data *bgd; ++ struct bfq_group_data *bgd; + -+ bgd = kzalloc(sizeof(*bgd), GFP_KERNEL); -+ if (!bgd) -+ return NULL; -+ return &bgd->pd; ++ bgd = kzalloc(sizeof(*bgd), GFP_KERNEL); ++ if (!bgd) ++ return NULL; ++ return &bgd->pd; +} + +static void bfq_cpd_free(struct blkcg_policy_data *cpd) +{ -+ kfree(cpd_to_bfqgd(cpd)); ++ kfree(cpd_to_bfqgd(cpd)); +} + +static struct cftype bfqio_files_dfl[] = { @@ -1201,20 +1203,19 @@ index 0000000..8610cd6 +}; + +static struct blkcg_policy blkcg_policy_bfq = { -+ .dfl_cftypes = bfqio_files_dfl, -+ .legacy_cftypes = bfqio_files, ++ .dfl_cftypes = bfqio_files_dfl, ++ .legacy_cftypes = bfqio_files, + -+ .pd_alloc_fn = bfq_pd_alloc, -+ .pd_init_fn = bfq_pd_init, -+ .pd_offline_fn = bfq_pd_offline, -+ .pd_free_fn = bfq_pd_free, -+ .pd_reset_stats_fn = bfq_pd_reset_stats, -+ -+ .cpd_alloc_fn = bfq_cpd_alloc, -+ .cpd_init_fn = bfq_cpd_init, -+ .cpd_bind_fn = bfq_cpd_init, -+ .cpd_free_fn = bfq_cpd_free, ++ .pd_alloc_fn = bfq_pd_alloc, ++ .pd_init_fn = bfq_pd_init, ++ .pd_offline_fn = bfq_pd_offline, ++ .pd_free_fn = bfq_pd_free, ++ .pd_reset_stats_fn = bfq_pd_reset_stats, + ++ .cpd_alloc_fn = bfq_cpd_alloc, ++ .cpd_init_fn = bfq_cpd_init, ++ .cpd_bind_fn = bfq_cpd_init, ++ .cpd_free_fn = bfq_cpd_free, +}; + +#else @@ -1223,6 +1224,7 @@ index 0000000..8610cd6 + struct bfq_group *bfqg) +{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ + entity->weight = entity->new_weight; + entity->orig_weight = entity->new_weight; + if (bfqq) { @@ -1236,6 +1238,7 @@ index 0000000..8610cd6 +bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) +{ + struct bfq_data *bfqd = bic_to_bfqd(bic); ++ + return bfqd->root_group; +} + @@ -1257,12 +1260,13 @@ index 0000000..8610cd6 +} + +static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, -+ struct blkcg *blkcg) ++ struct blkcg *blkcg) +{ + return bfqd->root_group; +} + -+static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) ++static struct bfq_group * 
++bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) +{ + struct bfq_group *bfqg; + int i; @@ -1321,10 +1325,10 @@ index 0000000..fb7bb8f +} diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c new file mode 100644 -index 0000000..f9787a6 +index 0000000..85e2169 --- /dev/null +++ b/block/bfq-iosched.c -@@ -0,0 +1,3754 @@ +@@ -0,0 +1,3763 @@ +/* + * Budget Fair Queueing (BFQ) disk scheduler. + * @@ -1542,7 +1546,7 @@ index 0000000..f9787a6 + unsigned long back_max; +#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ +#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ -+ unsigned wrap = 0; /* bit mask: requests behind the disk head? */ ++ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */ + + if (!rq1 || rq1 == rq2) + return rq2; @@ -1597,12 +1601,11 @@ index 0000000..f9787a6 + return rq1; + else if (d2 < d1) + return rq2; -+ else { -+ if (s1 >= s2) -+ return rq1; -+ else -+ return rq2; -+ } ++ ++ if (s1 >= s2) ++ return rq1; ++ else ++ return rq2; + + case BFQ_RQ2_WRAP: + return rq1; @@ -1889,7 +1892,7 @@ index 0000000..f9787a6 + */ + hlist_for_each_entry(bfqq_item, &bfqd->burst_list, + burst_list_node) -+ bfq_mark_bfqq_in_large_burst(bfqq_item); ++ bfq_mark_bfqq_in_large_burst(bfqq_item); + bfq_mark_bfqq_in_large_burst(bfqq); + + /* @@ -2288,7 +2291,7 @@ index 0000000..f9787a6 + bfqd->rq_in_driver++; + bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); + bfq_log(bfqd, "activate_request: new bfqd->last_position %llu", -+ (long long unsigned)bfqd->last_position); ++ (unsigned long long) bfqd->last_position); +} + +static void bfq_deactivate_request(struct request_queue *q, struct request *rq) @@ -2595,6 +2598,7 @@ index 0000000..f9787a6 +{ + struct bfq_queue *bfqq = bfqd->in_service_queue; + unsigned int timeout_coeff; ++ + if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) + timeout_coeff = 1; + else @@ -2667,6 +2671,7 @@ index 0000000..f9787a6 +static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; ++ + return entity->budget - entity->service; +} + @@ -2906,6 +2911,7 @@ index 0000000..f9787a6 + if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES && + update) { + int dev_type = blk_queue_nonrot(bfqd->queue); ++ + if (bfqd->bfq_user_max_budget == 0) { + bfqd->bfq_max_budget = + bfq_calc_max_budget(bfqd->peak_rate, @@ -3065,6 +3071,7 @@ index 0000000..f9787a6 + enum bfqq_expiration reason) +{ + bool slow; ++ + BUG_ON(bfqq != bfqd->in_service_queue); + + /* @@ -3098,7 +3105,7 @@ index 0000000..f9787a6 + } + + if (reason == BFQ_BFQQ_TOO_IDLE && -+ bfqq->entity.service <= 2 * bfqq->entity.budget / 10 ) ++ bfqq->entity.service <= 2 * bfqq->entity.budget / 10) + bfq_clear_bfqq_IO_bound(bfqq); + + if (bfqd->low_latency && bfqq->wr_coeff == 1) @@ -3244,7 +3251,7 @@ index 0000000..f9787a6 + */ + idling_boosts_thr = !bfqd->hw_tag || + (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) && -+ bfq_bfqq_idle_window(bfqq)) ; ++ bfq_bfqq_idle_window(bfqq)); + + /* + * The value of the next variable, @@ -3356,7 +3363,7 @@ index 0000000..f9787a6 + * (i) each of these processes must get the same throughput as + * the others; + * (ii) all these processes have the same I/O pattern -+ (either sequential or random). ++ * (either sequential or random). 
+ * In fact, in such a scenario, the drive will tend to treat + * the requests of each of these processes in about the same + * way as the requests of the others, and thus to provide @@ -3553,6 +3560,7 @@ index 0000000..f9787a6 +static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; ++ + if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ + bfq_log_bfqq(bfqd, bfqq, + "raising period dur %u/%u msec, old coeff %u, w %d(%d)", @@ -3643,7 +3651,7 @@ index 0000000..f9787a6 + bfq_log_bfqq(bfqd, bfqq, + "dispatched %u sec req (%llu), budg left %d", + blk_rq_sectors(rq), -+ (long long unsigned)blk_rq_pos(rq), ++ (unsigned long long) blk_rq_pos(rq), + bfq_bfqq_budget_left(bfqq)); + + dispatched++; @@ -3841,7 +3849,8 @@ index 0000000..f9787a6 + * Update the entity prio values; note that the new values will not + * be used until the next (re)activation. + */ -+static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) ++static void ++bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +{ + struct task_struct *tsk = current; + int ioprio_class; @@ -3874,8 +3883,8 @@ index 0000000..f9787a6 + } + + if (bfqq->new_ioprio < 0 || bfqq->new_ioprio >= IOPRIO_BE_NR) { -+ printk(KERN_CRIT "bfq_set_next_ioprio_data: new_ioprio %d\n", -+ bfqq->new_ioprio); ++ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", ++ bfqq->new_ioprio); + BUG(); + } + @@ -3999,7 +4008,7 @@ index 0000000..f9787a6 + + if (bfqq) { + bfq_init_bfqq(bfqd, bfqq, bic, current->pid, -+ is_sync); ++ is_sync); + bfq_init_entity(&bfqq->entity, bfqg); + bfq_log_bfqq(bfqd, bfqq, "allocated"); + } else { @@ -4187,7 +4196,7 @@ index 0000000..f9787a6 + bfq_log_bfqq(bfqd, bfqq, + "rq_enqueued: idle_window=%d (seeky %d, mean %llu)", + bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq), -+ (long long unsigned)bfqq->seek_mean); ++ (unsigned long long) bfqq->seek_mean); + + bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); + @@ -4738,8 +4747,7 @@ index 0000000..f9787a6 + +static void bfq_slab_kill(void) +{ -+ if (bfq_pool) -+ kmem_cache_destroy(bfq_pool); ++ kmem_cache_destroy(bfq_pool); +} + +static int __init bfq_slab_setup(void) @@ -4770,6 +4778,7 @@ index 0000000..f9787a6 +static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page) +{ + struct bfq_data *bfqd = e->elevator_data; ++ + return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ? 
+ jiffies_to_msecs(bfqd->bfq_wr_max_time) : + jiffies_to_msecs(bfq_wr_duration(bfqd))); @@ -4788,25 +4797,29 @@ index 0000000..f9787a6 + + num_char += sprintf(page + num_char, "Active:\n"); + list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n", -+ bfqq->pid, -+ bfqq->entity.weight, -+ bfqq->queued[0], -+ bfqq->queued[1], -+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); ++ num_char += sprintf(page + num_char, ++ "pid%d: weight %hu, nr_queued %d %d, ", ++ bfqq->pid, ++ bfqq->entity.weight, ++ bfqq->queued[0], ++ bfqq->queued[1]); ++ num_char += sprintf(page + num_char, ++ "dur %d/%u\n", ++ jiffies_to_msecs( ++ jiffies - ++ bfqq->last_wr_start_finish), ++ jiffies_to_msecs(bfqq->wr_cur_max_time)); + } + + num_char += sprintf(page + num_char, "Idle:\n"); + list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) { -+ num_char += sprintf(page + num_char, -+ "pid%d: weight %hu, dur %d/%u\n", -+ bfqq->pid, -+ bfqq->entity.weight, -+ jiffies_to_msecs(jiffies - -+ bfqq->last_wr_start_finish), -+ jiffies_to_msecs(bfqq->wr_cur_max_time)); ++ num_char += sprintf(page + num_char, ++ "pid%d: weight %hu, dur %d/%u\n", ++ bfqq->pid, ++ bfqq->entity.weight, ++ jiffies_to_msecs(jiffies - ++ bfqq->last_wr_start_finish), ++ jiffies_to_msecs(bfqq->wr_cur_max_time)); + } + + spin_unlock_irq(bfqd->queue->queue_lock); @@ -5081,10 +5094,10 @@ index 0000000..f9787a6 +MODULE_LICENSE("GPL"); diff --git a/block/bfq-sched.c b/block/bfq-sched.c new file mode 100644 -index 0000000..a64fec1 +index 0000000..a5ed694 --- /dev/null +++ b/block/bfq-sched.c -@@ -0,0 +1,1200 @@ +@@ -0,0 +1,1199 @@ +/* + * BFQ: Hierarchical B-WF2Q+ scheduler. + * @@ -5715,8 +5728,7 @@ index 0000000..a64fec1 + if (entity->new_weight != entity->orig_weight) { + if (entity->new_weight < BFQ_MIN_WEIGHT || + entity->new_weight > BFQ_MAX_WEIGHT) { -+ printk(KERN_CRIT "update_weight_prio: " -+ "new_weight %d\n", ++ pr_crit("update_weight_prio: new_weight %d\n", + entity->new_weight); + BUG(); + } @@ -6287,7 +6299,7 @@ index 0000000..a64fec1 +} diff --git a/block/bfq.h b/block/bfq.h new file mode 100644 -index 0000000..485d0c9 +index 0000000..2bf54ae --- /dev/null +++ b/block/bfq.h @@ -0,0 +1,801 @@ @@ -6722,10 +6734,10 @@ index 0000000..485d0c9 + * @last_ins_in_burst. + * @burst_size: number of queues in the current burst of queue activations. + * @bfq_large_burst_thresh: maximum burst size above which the current -+ * queue-activation burst is deemed as 'large'. ++ * queue-activation burst is deemed as 'large'. + * @large_burst: true if a large queue-activation burst is in progress. + * @burst_list: head of the burst list (as for the above fields, more details -+ * in the comments to the function bfq_handle_burst). ++ * in the comments to the function bfq_handle_burst). + * @low_latency: if set to true, low-latency heuristics are enabled. + * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised + * queue is multiplied. 
@@ -7093,5 +7105,5 @@ index 0000000..485d0c9 + +#endif /* _BFQ_H */ -- -1.9.1 +2.10.0 diff --git a/patches/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch b/patches/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch index a9876aa..28eeb1f 100644 --- a/patches/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch +++ b/patches/0003-block-bfq-add-Early-Queue-Merge-EQM-to-BFQ-v7r11-for.patch @@ -1,8 +1,8 @@ -From 47de1e46ef5f462e9694e5b0607aec6ad658f1e0 Mon Sep 17 00:00:00 2001 +From e4d9bed2dfdec562b23491e44602c89c4a2a5ea4 Mon Sep 17 00:00:00 2001 From: Mauro Andreolini Date: Sun, 6 Sep 2015 16:09:05 +0200 Subject: [PATCH 3/4] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r11 for - 4.7.0 + 4.10.0 A set of processes may happen to perform interleaved reads, i.e.,requests whose union would give rise to a sequential read pattern. There are two @@ -35,16 +35,16 @@ Signed-off-by: Arianna Avanzini Signed-off-by: Paolo Valente Signed-off-by: Linus Walleij --- - block/bfq-cgroup.c | 4 + - block/bfq-iosched.c | 687 ++++++++++++++++++++++++++++++++++++++++++++++++++-- + block/bfq-cgroup.c | 5 + + block/bfq-iosched.c | 685 +++++++++++++++++++++++++++++++++++++++++++++++++++- block/bfq.h | 66 +++++ - 3 files changed, 743 insertions(+), 14 deletions(-) + 3 files changed, 743 insertions(+), 13 deletions(-) diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c -index 8610cd6..5ee99ec 100644 +index 8b08a57..0367996 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c -@@ -437,6 +437,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd) +@@ -440,6 +440,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd) */ bfqg->bfqd = bfqd; bfqg->active_entities = 0; @@ -52,16 +52,17 @@ index 8610cd6..5ee99ec 100644 } static void bfq_pd_free(struct blkg_policy_data *pd) -@@ -530,6 +531,8 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, +@@ -533,6 +534,9 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, return bfqg; } -+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq); ++static void bfq_pos_tree_add_move(struct bfq_data *bfqd, ++ struct bfq_queue *bfqq); + /** * bfq_bfqq_move - migrate @bfqq to @bfqg. * @bfqd: queue descriptor. -@@ -577,6 +580,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -580,6 +584,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqg_get(bfqg); if (busy) { @@ -70,10 +71,10 @@ index 8610cd6..5ee99ec 100644 bfq_activate_bfqq(bfqd, bfqq); } diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c -index f9787a6..d1f648d 100644 +index 85e2169..cf3e9b1 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c -@@ -296,6 +296,72 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd, +@@ -295,6 +295,72 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd, } } @@ -112,7 +113,7 @@ index f9787a6..d1f648d 100644 + *rb_link = p; + + bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d", -+ (long long unsigned)sector, ++ (unsigned long long) sector, + bfqq ? bfqq->pid : 0); + + return bfqq; @@ -146,11 +147,11 @@ index f9787a6..d1f648d 100644 /* * Tell whether there are active queues or groups with differentiated weights. 
*/ -@@ -528,6 +594,57 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) +@@ -527,6 +593,57 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) return dur; } -+static unsigned bfq_bfqq_cooperations(struct bfq_queue *bfqq) ++static unsigned int bfq_bfqq_cooperations(struct bfq_queue *bfqq) +{ + return bfqq->bic ? bfqq->bic->cooperations : 0; +} @@ -204,7 +205,7 @@ index f9787a6..d1f648d 100644 /* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */ static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) { -@@ -764,8 +881,14 @@ static void bfq_add_request(struct request *rq) +@@ -763,8 +880,14 @@ static void bfq_add_request(struct request *rq) BUG_ON(!next_rq); bfqq->next_rq = next_rq; @@ -220,7 +221,7 @@ index f9787a6..d1f648d 100644 idle_for_long_time = time_is_before_jiffies( bfqq->budget_timeout + bfqd->bfq_wr_min_idle_time); -@@ -793,11 +916,12 @@ static void bfq_add_request(struct request *rq) +@@ -792,11 +915,12 @@ static void bfq_add_request(struct request *rq) bfqd->last_ins_in_burst = jiffies; } @@ -236,7 +237,7 @@ index f9787a6..d1f648d 100644 entity->budget = max_t(unsigned long, bfqq->max_budget, bfq_serv_to_charge(next_rq, bfqq)); -@@ -816,6 +940,9 @@ static void bfq_add_request(struct request *rq) +@@ -815,6 +939,9 @@ static void bfq_add_request(struct request *rq) if (!bfqd->low_latency) goto add_bfqq_busy; @@ -246,7 +247,7 @@ index f9787a6..d1f648d 100644 /* * If the queue: * - is not being boosted, -@@ -840,7 +967,7 @@ static void bfq_add_request(struct request *rq) +@@ -839,7 +966,7 @@ static void bfq_add_request(struct request *rq) } else if (old_wr_coeff > 1) { if (interactive) bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); @@ -255,7 +256,7 @@ index f9787a6..d1f648d 100644 (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && !soft_rt)) { -@@ -905,6 +1032,7 @@ static void bfq_add_request(struct request *rq) +@@ -904,6 +1031,7 @@ static void bfq_add_request(struct request *rq) bfqd->bfq_wr_rt_max_time; } } @@ -263,7 +264,7 @@ index f9787a6..d1f648d 100644 if (old_wr_coeff != bfqq->wr_coeff) entity->prio_changed = 1; add_bfqq_busy: -@@ -1047,6 +1175,15 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, +@@ -1046,6 +1174,15 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, bfqd->last_position); BUG_ON(!next_rq); bfqq->next_rq = next_rq; @@ -279,7 +280,7 @@ index f9787a6..d1f648d 100644 } } -@@ -1129,11 +1266,346 @@ static void bfq_end_wr(struct bfq_data *bfqd) +@@ -1128,11 +1265,346 @@ static void bfq_end_wr(struct bfq_data *bfqd) spin_unlock_irq(bfqd->queue->queue_lock); } @@ -572,7 +573,7 @@ index f9787a6..d1f648d 100644 + struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +{ + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu", -+ (long unsigned)new_bfqq->pid); ++ (unsigned long) new_bfqq->pid); + /* Save weight raising and idle window of the merged queues */ + bfq_bfqq_save_state(bfqq); + bfq_bfqq_save_state(new_bfqq); @@ -626,7 +627,7 @@ index f9787a6..d1f648d 100644 /* * Disallow merge of a sync bio into an async request. 
-@@ -1150,7 +1622,26 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, +@@ -1149,7 +1621,26 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, if (!bic) return 0; @@ -654,7 +655,7 @@ index f9787a6..d1f648d 100644 } static void __bfq_set_in_service_queue(struct bfq_data *bfqd, -@@ -1349,6 +1840,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -1350,6 +1841,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) __bfq_bfqd_reset_in_service(bfqd); @@ -670,7 +671,7 @@ index f9787a6..d1f648d 100644 if (RB_EMPTY_ROOT(&bfqq->sort_list)) { /* * Overloading budget_timeout field to store the time -@@ -1357,8 +1857,13 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -1358,8 +1858,13 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) */ bfqq->budget_timeout = jiffies; bfq_del_bfqq_busy(bfqd, bfqq, 1); @@ -685,7 +686,7 @@ index f9787a6..d1f648d 100644 } /** -@@ -2242,10 +2747,12 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -2246,10 +2751,12 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) /* * If the queue was activated in a burst, or * too much time has elapsed from the beginning @@ -700,7 +701,7 @@ index f9787a6..d1f648d 100644 time_is_before_jiffies(bfqq->last_wr_start_finish + bfqq->wr_cur_max_time)) { bfqq->last_wr_start_finish = jiffies; -@@ -2474,6 +2981,25 @@ static void bfq_put_queue(struct bfq_queue *bfqq) +@@ -2478,6 +2985,25 @@ static void bfq_put_queue(struct bfq_queue *bfqq) #endif } @@ -726,7 +727,7 @@ index f9787a6..d1f648d 100644 static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) { if (bfqq == bfqd->in_service_queue) { -@@ -2484,6 +3010,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -2488,6 +3014,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, atomic_read(&bfqq->ref)); @@ -735,7 +736,7 @@ index f9787a6..d1f648d 100644 bfq_put_queue(bfqq); } -@@ -2492,6 +3020,25 @@ static void bfq_init_icq(struct io_cq *icq) +@@ -2496,6 +3024,25 @@ static void bfq_init_icq(struct io_cq *icq) struct bfq_io_cq *bic = icq_to_bic(icq); bic->ttime.last_end_request = jiffies; @@ -761,7 +762,7 @@ index f9787a6..d1f648d 100644 } static void bfq_exit_icq(struct io_cq *icq) -@@ -2505,6 +3052,13 @@ static void bfq_exit_icq(struct io_cq *icq) +@@ -2509,6 +3056,13 @@ static void bfq_exit_icq(struct io_cq *icq) } if (bic->bfqq[BLK_RW_SYNC]) { @@ -775,7 +776,7 @@ index f9787a6..d1f648d 100644 bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]); bic->bfqq[BLK_RW_SYNC] = NULL; } -@@ -2809,6 +3363,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, +@@ -2814,6 +3368,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) return; @@ -786,7 +787,7 @@ index f9787a6..d1f648d 100644 enable_idle = bfq_bfqq_idle_window(bfqq); if (atomic_read(&bic->icq.ioc->active_ref) == 0 || -@@ -2856,6 +3414,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -2861,6 +3419,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || !BFQQ_SEEKY(bfqq)) bfq_update_idle_window(bfqd, bfqq, bic); @@ -794,7 +795,7 @@ index f9787a6..d1f648d 100644 bfq_log_bfqq(bfqd, bfqq, "rq_enqueued: idle_window=%d 
(seeky %d, mean %llu)", -@@ -2920,12 +3479,47 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -2925,12 +3484,47 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, static void bfq_insert_request(struct request_queue *q, struct request *rq) { struct bfq_data *bfqd = q->elevator->elevator_data; @@ -843,7 +844,7 @@ index f9787a6..d1f648d 100644 rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; list_add_tail(&rq->queuelist, &bfqq->fifo); -@@ -3094,6 +3688,32 @@ static void bfq_put_request(struct request *rq) +@@ -3099,6 +3693,32 @@ static void bfq_put_request(struct request *rq) } /* @@ -876,7 +877,7 @@ index f9787a6..d1f648d 100644 * Allocate bfq data structures associated with this request. */ static int bfq_set_request(struct request_queue *q, struct request *rq, -@@ -3105,6 +3725,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, +@@ -3110,6 +3730,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, const int is_sync = rq_is_sync(rq); struct bfq_queue *bfqq; unsigned long flags; @@ -884,7 +885,7 @@ index f9787a6..d1f648d 100644 might_sleep_if(gfpflags_allow_blocking(gfp_mask)); -@@ -3117,15 +3738,30 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, +@@ -3122,15 +3743,30 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, bfq_bic_update_cgroup(bic, bio); @@ -900,12 +901,11 @@ index f9787a6..d1f648d 100644 + bic->saved_in_large_burst) bfq_mark_bfqq_in_large_burst(bfqq); - else -- bfq_clear_bfqq_in_large_burst(bfqq); + else { -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ if (bic->was_in_burst_list) -+ hlist_add_head(&bfqq->burst_list_node, -+ &bfqd->burst_list); + bfq_clear_bfqq_in_large_burst(bfqq); ++ if (bic->was_in_burst_list) ++ hlist_add_head(&bfqq->burst_list_node, ++ &bfqd->burst_list); + } + } + } else { @@ -919,7 +919,7 @@ index f9787a6..d1f648d 100644 } } -@@ -3137,6 +3773,26 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, +@@ -3142,6 +3778,26 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, rq->elv.priv[0] = bic; rq->elv.priv[1] = bfqq; @@ -946,7 +946,7 @@ index f9787a6..d1f648d 100644 spin_unlock_irqrestore(q->queue_lock, flags); return 0; -@@ -3290,6 +3946,7 @@ static void bfq_init_root_group(struct bfq_group *root_group, +@@ -3295,6 +3951,7 @@ static void bfq_init_root_group(struct bfq_group *root_group, root_group->my_entity = NULL; root_group->bfqd = bfqd; #endif @@ -954,7 +954,7 @@ index f9787a6..d1f648d 100644 for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; } -@@ -3370,6 +4027,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +@@ -3375,6 +4032,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; @@ -964,7 +964,7 @@ index f9787a6..d1f648d 100644 bfqd->bfq_large_burst_thresh = 11; diff --git a/block/bfq.h b/block/bfq.h -index 485d0c9..f73c942 100644 +index 2bf54ae..fcce855 100644 --- a/block/bfq.h +++ b/block/bfq.h @@ -183,6 +183,8 @@ struct bfq_group; @@ -1097,5 +1097,5 @@ index 485d0c9..f73c942 100644 static void bfq_put_queue(struct bfq_queue *bfqq); static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); -- -1.9.1 +2.10.0 diff --git a/patches/0004-block-bfq-turn-BFQ-v7r11-for-4.7.0-into-BFQ-v8r3-for.patch 
b/patches/0004-Turn-BFQ-v7r11-for-4.10.0-into-BFQ-v8r8-for-4.10.0.patch similarity index 60% rename from patches/0004-block-bfq-turn-BFQ-v7r11-for-4.7.0-into-BFQ-v8r3-for.patch rename to patches/0004-Turn-BFQ-v7r11-for-4.10.0-into-BFQ-v8r8-for-4.10.0.patch index bf56ac7..48e64d9 100644 --- a/patches/0004-block-bfq-turn-BFQ-v7r11-for-4.7.0-into-BFQ-v8r3-for.patch +++ b/patches/0004-Turn-BFQ-v7r11-for-4.10.0-into-BFQ-v8r8-for-4.10.0.patch @@ -1,22 +1,588 @@ -From d384ccf796a992e27691b7359ce54534db57e74c Mon Sep 17 00:00:00 2001 +From b782bbfcb5e08e92c0448d0c6a870b44db198837 Mon Sep 17 00:00:00 2001 From: Paolo Valente -Date: Tue, 17 May 2016 08:28:04 +0200 -Subject: [PATCH 4/4] block, bfq: turn BFQ-v7r11 for 4.7.0 into BFQ-v8r3 for - 4.7.0 +Date: Mon, 16 May 2016 11:16:17 +0200 +Subject: [PATCH 4/4] Turn BFQ-v7r11 for 4.10.0 into BFQ-v8r8 for 4.10.0 +Signed-off-by: Paolo Valente --- - block/Kconfig.iosched | 2 +- - block/bfq-cgroup.c | 480 +++++---- - block/bfq-iosched.c | 2602 +++++++++++++++++++++++++++++-------------------- - block/bfq-sched.c | 441 +++++++-- - block/bfq.h | 708 +++++++------- - 5 files changed, 2484 insertions(+), 1749 deletions(-) + Documentation/block/00-INDEX | 2 + + Documentation/block/bfq-iosched.txt | 530 ++++++ + block/Kconfig.iosched | 18 +- + block/bfq-cgroup.c | 510 +++--- + block/bfq-iosched.c | 3414 ++++++++++++++++++++++------------- + block/bfq-sched.c | 1290 ++++++++++--- + block/bfq.h | 800 ++++---- + 7 files changed, 4390 insertions(+), 2174 deletions(-) + create mode 100644 Documentation/block/bfq-iosched.txt +diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX +index e55103a..8d55b4b 100644 +--- a/Documentation/block/00-INDEX ++++ b/Documentation/block/00-INDEX +@@ -1,5 +1,7 @@ + 00-INDEX + - This file ++bfq-iosched.txt ++ - BFQ IO scheduler and its tunables + biodoc.txt + - Notes on the Generic Block Layer Rewrite in Linux 2.5 + biovecs.txt +diff --git a/Documentation/block/bfq-iosched.txt b/Documentation/block/bfq-iosched.txt +new file mode 100644 +index 0000000..13b5248 +--- /dev/null ++++ b/Documentation/block/bfq-iosched.txt +@@ -0,0 +1,530 @@ ++BFQ (Budget Fair Queueing) ++========================== ++ ++BFQ is a proportional-share I/O scheduler, with some extra ++low-latency capabilities. In addition to cgroups support (blkio or io ++controllers), BFQ's main features are: ++- BFQ guarantees a high system and application responsiveness, and a ++ low latency for time-sensitive applications, such as audio or video ++ players; ++- BFQ distributes bandwidth, and not just time, among processes or ++ groups (switching back to time distribution when needed to keep ++ throughput high). ++ ++On average CPUs, the current version of BFQ can handle devices ++performing at most ~30K IOPS; at most ~50 KIOPS on faster CPUs. As a ++reference, 30-50 KIOPS correspond to very high bandwidths with ++sequential I/O (e.g., 8-12 GB/s if I/O requests are 256 KB large), and ++to 120-200 MB/s with 4KB random I/O. ++ ++The table of contents follow. Impatients can just jump to Section 3. ++ ++CONTENTS ++ ++1. When may BFQ be useful? ++ 1-1 Personal systems ++ 1-2 Server systems ++2. How does BFQ work? ++3. What are BFQ's tunable? ++4. BFQ group scheduling ++ 4-1 Service guarantees provided ++ 4-2 Interface ++ ++1. When may BFQ be useful? ++========================== ++ ++BFQ provides the following benefits on personal and server systems. 
++ ++1-1 Personal systems ++-------------------- ++ ++Low latency for interactive applications ++ ++Regardless of the actual background workload, BFQ guarantees that, for ++interactive tasks, the storage device is virtually as responsive as if ++it was idle. For example, even if one or more of the following ++background workloads are being executed: ++- one or more large files are being read, written or copied, ++- a tree of source files is being compiled, ++- one or more virtual machines are performing I/O, ++- a software update is in progress, ++- indexing daemons are scanning filesystems and updating their ++ databases, ++starting an application or loading a file from within an application ++takes about the same time as if the storage device was idle. As a ++comparison, with CFQ, NOOP or DEADLINE, and in the same conditions, ++applications experience high latencies, or even become unresponsive ++until the background workload terminates (also on SSDs). ++ ++Low latency for soft real-time applications ++ ++Also soft real-time applications, such as audio and video ++players/streamers, enjoy a low latency and a low drop rate, regardless ++of the background I/O workload. As a consequence, these applications ++do not suffer from almost any glitch due to the background workload. ++ ++Higher speed for code-development tasks ++ ++If some additional workload happens to be executed in parallel, then ++BFQ executes the I/O-related components of typical code-development ++tasks (compilation, checkout, merge, ...) much more quickly than CFQ, ++NOOP or DEADLINE. ++ ++High throughput ++ ++On hard disks, BFQ achieves up to 30% higher throughput than CFQ, and ++up to 150% higher throughput than DEADLINE and NOOP, with all the ++sequential workloads considered in our tests. With random workloads, ++and with all the workloads on flash-based devices, BFQ achieves, ++instead, about the same throughput as the other schedulers. ++ ++Strong fairness, bandwidth and delay guarantees ++ ++BFQ distributes the device throughput, and not just the device time, ++among I/O-bound applications in proportion their weights, with any ++workload and regardless of the device parameters. From these bandwidth ++guarantees, it is possible to compute tight per-I/O-request delay ++guarantees by a simple formula. If not configured for strict service ++guarantees, BFQ switches to time-based resource sharing (only) for ++applications that would otherwise cause a throughput loss. ++ ++1-2 Server systems ++------------------ ++ ++Most benefits for server systems follow from the same service ++properties as above. In particular, regardless of whether additional, ++possibly heavy workloads are being served, BFQ guarantees: ++ ++. audio and video-streaming with zero or very low jitter and drop ++ rate; ++ ++. fast retrieval of WEB pages and embedded objects; ++ ++. real-time recording of data in live-dumping applications (e.g., ++ packet logging); ++ ++. responsiveness in local and remote access to a server. ++ ++ ++2. How does BFQ work? ++===================== ++ ++BFQ is a proportional-share I/O scheduler, whose general structure, ++plus a lot of code, are borrowed from CFQ. ++ ++- Each process doing I/O on a device is associated with a weight and a ++ (bfq_)queue. ++ ++- BFQ grants exclusive access to the device, for a while, to one queue ++ (process) at a time, and implements this service model by ++ associating every queue with a budget, measured in number of ++ sectors. 
++ ++ - After a queue is granted access to the device, the budget of the ++ queue is decremented, on each request dispatch, by the size of the ++ request. ++ ++ - The in-service queue is expired, i.e., its service is suspended, ++ only if one of the following events occurs: 1) the queue finishes ++ its budget, 2) the queue empties, 3) a "budget timeout" fires. ++ ++ - The budget timeout prevents processes doing random I/O from ++ holding the device for too long and dramatically reducing ++ throughput. ++ ++ - Actually, as in CFQ, a queue associated with a process issuing ++ sync requests may not be expired immediately when it empties. In ++ contrast, BFQ may idle the device for a short time interval, ++ giving the process the chance to go on being served if it issues ++ a new request in time. Device idling typically boosts the ++ throughput on rotational devices, if processes do synchronous ++ and sequential I/O. In addition, under BFQ, device idling is ++ also instrumental in guaranteeing the desired throughput ++ fraction to processes issuing sync requests (see the description ++ of the slice_idle tunable in this document, or [1, 2], for more ++ details). ++ ++ - With respect to idling for service guarantees, if several ++ processes are competing for the device at the same time, but ++ all processes (and groups, after the following commit) have ++ the same weight, then BFQ guarantees the expected throughput ++ distribution without ever idling the device. Throughput is ++ thus as high as possible in this common scenario. ++ ++ - If low-latency mode is enabled (default configuration), BFQ ++ executes some special heuristics to detect interactive and soft ++ real-time applications (e.g., video or audio players/streamers), ++ and to reduce their latency. The most important action taken to ++ achieve this goal is to give to the queues associated with these ++ applications more than their fair share of the device ++ throughput. For brevity, we call just "weight-raising" the whole ++ sets of actions taken by BFQ to privilege these queues. In ++ particular, BFQ provides a milder form of weight-raising for ++ interactive applications, and a stronger form for soft real-time ++ applications. ++ ++ - BFQ automatically deactivates idling for queues born in a burst of ++ queue creations. In fact, these queues are usually associated with ++ the processes of applications and services that benefit mostly ++ from a high throughput. Examples are systemd during boot, or git ++ grep. ++ ++ - As CFQ, BFQ merges queues performing interleaved I/O, i.e., ++ performing random I/O that becomes mostly sequential if ++ merged. Differently from CFQ, BFQ achieves this goal with a more ++ reactive mechanism, called Early Queue Merge (EQM). EQM is so ++ responsive in detecting interleaved I/O (cooperating processes), ++ that it enables BFQ to achieve a high throughput, by queue ++ merging, even for queues for which CFQ needs a different ++ mechanism, preemption, to get a high throughput. As such EQM is a ++ unified mechanism to achieve a high throughput with interleaved ++ I/O. ++ ++ - Queues are scheduled according to a variant of WF2Q+, named ++ B-WF2Q+, and implemented using an augmented rb-tree to preserve an ++ O(log N) overall complexity. See [2] for more details. B-WF2Q+ is ++ also ready for hierarchical scheduling. However, for a cleaner ++ logical breakdown, the code that enables and completes ++ hierarchical support is provided in the next commit, which focuses ++ exactly on this feature. 
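++
++    (A back-of-the-envelope illustration of the budget-based service
++    scheme described in this section; a sketch assuming 512-byte
++    sectors and the 16384-sector default maximum budget that appears
++    later in this patch:
++
++        budget=16384                 # sectors granted to the queue
++        req=$(( 256 * 1024 / 512 ))  # one 256 KiB request = 512 sectors
++        echo "$(( budget / req ))"   # 32 requests empty the budget
++
++    so, at the default maximum budget, a queue doing sequential
++    256 KiB reads is served for up to 8 MiB before its budget, if not
++    its timeout, expires.)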
++
++  - B-WF2Q+ guarantees a tight deviation with respect to an ideal,
++    perfectly fair, and smooth service. In particular, B-WF2Q+
++    guarantees that each queue receives a fraction of the device
++    throughput proportional to its weight, even if the throughput
++    fluctuates, and regardless of: the device parameters, the current
++    workload and the budgets assigned to the queue.
++
++  - The last, budget-independence, property (although probably
++    counterintuitive in the first place) is definitely beneficial, for
++    the following reasons:
++
++    - First, with any proportional-share scheduler, the maximum
++      deviation with respect to an ideal service is proportional to
++      the maximum budget (slice) assigned to queues. As a consequence,
++      BFQ can keep this deviation tight not only because of the
++      accurate service of B-WF2Q+, but also because BFQ *does not*
++      need to assign a larger budget to a queue to let the queue
++      receive a higher fraction of the device throughput.
++
++    - Second, BFQ is free to choose, for every process (queue), the
++      budget that best fits the needs of the process, or best
++      leverages the I/O pattern of the process. In particular, BFQ
++      updates queue budgets with a simple feedback-loop algorithm that
++      allows a high throughput to be achieved, while still providing
++      tight latency guarantees to time-sensitive applications. When
++      the in-service queue expires, this algorithm computes the next
++      budget of the queue so as to:
++
++      - Let large budgets be eventually assigned to the queues
++        associated with I/O-bound applications performing sequential
++        I/O: in fact, the longer these applications are served once
++        they get access to the device, the higher the throughput is.
++
++      - Let small budgets be eventually assigned to the queues
++        associated with time-sensitive applications (which typically
++        perform sporadic and short I/O), because, the smaller the
++        budget assigned to a queue waiting for service is, the sooner
++        B-WF2Q+ will serve that queue (Subsec 3.3 in [2]).
++
++- If several processes are competing for the device at the same time,
++  but all processes and groups have the same weight, then BFQ
++  guarantees the expected throughput distribution without ever idling
++  the device. It uses preemption instead. Throughput is then much
++  higher in this common scenario.
++
++- ioprio classes are served in strict priority order, i.e.,
++  lower-priority queues are not served as long as there are
++  higher-priority queues. Among queues in the same class, the
++  bandwidth is distributed in proportion to the weight of each
++  queue. A very thin extra bandwidth is however guaranteed to
++  the Idle class, to prevent it from starving.
++
++
++3. What are BFQ's tunables?
++===========================
++
++The tunables back_seek_max, back_seek_penalty, fifo_expire_async and
++fifo_expire_sync below are the same as in CFQ. Their description is
++simply copied from the CFQ one. Some considerations in the description
++of slice_idle are copied from CFQ too.
++
++per-process ioprio and weight
++-----------------------------
++
++Unless the cgroups interface is used (see "4. BFQ group scheduling"),
++weights can be assigned to processes only indirectly, through I/O
++priorities, and according to the relation:
++weight = (IOPRIO_BE_NR - ioprio) * 10.
++
++Beware that, if low-latency is set, then BFQ automatically raises the
++weight of the queues associated with interactive and soft real-time
++applications. Unset this tunable if you need/want to control weights.
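++
++For example, weights can be steered from userspace along these lines
++(a sketch: the sysfs path is the standard one for legacy elevators,
++sda is an illustrative device, and IOPRIO_BE_NR is assumed to be 8,
++so weights span 10..80):
++
++    # take manual control of weights by disabling the low-latency
++    # heuristics (see the low_latency tunable below)
++    echo 0 > /sys/block/sda/queue/iosched/low_latency
++    # best-effort ioprio 3 for the current shell:
++    # weight = (IOPRIO_BE_NR - ioprio) * 10 = (8 - 3) * 10 = 50
++    ionice -c 2 -n 3 -p $$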
++ ++slice_idle ++---------- ++ ++This parameter specifies how long BFQ should idle for next I/O ++request, when certain sync BFQ queues become empty. By default ++slice_idle is a non-zero value. Idling has a double purpose: boosting ++throughput and making sure that the desired throughput distribution is ++respected (see the description of how BFQ works, and, if needed, the ++papers referred there). ++ ++As for throughput, idling can be very helpful on highly seeky media ++like single spindle SATA/SAS disks where we can cut down on overall ++number of seeks and see improved throughput. ++ ++Setting slice_idle to 0 will remove all the idling on queues and one ++should see an overall improved throughput on faster storage devices ++like multiple SATA/SAS disks in hardware RAID configuration. ++ ++So depending on storage and workload, it might be useful to set ++slice_idle=0. In general for SATA/SAS disks and software RAID of ++SATA/SAS disks keeping slice_idle enabled should be useful. For any ++configurations where there are multiple spindles behind single LUN ++(Host based hardware RAID controller or for storage arrays), setting ++slice_idle=0 might end up in better throughput and acceptable ++latencies. ++ ++Idling is however necessary to have service guarantees enforced in ++case of differentiated weights or differentiated I/O-request lengths. ++To see why, suppose that a given BFQ queue A must get several I/O ++requests served for each request served for another queue B. Idling ++ensures that, if A makes a new I/O request slightly after becoming ++empty, then no request of B is dispatched in the middle, and thus A ++does not lose the possibility to get more than one request dispatched ++before the next request of B is dispatched. Note that idling ++guarantees the desired differentiated treatment of queues only in ++terms of I/O-request dispatches. To guarantee that the actual service ++order then corresponds to the dispatch order, the strict_guarantees ++tunable must be set too. ++ ++There is an important flipside for idling: apart from the above cases ++where it is beneficial also for throughput, idling can severely impact ++throughput. One important case is random workload. Because of this ++issue, BFQ tends to avoid idling as much as possible, when it is not ++beneficial also for throughput. As a consequence of this behavior, and ++of further issues described for the strict_guarantees tunable, ++short-term service guarantees may be occasionally violated. And, in ++some cases, these guarantees may be more important than guaranteeing ++maximum throughput. For example, in video playing/streaming, a very ++low drop rate may be more important than maximum throughput. In these ++cases, consider setting the strict_guarantees parameter. ++ ++strict_guarantees ++----------------- ++ ++If this parameter is set (default: unset), then BFQ ++ ++- always performs idling when the in-service queue becomes empty; ++ ++- forces the device to serve one I/O request at a time, by dispatching a ++ new request only if there is no outstanding request. ++ ++In the presence of differentiated weights or I/O-request sizes, both ++the above conditions are needed to guarantee that every BFQ queue ++receives its allotted share of the bandwidth. The first condition is ++needed for the reasons explained in the description of the slice_idle ++tunable. 
The second condition is needed because all modern storage
++devices reorder internally-queued requests, which may trivially break
++the service guarantees enforced by the I/O scheduler.
++
++Setting strict_guarantees may evidently affect throughput.
++
++back_seek_max
++-------------
++
++This specifies, given in Kbytes, the maximum "distance" for backward seeking.
++The distance is the amount of space from the current head location to the
++sectors that are backward in terms of distance.
++
++This parameter allows the scheduler to anticipate requests in the "backward"
++direction and consider them as being the "next" if they are within this
++distance from the current head location.
++
++back_seek_penalty
++-----------------
++
++This parameter is used to compute the cost of backward seeking. If the
++backward distance of a request is just 1/back_seek_penalty from a "front"
++request, then the seeking cost of the two requests is considered equivalent.
++
++The scheduler will then not bias toward one or the other request (otherwise
++it would bias toward the front request). The default value of
++back_seek_penalty is 2.
++
++fifo_expire_async
++-----------------
++
++This parameter is used to set the timeout of asynchronous requests. Its
++default value is 248ms.
++
++fifo_expire_sync
++----------------
++
++This parameter is used to set the timeout of synchronous requests. Its
++default value is 124ms. To favor synchronous requests over asynchronous
++ones, this value should be decreased relative to fifo_expire_async.
++
++low_latency
++-----------
++
++This parameter is used to enable/disable BFQ's low latency mode. By
++default, low latency mode is enabled. If enabled, interactive and soft
++real-time applications are privileged and experience a lower latency,
++as explained in more detail in the description of how BFQ works.
++
++DO NOT enable this mode if you need full control over bandwidth
++distribution. In fact, if it is enabled, then BFQ automatically
++increases the bandwidth share of privileged applications, as the main
++means to guarantee a lower latency to them.
++
++timeout_sync
++------------
++
++Maximum amount of device time that can be given to a task (queue) once
++it has been selected for service. On devices with costly seeks,
++increasing this time usually increases maximum throughput. On the
++opposite end, increasing this time coarsens the granularity of the
++short-term bandwidth and latency guarantees, especially if the
++following parameter is set to zero.
++
++max_budget
++----------
++
++Maximum amount of service, measured in sectors, that can be provided
++to a BFQ queue once it is set in service (of course within the limits
++of the above timeout). As explained in the description of
++the algorithm, larger values increase the throughput in proportion to
++the percentage of sequential I/O requests issued. The price of larger
++values is that they coarsen the granularity of short-term bandwidth
++and latency guarantees.
++
++The default value is 0, which enables auto-tuning: BFQ sets max_budget
++to the maximum number of sectors that can be served during
++timeout_sync, according to the estimated peak rate.
++
++weights
++-------
++
++Read-only parameter, used to show the weights of the currently active
++BFQ queues.
++
++
++wr_ tunables
++------------
++
++BFQ exports a few parameters to control/tune the behavior of
++low-latency heuristics.
++
++wr_coeff
++
++Factor by which the weight of a weight-raised queue is multiplied.
++If the queue is deemed soft real-time, then the weight is further
++multiplied by an additional, constant factor.
++
++wr_max_time
++
++Maximum duration of a weight-raising period for an interactive task
++(ms). If set to zero (default value), then this value is computed
++automatically, as a function of the peak rate of the device. In any
++case, when the value of this parameter is read, it always reports the
++current duration, regardless of whether it has been set manually or
++computed automatically.
++
++wr_max_softrt_rate
++
++Maximum service rate below which a queue is deemed to be associated
++with a soft real-time application, and is then weight-raised
++accordingly (sectors/sec).
++
++wr_min_idle_time
++
++Minimum idle period after which interactive weight-raising may be
++reactivated for a queue (in ms).
++
++wr_rt_max_time
++
++Maximum weight-raising duration for soft real-time queues (in ms). The
++start time from which this duration is considered is automatically
++moved forward if the queue is detected to be still soft real-time
++before the current soft real-time weight-raising period finishes.
++
++wr_min_inter_arr_async
++
++Minimum period between I/O request arrivals after which weight-raising
++may be reactivated for an already busy async queue (in ms).
++
++
++4. Group scheduling with BFQ
++============================
++
++BFQ supports both cgroups-v1 and cgroups-v2 io controllers, namely
++blkio and io. In particular, BFQ supports weight-based proportional
++share. To activate cgroups support, set BFQ_GROUP_IOSCHED.
++
++4-1 Service guarantees provided
++-------------------------------
++
++With BFQ, proportional share means true proportional share of the
++device bandwidth, according to group weights. For example, a group
++with weight 200 gets twice the bandwidth, and not just twice the time,
++of a group with weight 100.
++
++BFQ supports hierarchies (group trees) of any depth. Bandwidth is
++distributed among groups and processes in the expected way: for each
++group, the children of the group share the whole bandwidth of the
++group in proportion to their weights. In particular, this implies
++that, for each leaf group, every process of the group receives the
++same share of the whole group bandwidth, unless the ioprio of the
++process is modified.
++
++The resource-sharing guarantee for a group may partially or totally
++switch from bandwidth to time, if providing bandwidth guarantees to
++the group lowers the throughput too much. This switch occurs on a
++per-process basis: if a process of a leaf group causes a throughput
++loss when served in such a way as to receive its share of the
++bandwidth, then BFQ switches back to just time-based proportional
++share for that process.
++
++4-2 Interface
++-------------
++
++To get proportional sharing of bandwidth with BFQ for a given device,
++BFQ must of course be the active scheduler for that device.
++
++Within each group directory, the names of the files associated with
++BFQ-specific cgroup parameters and stats begin with the "bfq."
++prefix. So, with cgroups-v1 or cgroups-v2, the full prefix for
++BFQ-specific files is "blkio.bfq." or "io.bfq." For example, the group
++parameter to set the weight of a group with BFQ is blkio.bfq.weight
++or io.bfq.weight.
++
++Parameters to set
++-----------------
++
++For each group, there is only the following parameter to set.
++
++weight (namely blkio.bfq.weight or io.bfq.weight): the weight of the
++group inside its parent. Available values: 1..10000 (default 100).
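++
++For instance, with the cgroups-v1 blkio controller (a sketch; the
++mount point and the group name "fast" are illustrative):
++
++    mkdir /sys/fs/cgroup/blkio/fast
++    # give group "fast" twice the device bandwidth of the default group
++    echo 200 > /sys/fs/cgroup/blkio/fast/blkio.bfq.weight
++    echo $$ > /sys/fs/cgroup/blkio/fast/cgroup.procs
++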
The ++linear mapping between ioprio and weights, described at the beginning ++of the tunable section, is still valid, but all weights higher than ++IOPRIO_BE_NR*10 are mapped to ioprio 0. ++ ++Recall that, if low-latency is set, then BFQ automatically raises the ++weight of the queues associated with interactive and soft real-time ++applications. Unset this tunable if you need/want to control weights. ++ ++ ++[1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O ++ Scheduler", Proceedings of the First Workshop on Mobile System ++ Technologies (MST-2015), May 2015. ++ http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf ++ ++[2] P. Valente and M. Andreolini, "Improving Application ++ Responsiveness with the BFQ Disk I/O Scheduler", Proceedings of ++ the 5th Annual International Systems and Storage Conference ++ (SYSTOR '12), June 2012. ++ Slightly extended version: ++ http://algogroup.unimore.it/people/paolo/disk_sched/bfq-v1-suite- ++ results.pdf diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched -index f78cd1a..6d92579 100644 +index f78cd1a..f2cd945 100644 --- a/block/Kconfig.iosched +++ b/block/Kconfig.iosched -@@ -53,7 +53,7 @@ config IOSCHED_BFQ +@@ -43,20 +43,20 @@ config IOSCHED_BFQ + tristate "BFQ I/O scheduler" + default n + ---help--- +- The BFQ I/O scheduler tries to distribute bandwidth among +- all processes according to their weights. +- It aims at distributing the bandwidth as desired, independently of +- the disk parameters and with any workload. It also tries to +- guarantee low latency to interactive and soft real-time +- applications. If compiled built-in (saying Y here), BFQ can +- be configured to support hierarchical scheduling. ++ The BFQ I/O scheduler distributes bandwidth among all ++ processes according to their weights, regardless of the ++ device parameters and with any workload. It also guarantees ++ a low latency to interactive and soft real-time applications. ++ Details in Documentation/block/bfq-iosched.txt config BFQ_GROUP_IOSCHED bool "BFQ hierarchical scheduling support" @@ -24,35 +590,91 @@ index f78cd1a..6d92579 100644 + depends on IOSCHED_BFQ && BLK_CGROUP default n ---help--- - Enable hierarchical scheduling in BFQ, using the blkio controller. +- Enable hierarchical scheduling in BFQ, using the blkio controller. ++ ++ Enable hierarchical scheduling in BFQ, using the blkio ++ (cgroups-v1) or io (cgroups-v2) controller. + + choice + prompt "Default I/O scheduler" diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c -index 5ee99ec..c83d90c 100644 +index 0367996..0125275 100644 --- a/block/bfq-cgroup.c +++ b/block/bfq-cgroup.c -@@ -162,7 +162,6 @@ static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg) - static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) +@@ -7,7 +7,9 @@ + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * +- * Copyright (C) 2010 Paolo Valente ++ * Copyright (C) 2015 Paolo Valente ++ * ++ * Copyright (C) 2016 Paolo Valente + * + * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ + * file. 
+@@ -163,8 +165,6 @@ static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg) { struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq); + - BUG_ON(!pd); +- return pd_to_bfqg(pd); } -@@ -224,14 +223,6 @@ static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, int rw) - blkg_rwstat_add(&bfqg->stats.merged, rw, 1); +@@ -208,59 +208,47 @@ static void bfqg_put(struct bfq_group *bfqg) + + static void bfqg_stats_update_io_add(struct bfq_group *bfqg, + struct bfq_queue *bfqq, +- int rw) ++ unsigned int op) + { +- blkg_rwstat_add(&bfqg->stats.queued, rw, 1); ++ blkg_rwstat_add(&bfqg->stats.queued, op, 1); + bfqg_stats_end_empty_time(&bfqg->stats); + if (!(bfqq == ((struct bfq_data *)bfqg->bfqd)->in_service_queue)) + bfqg_stats_set_start_group_wait_time(bfqg, bfqq_group(bfqq)); + } + +-static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, int rw) +-{ +- blkg_rwstat_add(&bfqg->stats.queued, rw, -1); +-} +- +-static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, int rw) ++static void bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) + { +- blkg_rwstat_add(&bfqg->stats.merged, rw, 1); ++ blkg_rwstat_add(&bfqg->stats.queued, op, -1); } -static void bfqg_stats_update_dispatch(struct bfq_group *bfqg, - uint64_t bytes, int rw) --{ ++static void bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) + { - blkg_stat_add(&bfqg->stats.sectors, bytes >> 9); - blkg_rwstat_add(&bfqg->stats.serviced, rw, 1); - blkg_rwstat_add(&bfqg->stats.service_bytes, rw, bytes); --} -- ++ blkg_rwstat_add(&bfqg->stats.merged, op, 1); + } + static void bfqg_stats_update_completion(struct bfq_group *bfqg, - uint64_t start_time, uint64_t io_start_time, int rw) +- uint64_t start_time, uint64_t io_start_time, int rw) ++ uint64_t start_time, uint64_t io_start_time, ++ unsigned int op) { -@@ -248,17 +239,11 @@ static void bfqg_stats_update_completion(struct bfq_group *bfqg, + struct bfqg_stats *stats = &bfqg->stats; + unsigned long long now = sched_clock(); + + if (time_after64(now, io_start_time)) +- blkg_rwstat_add(&stats->service_time, rw, now - io_start_time); ++ blkg_rwstat_add(&stats->service_time, op, ++ now - io_start_time); + if (time_after64(io_start_time, start_time)) +- blkg_rwstat_add(&stats->wait_time, rw, ++ blkg_rwstat_add(&stats->wait_time, op, + io_start_time - start_time); + } + /* @stats = 0 */ static void bfqg_stats_reset(struct bfqg_stats *stats) { @@ -70,7 +692,7 @@ index 5ee99ec..c83d90c 100644 blkg_stat_reset(&stats->avg_queue_size_sum); blkg_stat_reset(&stats->avg_queue_size_samples); blkg_stat_reset(&stats->dequeue); -@@ -268,21 +253,19 @@ static void bfqg_stats_reset(struct bfqg_stats *stats) +@@ -270,19 +258,16 @@ static void bfqg_stats_reset(struct bfqg_stats *stats) } /* @to += @from */ @@ -89,13 +711,9 @@ index 5ee99ec..c83d90c 100644 blkg_stat_add_aux(&from->time, &from->time); - blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time); blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum); -- blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples); -+ blkg_stat_add_aux(&to->avg_queue_size_samples, -+ &from->avg_queue_size_samples); - blkg_stat_add_aux(&to->dequeue, &from->dequeue); - blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time); - blkg_stat_add_aux(&to->idle_time, &from->idle_time); -@@ -308,10 +291,8 @@ static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) + blkg_stat_add_aux(&to->avg_queue_size_samples, + &from->avg_queue_size_samples); +@@ -311,10 +296,8 @@ 
static void bfqg_stats_xfer_dead(struct bfq_group *bfqg) if (unlikely(!parent)) return; @@ -107,7 +725,14 @@ index 5ee99ec..c83d90c 100644 } static void bfq_init_entity(struct bfq_entity *entity, -@@ -332,15 +313,11 @@ static void bfq_init_entity(struct bfq_entity *entity, +@@ -329,21 +312,17 @@ static void bfq_init_entity(struct bfq_entity *entity, + bfqq->ioprio_class = bfqq->new_ioprio_class; + bfqg_get(bfqg); + } +- entity->parent = bfqg->my_entity; ++ entity->parent = bfqg->my_entity; /* NULL for root group */ + entity->sched_data = &bfqg->sched_data; + } static void bfqg_stats_exit(struct bfqg_stats *stats) { @@ -123,7 +748,7 @@ index 5ee99ec..c83d90c 100644 blkg_stat_exit(&stats->avg_queue_size_sum); blkg_stat_exit(&stats->avg_queue_size_samples); blkg_stat_exit(&stats->dequeue); -@@ -351,15 +328,11 @@ static void bfqg_stats_exit(struct bfqg_stats *stats) +@@ -354,15 +333,11 @@ static void bfqg_stats_exit(struct bfqg_stats *stats) static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) { @@ -140,18 +765,7 @@ index 5ee99ec..c83d90c 100644 blkg_stat_init(&stats->avg_queue_size_sum, gfp) || blkg_stat_init(&stats->avg_queue_size_samples, gfp) || blkg_stat_init(&stats->dequeue, gfp) || -@@ -374,20 +347,36 @@ static int bfqg_stats_init(struct bfqg_stats *stats, gfp_t gfp) - } - - static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd) -- { -+{ - return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL; -- } -+} - - static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) - { +@@ -386,11 +361,27 @@ static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg) return cpd_to_bfqgd(blkcg_to_cpd(blkcg, &blkcg_policy_bfq)); } @@ -159,7 +773,7 @@ index 5ee99ec..c83d90c 100644 +{ + struct bfq_group_data *bgd; + -+ bgd = kzalloc(sizeof(*bgd), GFP_KERNEL); ++ bgd = kzalloc(sizeof(*bgd), gfp); + if (!bgd) + return NULL; + return &bgd->pd; @@ -180,7 +794,7 @@ index 5ee99ec..c83d90c 100644 } static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) -@@ -398,8 +387,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) +@@ -401,8 +392,7 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) if (!bfqg) return NULL; @@ -190,7 +804,7 @@ index 5ee99ec..c83d90c 100644 kfree(bfqg); return NULL; } -@@ -407,27 +395,20 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) +@@ -410,27 +400,20 @@ static struct blkg_policy_data *bfq_pd_alloc(gfp_t gfp, int node) return &bfqg->pd; } @@ -228,7 +842,7 @@ index 5ee99ec..c83d90c 100644 entity->orig_weight = entity->weight = entity->new_weight = d->weight; entity->my_sched_data = &bfqg->sched_data; -@@ -445,70 +426,53 @@ static void bfq_pd_free(struct blkg_policy_data *pd) +@@ -448,70 +431,53 @@ static void bfq_pd_free(struct blkg_policy_data *pd) struct bfq_group *bfqg = pd_to_bfqg(pd); bfqg_stats_exit(&bfqg->stats); @@ -256,23 +870,23 @@ index 5ee99ec..c83d90c 100644 } -/* to be used by recursive prfill, sums live and dead rwstats recursively */ --static struct blkg_rwstat bfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, -- int off) +-static struct blkg_rwstat +-bfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, int off) +static void bfq_group_set_parent(struct bfq_group *bfqg, + struct bfq_group *parent) { - struct blkg_rwstat a, b; + struct bfq_entity *entity; -+ -+ BUG_ON(!parent); -+ BUG_ON(!bfqg); -+ BUG_ON(bfqg == parent); - a = blkg_rwstat_recursive_sum(pd_to_blkg(pd), &blkcg_policy_bfq, off); - b = blkg_rwstat_recursive_sum(pd_to_blkg(pd), 
&blkcg_policy_bfq, - off + dead_stats_off_delta); - blkg_rwstat_add_aux(&a, &b); - return a; ++ BUG_ON(!parent); ++ BUG_ON(!bfqg); ++ BUG_ON(bfqg == parent); ++ + entity = &bfqg->entity; + entity->parent = parent->my_entity; + entity->sched_data = &parent->sched_data; @@ -326,19 +940,15 @@ index 5ee99ec..c83d90c 100644 /* * Update chain of bfq_groups as we might be handling a leaf group -@@ -531,13 +495,18 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, - return bfqg; - } +@@ -537,11 +503,15 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, + static void bfq_pos_tree_add_move(struct bfq_data *bfqd, + struct bfq_queue *bfqq); --static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq); -+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, -+ struct bfq_queue *bfqq); -+ +static void bfq_bfqq_expire(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + bool compensate, + enum bfqq_expiration reason); - ++ /** * bfq_bfqq_move - migrate @bfqq to @bfqg. * @bfqd: queue descriptor. @@ -347,7 +957,7 @@ index 5ee99ec..c83d90c 100644 * @bfqg: the group to move to. * * Move @bfqq to @bfqg, deactivating it from its old group and reactivating -@@ -548,26 +517,40 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -552,26 +522,40 @@ static void bfq_pos_tree_add_move(struct bfq_data *bfqd, * rcu_read_lock()). */ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, @@ -393,7 +1003,7 @@ index 5ee99ec..c83d90c 100644 - bfq_deactivate_bfqq(bfqd, bfqq, 0); - } else if (entity->on_st) + if (bfq_bfqq_busy(bfqq)) -+ bfq_deactivate_bfqq(bfqd, bfqq, 0); ++ bfq_deactivate_bfqq(bfqd, bfqq, false, false); + else if (entity->on_st) { + BUG_ON(&bfq_entity_service_tree(entity)->idle != + entity->tree); @@ -402,7 +1012,7 @@ index 5ee99ec..c83d90c 100644 bfqg_put(bfqq_group(bfqq)); /* -@@ -579,14 +562,17 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -583,14 +567,17 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, entity->sched_data = &bfqg->sched_data; bfqg_get(bfqg); @@ -423,16 +1033,20 @@ index 5ee99ec..c83d90c 100644 } /** -@@ -613,7 +599,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +@@ -617,7 +604,11 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, lockdep_assert_held(bfqd->queue->queue_lock); - bfqg = bfq_find_alloc_group(bfqd, blkcg); + bfqg = bfq_find_set_group(bfqd, blkcg); ++ ++ if (unlikely(!bfqg)) ++ bfqg = bfqd->root_group; ++ if (async_bfqq) { entity = &async_bfqq->entity; -@@ -621,7 +607,8 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +@@ -625,7 +616,8 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, bic_set_bfqq(bic, NULL, 0); bfq_log_bfqq(bfqd, async_bfqq, "bic_change_group: %p %d", @@ -442,7 +1056,7 @@ index 5ee99ec..c83d90c 100644 bfq_put_queue(async_bfqq); } } -@@ -629,7 +616,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +@@ -633,7 +625,7 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, if (sync_bfqq) { entity = &sync_bfqq->entity; if (entity->sched_data != &bfqg->sched_data) @@ -451,7 +1065,7 @@ index 5ee99ec..c83d90c 100644 } return bfqg; -@@ -638,25 +625,23 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, +@@ -642,25 +634,23 @@ static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd, static void 
bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) { struct bfq_data *bfqd = bic_to_bfqd(bic); @@ -485,22 +1099,25 @@ index 5ee99ec..c83d90c 100644 } /** -@@ -682,8 +667,7 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, +@@ -672,7 +662,7 @@ static void bfq_flush_idle_tree(struct bfq_service_tree *st) + struct bfq_entity *entity = st->first_idle; + + for (; entity ; entity = st->first_idle) +- __bfq_deactivate_entity(entity, 0); ++ __bfq_deactivate_entity(entity, false); + } + + /** +@@ -686,7 +676,7 @@ static void bfq_reparent_leaf_entity(struct bfq_data *bfqd, struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); BUG_ON(!bfqq); - bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group); -- return; + bfq_bfqq_move(bfqd, bfqq, bfqd->root_group); } /** -@@ -711,16 +695,15 @@ static void bfq_reparent_active_entities(struct bfq_data *bfqd, - if (bfqg->sched_data.in_service_entity) - bfq_reparent_leaf_entity(bfqd, - bfqg->sched_data.in_service_entity); -- -- return; +@@ -717,11 +707,12 @@ static void bfq_reparent_active_entities(struct bfq_data *bfqd, } /** @@ -517,7 +1134,12 @@ index 5ee99ec..c83d90c 100644 */ static void bfq_pd_offline(struct blkg_policy_data *pd) { -@@ -779,6 +762,12 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) +@@ -776,10 +767,16 @@ static void bfq_pd_offline(struct blkg_policy_data *pd) + BUG_ON(bfqg->sched_data.next_in_service); + BUG_ON(bfqg->sched_data.in_service_entity); + +- __bfq_deactivate_entity(entity, 0); ++ __bfq_deactivate_entity(entity, false); bfq_put_async_queues(bfqd, bfqg); BUG_ON(entity->tree); @@ -530,7 +1152,7 @@ index 5ee99ec..c83d90c 100644 bfqg_stats_xfer_dead(bfqg); } -@@ -788,46 +777,35 @@ static void bfq_end_wr_async(struct bfq_data *bfqd) +@@ -789,46 +786,35 @@ static void bfq_end_wr_async(struct bfq_data *bfqd) list_for_each_entry(blkg, &bfqd->queue->blkg_list, q_node) { struct bfq_group *bfqg = blkg_to_bfqg(blkg); @@ -588,15 +1210,7 @@ index 5ee99ec..c83d90c 100644 if (val < BFQ_MIN_WEIGHT || val > BFQ_MAX_WEIGHT) return ret; -@@ -837,6 +815,7 @@ static int bfqio_cgroup_weight_write(struct cgroup_subsys_state *css, - bfqgd->weight = (unsigned short)val; - hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) { - struct bfq_group *bfqg = blkg_to_bfqg(blkg); -+ - if (!bfqg) - continue; - /* -@@ -871,13 +850,18 @@ static int bfqio_cgroup_weight_write(struct cgroup_subsys_state *css, +@@ -873,13 +859,18 @@ static int bfqio_cgroup_weight_write(struct cgroup_subsys_state *css, return ret; } @@ -620,7 +1234,7 @@ index 5ee99ec..c83d90c 100644 } static int bfqg_print_stat(struct seq_file *sf, void *v) -@@ -897,16 +881,17 @@ static int bfqg_print_rwstat(struct seq_file *sf, void *v) +@@ -899,16 +890,17 @@ static int bfqg_print_rwstat(struct seq_file *sf, void *v) static u64 bfqg_prfill_stat_recursive(struct seq_file *sf, struct blkg_policy_data *pd, int off) { @@ -642,7 +1256,7 @@ index 5ee99ec..c83d90c 100644 return __blkg_prfill_rwstat(sf, pd, &sum); } -@@ -926,6 +911,41 @@ static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) +@@ -928,6 +920,41 @@ static int bfqg_print_rwstat_recursive(struct seq_file *sf, void *v) return 0; } @@ -684,37 +1298,23 @@ index 5ee99ec..c83d90c 100644 static u64 bfqg_prfill_avg_queue_size(struct seq_file *sf, struct blkg_policy_data *pd, int off) { -@@ -950,7 +970,8 @@ static int bfqg_print_avg_queue_size(struct seq_file *sf, void *v) - return 0; - } - --static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) -+static struct bfq_group * 
-+bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) - { - int ret; - -@@ -958,41 +979,18 @@ static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int n - if (ret) - return NULL; - -- return blkg_to_bfqg(bfqd->queue->root_blkg); -+ return blkg_to_bfqg(bfqd->queue->root_blkg); +@@ -964,38 +991,15 @@ bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) + return blkg_to_bfqg(bfqd->queue->root_blkg); } -static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp) -{ -- struct bfq_group_data *bgd; +- struct bfq_group_data *bgd; - -- bgd = kzalloc(sizeof(*bgd), GFP_KERNEL); -- if (!bgd) -- return NULL; -- return &bgd->pd; +- bgd = kzalloc(sizeof(*bgd), GFP_KERNEL); +- if (!bgd) +- return NULL; +- return &bgd->pd; -} - -static void bfq_cpd_free(struct blkcg_policy_data *cpd) -{ -- kfree(cpd_to_bfqgd(cpd)); +- kfree(cpd_to_bfqgd(cpd)); -} - -static struct cftype bfqio_files_dfl[] = { @@ -742,7 +1342,7 @@ index 5ee99ec..c83d90c 100644 { .name = "bfq.time", .private = offsetof(struct bfq_group, stats.time), -@@ -1000,18 +998,17 @@ static struct cftype bfqio_files[] = { +@@ -1003,18 +1007,17 @@ static struct cftype bfqio_files[] = { }, { .name = "bfq.sectors", @@ -766,7 +1366,7 @@ index 5ee99ec..c83d90c 100644 }, { .name = "bfq.io_service_time", -@@ -1042,18 +1039,17 @@ static struct cftype bfqio_files[] = { +@@ -1045,18 +1048,17 @@ static struct cftype bfqio_files[] = { }, { .name = "bfq.sectors_recursive", @@ -790,7 +1390,7 @@ index 5ee99ec..c83d90c 100644 }, { .name = "bfq.io_service_time_recursive", -@@ -1099,32 +1095,35 @@ static struct cftype bfqio_files[] = { +@@ -1102,31 +1104,42 @@ static struct cftype bfqio_files[] = { .private = offsetof(struct bfq_group, stats.dequeue), .seq_show = bfqg_print_stat, }, @@ -803,20 +1403,19 @@ index 5ee99ec..c83d90c 100644 }; -static struct blkcg_policy blkcg_policy_bfq = { -- .dfl_cftypes = bfqio_files_dfl, -- .legacy_cftypes = bfqio_files, +- .dfl_cftypes = bfqio_files_dfl, +- .legacy_cftypes = bfqio_files, - -- .pd_alloc_fn = bfq_pd_alloc, -- .pd_init_fn = bfq_pd_init, -- .pd_offline_fn = bfq_pd_offline, -- .pd_free_fn = bfq_pd_free, -- .pd_reset_stats_fn = bfq_pd_reset_stats, -- -- .cpd_alloc_fn = bfq_cpd_alloc, -- .cpd_init_fn = bfq_cpd_init, -- .cpd_bind_fn = bfq_cpd_init, -- .cpd_free_fn = bfq_cpd_free, +- .pd_alloc_fn = bfq_pd_alloc, +- .pd_init_fn = bfq_pd_init, +- .pd_offline_fn = bfq_pd_offline, +- .pd_free_fn = bfq_pd_free, +- .pd_reset_stats_fn = bfq_pd_reset_stats, - +- .cpd_alloc_fn = bfq_cpd_alloc, +- .cpd_init_fn = bfq_cpd_init, +- .cpd_bind_fn = bfq_cpd_init, +- .cpd_free_fn = bfq_cpd_free, +static struct cftype bfq_blkg_files[] = { + { + .name = "bfq.weight", @@ -831,33 +1430,49 @@ index 5ee99ec..c83d90c 100644 +#else /* CONFIG_BFQ_GROUP_IOSCHED */ + +static inline void bfqg_stats_update_io_add(struct bfq_group *bfqg, -+ struct bfq_queue *bfqq, int rw) { } -+static inline void bfqg_stats_update_io_remove(struct bfq_group *bfqg, int rw) { } -+static inline void bfqg_stats_update_io_merged(struct bfq_group *bfqg, int rw) { } ++ struct bfq_queue *bfqq, unsigned int op) { } ++static inline void ++bfqg_stats_update_io_remove(struct bfq_group *bfqg, unsigned int op) { } ++static inline void ++bfqg_stats_update_io_merged(struct bfq_group *bfqg, unsigned int op) { } +static inline void bfqg_stats_update_completion(struct bfq_group *bfqg, -+ uint64_t start_time, uint64_t io_start_time, int rw) { } -+static inline void bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, -+struct bfq_group *curr_bfqg) { } ++ 
uint64_t start_time, uint64_t io_start_time, ++ unsigned int op) { } ++static inline void ++bfqg_stats_set_start_group_wait_time(struct bfq_group *bfqg, ++ struct bfq_group *curr_bfqg) { } +static inline void bfqg_stats_end_empty_time(struct bfqg_stats *stats) { } +static inline void bfqg_stats_update_dequeue(struct bfq_group *bfqg) { } +static inline void bfqg_stats_set_start_empty_time(struct bfq_group *bfqg) { } +static inline void bfqg_stats_update_idle_time(struct bfq_group *bfqg) { } +static inline void bfqg_stats_set_start_idle_time(struct bfq_group *bfqg) { } +static inline void bfqg_stats_update_avg_queue_size(struct bfq_group *bfqg) { } ++ ++static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ struct bfq_group *bfqg) {} static void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg) -@@ -1146,27 +1145,20 @@ bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) - return bfqd->root_group; +@@ -1142,35 +1155,22 @@ static void bfq_init_entity(struct bfq_entity *entity, + entity->sched_data = &bfqg->sched_data; } +-static struct bfq_group * +-bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) +-{ +- struct bfq_data *bfqd = bic_to_bfqd(bic); +- +- return bfqd->root_group; +-} +- -static void bfq_bfqq_move(struct bfq_data *bfqd, - struct bfq_queue *bfqq, - struct bfq_entity *entity, - struct bfq_group *bfqg) -{ -} -- ++static void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio) {} + static void bfq_end_wr_async(struct bfq_data *bfqd) { bfq_end_wr_async_queues(bfqd, bfqd->root_group); @@ -865,31 +1480,40 @@ index 5ee99ec..c83d90c 100644 -static void bfq_disconnect_groups(struct bfq_data *bfqd) +static struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, -+ struct blkcg *blkcg) ++ struct blkcg *blkcg) { - bfq_put_async_queues(bfqd, bfqd->root_group); + return bfqd->root_group; } -static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd, -- struct blkcg *blkcg) +- struct blkcg *blkcg) +static struct bfq_group *bfqq_group(struct bfq_queue *bfqq) { - return bfqd->root_group; + return bfqq->bfqd->root_group; } - static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node) + static struct bfq_group * diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c -index d1f648d..3bc1f8b 100644 +index cf3e9b1..e5dfa5a 100644 --- a/block/bfq-iosched.c +++ b/block/bfq-iosched.c -@@ -7,25 +7,26 @@ +@@ -1,5 +1,5 @@ + /* +- * Budget Fair Queueing (BFQ) disk scheduler. ++ * Budget Fair Queueing (BFQ) I/O scheduler. + * + * Based on ideas and code from CFQ: + * Copyright (C) 2003 Jens Axboe +@@ -7,25 +7,34 @@ * Copyright (C) 2008 Fabio Checconi * Paolo Valente * - * Copyright (C) 2010 Paolo Valente -+ * Copyright (C) 2016 Paolo Valente ++ * Copyright (C) 2015 Paolo Valente ++ * ++ * Copyright (C) 2017 Paolo Valente * * Licensed under the GPL-2 as detailed in the accompanying COPYING.BFQ * file. @@ -908,6 +1532,12 @@ index d1f648d..3bc1f8b 100644 - * I/O-bound processes issuing sequential requests (to boost the - * throughput), and yet guarantee a low latency to interactive and soft - * real-time applications. ++ * BFQ is a proportional-share I/O scheduler, with some extra ++ * low-latency capabilities. BFQ also supports full hierarchical ++ * scheduling through cgroups. Next paragraphs provide an introduction ++ * on BFQ inner workings. Details on BFQ benefits and usage can be ++ * found in Documentation/block/bfq-iosched.txt. 
++ * + * BFQ is a proportional-share storage-I/O scheduling algorithm based + * on the slice-by-slice service scheme of CFQ. But BFQ assigns + * budgets, measured in number of sectors, to processes instead of @@ -926,37 +1556,91 @@ index d1f648d..3bc1f8b 100644 * * BFQ is described in [1], where also a reference to the initial, more * theoretical paper on BFQ can be found. The interested reader can find -@@ -87,7 +88,6 @@ static const int bfq_stats_min_budgets = 194; +@@ -40,10 +49,10 @@ + * H-WF2Q+, while the augmented tree used to implement B-WF2Q+ with O(log N) + * complexity derives from the one introduced with EEVDF in [3]. + * +- * [1] P. Valente and M. Andreolini, ``Improving Application Responsiveness +- * with the BFQ Disk I/O Scheduler'', +- * Proceedings of the 5th Annual International Systems and Storage +- * Conference (SYSTOR '12), June 2012. ++ * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O ++ * Scheduler", Proceedings of the First Workshop on Mobile System ++ * Technologies (MST-2015), May 2015. ++ * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf + * + * http://algogroup.unimo.it/people/paolo/disk_sched/bf1-v1-suite-results.pdf + * +@@ -70,24 +79,23 @@ + #include "bfq.h" + #include "blk.h" + +-/* Expiration time of sync (0) and async (1) requests, in jiffies. */ +-static const int bfq_fifo_expire[2] = { HZ / 4, HZ / 8 }; ++/* Expiration time of sync (0) and async (1) requests, in ns. */ ++static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; + + /* Maximum backwards seek, in KiB. */ +-static const int bfq_back_max = 16 * 1024; ++static const int bfq_back_max = (16 * 1024); + + /* Penalty of a backwards seek, in number of sectors. */ + static const int bfq_back_penalty = 2; + +-/* Idling period duration, in jiffies. */ +-static int bfq_slice_idle = HZ / 125; ++/* Idling period duration, in ns. */ ++static u32 bfq_slice_idle = (NSEC_PER_SEC / 125); + + /* Minimum number of assigned budgets for which stats are safe to compute. */ + static const int bfq_stats_min_budgets = 194; /* Default maximum budget values, in sectors and number of requests. */ - static const int bfq_default_max_budget = 16 * 1024; +-static const int bfq_default_max_budget = 16 * 1024; -static const int bfq_max_budget_async_rq = 4; ++static const int bfq_default_max_budget = (16 * 1024); /* * Async to sync throughput distribution is controlled as follows: -@@ -97,8 +97,7 @@ static const int bfq_max_budget_async_rq = 4; +@@ -97,23 +105,28 @@ static const int bfq_max_budget_async_rq = 4; static const int bfq_async_charge_factor = 10; /* Default timeout values, in jiffies, approximating CFQ defaults. */ -static const int bfq_timeout_sync = HZ / 8; -static int bfq_timeout_async = HZ / 25; -+static const int bfq_timeout = HZ / 8; ++static const int bfq_timeout = (HZ / 8); - struct kmem_cache *bfq_pool; +-struct kmem_cache *bfq_pool; ++static struct kmem_cache *bfq_pool; -@@ -109,8 +108,9 @@ struct kmem_cache *bfq_pool; +-/* Below this threshold (in ms), we consider thinktime immediate. */ +-#define BFQ_MIN_TT 2 ++/* Below this threshold (in ns), we consider thinktime immediate. */ ++#define BFQ_MIN_TT (2 * NSEC_PER_MSEC) + + /* hw_tag detection: parallel requests threshold and min samples needed. 
*/ #define BFQ_HW_QUEUE_THRESHOLD 4 #define BFQ_HW_QUEUE_SAMPLES 32 -#define BFQQ_SEEK_THR (sector_t)(8 * 1024) -#define BFQQ_SEEKY(bfqq) ((bfqq)->seek_mean > BFQQ_SEEK_THR) -+#define BFQQ_SEEK_THR (sector_t)(8 * 100) ++#define BFQQ_SEEK_THR (sector_t)(8 * 100) ++#define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) +#define BFQQ_CLOSE_THR (sector_t)(8 * 1024) +#define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 32/8) - /* Min samples used for peak rate estimation (for autotuning). */ - #define BFQ_PEAK_RATE_SAMPLES 32 -@@ -141,16 +141,24 @@ struct kmem_cache *bfq_pool; +-/* Min samples used for peak rate estimation (for autotuning). */ +-#define BFQ_PEAK_RATE_SAMPLES 32 ++/* Min number of samples required to perform peak-rate update */ ++#define BFQ_RATE_MIN_SAMPLES 32 ++/* Min observation time interval required to perform a peak-rate update (ns) */ ++#define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC) ++/* Target observation time interval for a peak-rate update (ns) */ ++#define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC + + /* Shift used for peak rate fixed precision calculations. */ + #define BFQ_RATE_SHIFT 16 +@@ -141,16 +154,24 @@ struct kmem_cache *bfq_pool; * The device's speed class is dynamically (re)detected in * bfq_update_peak_rate() every time the estimated peak rate is updated. * @@ -988,7 +1672,26 @@ index d1f648d..3bc1f8b 100644 /* * To improve readability, a conversion function is used to initialize the * following arrays, which entails that they can be initialized only in a -@@ -410,11 +418,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd) +@@ -178,18 +199,6 @@ static void bfq_schedule_dispatch(struct bfq_data *bfqd); + #define bfq_sample_valid(samples) ((samples) > 80) + + /* +- * We regard a request as SYNC, if either it's a read or has the SYNC bit +- * set (in which case it could also be a direct WRITE). +- */ +-static int bfq_bio_sync(struct bio *bio) +-{ +- if (bio_data_dir(bio) == READ || (bio->bi_rw & REQ_SYNC)) +- return 1; +- +- return 0; +-} +- +-/* + * Scheduler run of queue, if there are requests pending and no one in the + * driver that will restart queueing. + */ +@@ -409,11 +418,7 @@ static bool bfq_differentiated_weights(struct bfq_data *bfqd) */ static bool bfq_symmetric_scenario(struct bfq_data *bfqd) { @@ -1001,7 +1704,54 @@ index d1f648d..3bc1f8b 100644 } /* -@@ -534,9 +538,19 @@ static struct request *bfq_find_next_rq(struct bfq_data *bfqd, +@@ -505,13 +510,45 @@ static void bfq_weights_tree_remove(struct bfq_data *bfqd, + entity->weight_counter = NULL; + } + ++/* ++ * Return expired entry, or NULL to just start from scratch in rbtree. ++ */ ++static struct request *bfq_check_fifo(struct bfq_queue *bfqq, ++ struct request *last) ++{ ++ struct request *rq; ++ ++ if (bfq_bfqq_fifo_expire(bfqq)) ++ return NULL; ++ ++ bfq_mark_bfqq_fifo_expire(bfqq); ++ ++ rq = rq_entry_fifo(bfqq->fifo.next); ++ ++ if (rq == last || ktime_get_ns() < rq->fifo_time) ++ return NULL; ++ ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq); ++ BUG_ON(RB_EMPTY_NODE(&rq->rb_node)); ++ return rq; ++} ++ + static struct request *bfq_find_next_rq(struct bfq_data *bfqd, + struct bfq_queue *bfqq, + struct request *last) + { + struct rb_node *rbnext = rb_next(&last->rb_node); + struct rb_node *rbprev = rb_prev(&last->rb_node); +- struct request *next = NULL, *prev = NULL; ++ struct request *next, *prev = NULL; ++ ++ BUG_ON(list_empty(&bfqq->fifo)); ++ ++ /* Follow expired path, else get first next available. 
*/ ++ next = bfq_check_fifo(bfqq, last); ++ if (next) { ++ BUG_ON(next == last); ++ return next; ++ } + + BUG_ON(RB_EMPTY_NODE(&last->rb_node)); + +@@ -533,9 +570,19 @@ static struct request *bfq_find_next_rq(struct bfq_data *bfqd, static unsigned long bfq_serv_to_charge(struct request *rq, struct bfq_queue *bfqq) { @@ -1024,7 +1774,16 @@ index d1f648d..3bc1f8b 100644 } /** -@@ -591,12 +605,23 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) +@@ -576,7 +623,7 @@ static void bfq_updated_next_req(struct bfq_data *bfqd, + entity->budget = new_budget; + bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu", + new_budget); +- bfq_activate_bfqq(bfqd, bfqq); ++ bfq_requeue_bfqq(bfqd, bfqq); + } + } + +@@ -590,12 +637,23 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd) dur = bfqd->RT_prod; do_div(dur, bfqd->peak_rate); @@ -1046,14 +1805,14 @@ index d1f648d..3bc1f8b 100644 + else if (dur < msecs_to_jiffies(3000)) + dur = msecs_to_jiffies(3000); --static unsigned bfq_bfqq_cooperations(struct bfq_queue *bfqq) +-static unsigned int bfq_bfqq_cooperations(struct bfq_queue *bfqq) -{ - return bfqq->bic ? bfqq->bic->cooperations : 0; + return dur; } static void -@@ -606,31 +631,11 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +@@ -605,31 +663,31 @@ bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_io_cq *bic) bfq_mark_bfqq_idle_window(bfqq); else bfq_clear_bfqq_idle_window(bfqq); @@ -1076,17 +1835,36 @@ index d1f648d..3bc1f8b 100644 - bfqq->wr_cur_max_time = bic->wr_time_left; - bfqq->last_wr_start_finish = jiffies; - bfqq->entity.prio_changed = 1; -- } ++ ++ bfqq->wr_coeff = bic->saved_wr_coeff; ++ bfqq->wr_start_at_switch_to_srt = bic->saved_wr_start_at_switch_to_srt; ++ BUG_ON(time_is_after_jiffies(bfqq->wr_start_at_switch_to_srt)); ++ bfqq->last_wr_start_finish = bic->saved_last_wr_start_finish; ++ bfqq->wr_cur_max_time = bic->saved_wr_cur_max_time; ++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); ++ ++ if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || ++ time_is_before_jiffies(bfqq->last_wr_start_finish + ++ bfqq->wr_cur_max_time))) { ++ bfq_log_bfqq(bfqq->bfqd, bfqq, ++ "resume state: switching off wr (%lu + %lu < %lu)", ++ bfqq->last_wr_start_finish, bfqq->wr_cur_max_time, ++ jiffies); ++ ++ bfqq->wr_coeff = 1; + } - /* - * Clear wr_time_left to prevent bfq_bfqq_save_state() from - * getting confused about the queue's need of a weight-raising - * period. 
- */ - bic->wr_time_left = 0; ++ /* make sure weight will be updated, however we got here */ ++ bfqq->entity.prio_changed = 1; } static int bfqq_process_refs(struct bfq_queue *bfqq) -@@ -640,7 +645,7 @@ static int bfqq_process_refs(struct bfq_queue *bfqq) +@@ -639,7 +697,7 @@ static int bfqq_process_refs(struct bfq_queue *bfqq) lockdep_assert_held(bfqq->bfqd->queue->queue_lock); io_refs = bfqq->allocated[READ] + bfqq->allocated[WRITE]; @@ -1095,7 +1873,7 @@ index d1f648d..3bc1f8b 100644 BUG_ON(process_refs < 0); return process_refs; } -@@ -655,6 +660,7 @@ static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -654,6 +712,7 @@ static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) hlist_del_init(&item->burst_list_node); hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list); bfqd->burst_size = 1; @@ -1103,7 +1881,7 @@ index d1f648d..3bc1f8b 100644 } /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ -@@ -663,6 +669,10 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -662,6 +721,10 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) /* Increment burst size to take into account also bfqq */ bfqd->burst_size++; @@ -1114,7 +1892,7 @@ index d1f648d..3bc1f8b 100644 if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { struct bfq_queue *pos, *bfqq_item; struct hlist_node *n; -@@ -672,15 +682,19 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -671,15 +734,19 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) * other to consider this burst as large. */ bfqd->large_burst = true; @@ -1127,7 +1905,7 @@ index d1f648d..3bc1f8b 100644 hlist_for_each_entry(bfqq_item, &bfqd->burst_list, - burst_list_node) + burst_list_node) { - bfq_mark_bfqq_in_large_burst(bfqq_item); + bfq_mark_bfqq_in_large_burst(bfqq_item); + bfq_log_bfqq(bfqd, bfqq_item, "marked in large burst"); + } bfq_mark_bfqq_in_large_burst(bfqq); @@ -1135,7 +1913,7 @@ index d1f648d..3bc1f8b 100644 /* * From now on, and until the current burst finishes, any -@@ -692,67 +706,79 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -691,67 +758,79 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, burst_list_node) hlist_del_init(&pos->burst_list_node); @@ -1249,7 +2027,8 @@ index d1f648d..3bc1f8b 100644 + * enjoy weight raising as expected. Fortunately these false positives + * are very rare. They typically occur if some service happens to + * start doing I/O exactly when the interactive task starts. -+ * + * +- * . when the very first queue is activated, the queue is inserted into the + * Turning back to the next function, it implements all the steps + * needed to detect the occurrence of a large burst and to properly + * mark all the queues belonging to it (so that they can then be @@ -1258,13 +2037,12 @@ index d1f648d..3bc1f8b 100644 + * burst in progress. The list is then used to mark these queues as + * belonging to a large burst if the burst does become large. The main + * steps are the following. - * -- * . when the very first queue is activated, the queue is inserted into the ++ * + * . when the very first queue is created, the queue is inserted into the * list (as it could be the first queue in a possible burst) * * . 
if the current burst has not yet become large, and a queue Q that does -@@ -773,13 +799,13 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -772,13 +851,13 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) * * . the device enters a large-burst mode * @@ -1280,7 +2058,7 @@ index d1f648d..3bc1f8b 100644 * later, i.e., not shortly after, than the last time at which a queue * either entered the burst list or was marked as belonging to the * current large burst, then the current burst is deemed as finished and: -@@ -792,52 +818,44 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -791,52 +870,44 @@ static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) * in a possible new burst (then the burst list contains just Q * after this step). */ @@ -1357,7 +2135,7 @@ index d1f648d..3bc1f8b 100644 } /* -@@ -846,8 +864,9 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -845,8 +916,9 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, * bfqq as belonging to this large burst immediately. */ if (bfqd->large_burst) { @@ -1368,7 +2146,7 @@ index d1f648d..3bc1f8b 100644 } /* -@@ -856,25 +875,498 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -855,25 +927,490 @@ static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq, * queue. Then we add bfqq to the burst. */ bfq_add_to_burst(bfqd, bfqq); @@ -1388,6 +2166,7 @@ index d1f648d..3bc1f8b 100644 +static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) +{ + struct bfq_entity *entity = &bfqq->entity; ++ + return entity->budget - entity->service; +} + @@ -1550,6 +2329,7 @@ index d1f648d..3bc1f8b 100644 + * operation, is reset only when bfqq is selected for + * service (see bfq_get_next_queue). + */ ++ BUG_ON(bfqq->max_budget < 0); + entity->budget = min_t(unsigned long, + bfq_bfqq_budget_left(bfqq), + bfqq->max_budget); @@ -1558,8 +2338,9 @@ index d1f648d..3bc1f8b 100644 + return true; + } + ++ BUG_ON(bfqq->max_budget < 0); + entity->budget = max_t(unsigned long, bfqq->max_budget, -+ bfq_serv_to_charge(bfqq->next_rq,bfqq)); ++ bfq_serv_to_charge(bfqq->next_rq, bfqq)); + BUG_ON(entity->budget < 0); + + bfq_clear_bfqq_non_blocking_wait_rq(bfqq); @@ -1580,6 +2361,7 @@ index d1f648d..3bc1f8b 100644 + bfqq->wr_coeff = bfqd->bfq_wr_coeff; + bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); + } else { ++ bfqq->wr_start_at_switch_to_srt = jiffies; + bfqq->wr_coeff = bfqd->bfq_wr_coeff * + BFQ_SOFTRT_WEIGHT_FACTOR; + bfqq->wr_cur_max_time = @@ -1613,32 +2395,13 @@ index d1f648d..3bc1f8b 100644 + jiffies, + jiffies_to_msecs(bfqq-> + wr_cur_max_time)); -+ } else if (time_before( -+ bfqq->last_wr_start_finish + -+ bfqq->wr_cur_max_time, -+ jiffies + -+ bfqd->bfq_wr_rt_max_time) && -+ soft_rt) { ++ } else if (soft_rt) { + /* -+ * The remaining weight-raising time is lower -+ * than bfqd->bfq_wr_rt_max_time, which means -+ * that the application is enjoying weight -+ * raising either because deemed soft-rt in -+ * the near past, or because deemed interactive -+ * a long ago. -+ * In both cases, resetting now the current -+ * remaining weight-raising time for the -+ * application to the weight-raising duration -+ * for soft rt applications would not cause any -+ * latency increase for the application (as the -+ * new duration would be higher than the -+ * remaining time). 
-+ * -+ * In addition, the application is now meeting -+ * the requirements for being deemed soft rt. -+ * In the end we can correctly and safely -+ * (re)charge the weight-raising duration for -+ * the application with the weight-raising ++ * The application is now or still meeting the ++ * requirements for being deemed soft rt. We ++ * can then correctly and safely (re)charge ++ * the weight-raising duration for the ++ * application with the weight-raising + * duration for soft rt applications. + * + * In particular, doing this recharge now, i.e., @@ -1662,14 +2425,22 @@ index d1f648d..3bc1f8b 100644 + * latency because the application is not + * weight-raised while they are pending. + */ ++ if (bfqq->wr_cur_max_time != ++ bfqd->bfq_wr_rt_max_time) { ++ bfqq->wr_start_at_switch_to_srt = ++ bfqq->last_wr_start_finish; ++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); ++ ++ bfqq->wr_cur_max_time = ++ bfqd->bfq_wr_rt_max_time; ++ bfqq->wr_coeff = bfqd->bfq_wr_coeff * ++ BFQ_SOFTRT_WEIGHT_FACTOR; ++ bfq_log_bfqq(bfqd, bfqq, ++ "switching to soft_rt wr"); ++ } else ++ bfq_log_bfqq(bfqd, bfqq, ++ "moving forward soft_rt wr duration"); + bfqq->last_wr_start_finish = jiffies; -+ bfqq->wr_cur_max_time = -+ bfqd->bfq_wr_rt_max_time; -+ bfqq->wr_coeff = bfqd->bfq_wr_coeff * -+ BFQ_SOFTRT_WEIGHT_FACTOR; -+ bfq_log_bfqq(bfqd, bfqq, -+ "switching to soft_rt wr, or " -+ " just moving forward duration"); + } + } +} @@ -1697,9 +2468,9 @@ index d1f648d..3bc1f8b 100644 + * bfq_bfqq_update_budg_for_activation for + * details on the usage of the next variable. + */ -+ arrived_in_time = time_is_after_jiffies( ++ arrived_in_time = ktime_get_ns() <= + RQ_BIC(rq)->ttime.last_end_request + -+ bfqd->bfq_slice_idle * 3); ++ bfqd->bfq_slice_idle * 3; + + bfq_log_bfqq(bfqd, bfqq, + "bfq_add_request non-busy: " @@ -1713,8 +2484,7 @@ index d1f648d..3bc1f8b 100644 + BUG_ON(bfqq->entity.budget < bfqq->entity.service); + + BUG_ON(bfqq == bfqd->in_service_queue); -+ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, -+ rq->cmd_flags); ++ bfqg_stats_update_io_add(bfqq_group(RQ_BFQQ(rq)), bfqq, rq->cmd_flags); + + /* + * bfqq deserves to be weight-raised if: @@ -1871,7 +2641,7 @@ index d1f648d..3bc1f8b 100644 */ prev = bfqq->next_rq; next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position); -@@ -887,160 +1379,10 @@ static void bfq_add_request(struct request *rq) +@@ -886,160 +1423,10 @@ static void bfq_add_request(struct request *rq) if (prev != bfqq->next_rq) bfq_pos_tree_add_move(bfqd, bfqq); @@ -2036,7 +2806,7 @@ index d1f648d..3bc1f8b 100644 if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && time_is_before_jiffies( bfqq->last_wr_start_finish + -@@ -1049,16 +1391,43 @@ add_bfqq_busy: +@@ -1048,16 +1435,43 @@ static void bfq_add_request(struct request *rq) bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); bfqd->wr_busy_queues++; @@ -2084,7 +2854,49 @@ index d1f648d..3bc1f8b 100644 if (bfqd->low_latency && (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) bfqq->last_wr_start_finish = jiffies; -@@ -1106,6 +1475,9 @@ static void bfq_remove_request(struct request *rq) +@@ -1074,22 +1488,32 @@ static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, + if (!bic) + return NULL; + +- bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); ++ bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf)); + if (bfqq) + return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); + + return NULL; + } + +-static void bfq_activate_request(struct request_queue *q, struct request *rq) ++static sector_t 
get_sdist(sector_t last_pos, struct request *rq) + { +- struct bfq_data *bfqd = q->elevator->elevator_data; +- +- bfqd->rq_in_driver++; +- bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); +- bfq_log(bfqd, "activate_request: new bfqd->last_position %llu", +- (unsigned long long) bfqd->last_position); +-} ++ sector_t sdist = 0; ++ ++ if (last_pos) { ++ if (last_pos < blk_rq_pos(rq)) ++ sdist = blk_rq_pos(rq) - last_pos; ++ else ++ sdist = last_pos - blk_rq_pos(rq); ++ } ++ ++ return sdist; ++} ++ ++static void bfq_activate_request(struct request_queue *q, struct request *rq) ++{ ++ struct bfq_data *bfqd = q->elevator->elevator_data; ++ bfqd->rq_in_driver++; ++} + + static void bfq_deactivate_request(struct request_queue *q, struct request *rq) + { +@@ -1105,6 +1529,9 @@ static void bfq_remove_request(struct request *rq) struct bfq_data *bfqd = bfqq->bfqd; const int sync = rq_is_sync(rq); @@ -2094,16 +2906,18 @@ index d1f648d..3bc1f8b 100644 if (bfqq->next_rq == rq) { bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq); bfq_updated_next_req(bfqd, bfqq); -@@ -1119,8 +1491,25 @@ static void bfq_remove_request(struct request *rq) +@@ -1118,8 +1545,26 @@ static void bfq_remove_request(struct request *rq) elv_rb_del(&bfqq->sort_list, rq); if (RB_EMPTY_ROOT(&bfqq->sort_list)) { - if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) +- bfq_del_bfqq_busy(bfqd, bfqq, 1); ++ bfqq->next_rq = NULL; ++ + BUG_ON(bfqq->entity.budget < 0); + + if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { - bfq_del_bfqq_busy(bfqd, bfqq, 1); -+ ++ bfq_del_bfqq_busy(bfqd, bfqq, false); + /* bfqq emptied. In normal operation, when + * bfqq is empty, bfqq->entity.service and + * bfqq->entity.budget must contain, @@ -2121,7 +2935,7 @@ index d1f648d..3bc1f8b 100644 /* * Remove queue from request-position tree as it is empty. 
*/ -@@ -1134,9 +1523,7 @@ static void bfq_remove_request(struct request *rq) +@@ -1133,9 +1578,7 @@ static void bfq_remove_request(struct request *rq) BUG_ON(bfqq->meta_pending == 0); bfqq->meta_pending--; } @@ -2131,7 +2945,34 @@ index d1f648d..3bc1f8b 100644 } static int bfq_merge(struct request_queue *q, struct request **req, -@@ -1221,21 +1608,25 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq, +@@ -1145,7 +1588,7 @@ static int bfq_merge(struct request_queue *q, struct request **req, + struct request *__rq; + + __rq = bfq_find_rq_fmerge(bfqd, bio); +- if (__rq && elv_rq_merge_ok(__rq, bio)) { ++ if (__rq && elv_bio_merge_ok(__rq, bio)) { + *req = __rq; + return ELEVATOR_FRONT_MERGE; + } +@@ -1190,7 +1633,7 @@ static void bfq_merged_request(struct request_queue *q, struct request *req, + static void bfq_bio_merged(struct request_queue *q, struct request *req, + struct bio *bio) + { +- bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_rw); ++ bfqg_stats_update_io_merged(bfqq_group(RQ_BFQQ(req)), bio->bi_opf); + } + #endif + +@@ -1210,7 +1653,7 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq, + */ + if (bfqq == next_bfqq && + !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) && +- time_before(next->fifo_time, rq->fifo_time)) { ++ next->fifo_time < rq->fifo_time) { + list_del_init(&rq->queuelist); + list_replace_init(&next->queuelist, &rq->queuelist); + rq->fifo_time = next->fifo_time; +@@ -1220,21 +1663,30 @@ static void bfq_merged_requests(struct request_queue *q, struct request *rq, bfqq->next_rq = rq; bfq_remove_request(next); @@ -2150,17 +2991,22 @@ index d1f648d..3bc1f8b 100644 bfqq->wr_coeff = 1; bfqq->wr_cur_max_time = 0; - /* Trigger a weight change on the next activation of the queue */ ++ bfqq->last_wr_start_finish = jiffies; + /* + * Trigger a weight change on the next invocation of + * __bfq_entity_update_weight_prio. + */ bfqq->entity.prio_changed = 1; ++ bfq_log_bfqq(bfqq->bfqd, bfqq, ++ "end_wr: wrais ending at %lu, rais_max_time %u", ++ bfqq->last_wr_start_finish, ++ jiffies_to_msecs(bfqq->wr_cur_max_time)); + bfq_log_bfqq(bfqq->bfqd, bfqq, "end_wr: wr_busy %d", + bfqq->bfqd->wr_busy_queues); } static void bfq_end_wr_async_queues(struct bfq_data *bfqd, -@@ -1278,7 +1669,7 @@ static int bfq_rq_close_to_sector(void *io_struct, bool request, +@@ -1277,7 +1729,7 @@ static int bfq_rq_close_to_sector(void *io_struct, bool request, sector_t sector) { return abs(bfq_io_struct_pos(io_struct, request) - sector) <= @@ -2169,7 +3015,7 @@ index d1f648d..3bc1f8b 100644 } static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, -@@ -1400,7 +1791,7 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) +@@ -1399,7 +1851,7 @@ bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) * throughput. */ bfqq->new_bfqq = new_bfqq; @@ -2178,7 +3024,7 @@ index d1f648d..3bc1f8b 100644 return new_bfqq; } -@@ -1431,9 +1822,23 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, +@@ -1430,9 +1882,23 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, } /* @@ -2191,7 +3037,7 @@ index d1f648d..3bc1f8b 100644 + * positives. In case bfqq is weight-raised, such false positives + * would evidently degrade latency guarantees for bfqq. 
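+ * (For instance, two queues whose requests become close only
+ * several seconds after bfqq started doing I/O are, with high
+ * probability, not really cooperating; refusing the merge in this
+ * case preserves the low latency already granted to bfqq.)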
+ */ -+bool wr_from_too_long(struct bfq_queue *bfqq) ++static bool wr_from_too_long(struct bfq_queue *bfqq) +{ + return bfqq->wr_coeff > 1 && + time_is_before_jiffies(bfqq->last_wr_start_finish + @@ -2205,7 +3051,7 @@ index d1f648d..3bc1f8b 100644 * structure otherwise. * * The OOM queue is not allowed to participate to cooperation: in fact, since -@@ -1442,6 +1847,18 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, +@@ -1441,6 +1907,18 @@ static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, * handle merging with the OOM queue would be quite complex and expensive * to maintain. Besides, in such a critical condition as an out of memory, * the benefits of queue merging may be little relevant, or even negligible. @@ -2224,7 +3070,7 @@ index d1f648d..3bc1f8b 100644 */ static struct bfq_queue * bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, -@@ -1451,16 +1868,32 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -1450,16 +1928,32 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (bfqq->new_bfqq) return bfqq->new_bfqq; @@ -2260,7 +3106,7 @@ index d1f648d..3bc1f8b 100644 unlikely(in_service_bfqq == &bfqd->oom_bfqq)) goto check_scheduled; -@@ -1482,7 +1915,15 @@ check_scheduled: +@@ -1481,7 +1975,15 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, BUG_ON(new_bfqq && bfqq->entity.parent != new_bfqq->entity.parent); @@ -2277,9 +3123,19 @@ index d1f648d..3bc1f8b 100644 bfq_may_be_close_cooperator(bfqq, new_bfqq)) return bfq_setup_merge(bfqq, new_bfqq); -@@ -1498,46 +1939,11 @@ static void bfq_bfqq_save_state(struct bfq_queue *bfqq) +@@ -1490,53 +1992,25 @@ bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, + + static void bfq_bfqq_save_state(struct bfq_queue *bfqq) + { ++ struct bfq_io_cq *bic = bfqq->bic; ++ + /* + * If !bfqq->bic, the queue is already shared or its requests + * have already been redirected to a shared queue; both idle window + * and weight raising state have already been saved. Do nothing. 
*/ - if (!bfqq->bic) +- if (!bfqq->bic) ++ if (!bic) return; - if (bfqq->bic->wr_time_left) - /* @@ -2315,17 +3171,26 @@ index d1f648d..3bc1f8b 100644 - bfq_bfqq_end_wr(bfqq); - } else - bfqq->bic->wr_time_left = 0; -+ - bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); - bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); - bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); - bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); +- bfqq->bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); +- bfqq->bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); +- bfqq->bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); +- bfqq->bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); - bfqq->bic->cooperations++; - bfqq->bic->failed_cooperations = 0; ++ ++ bic->saved_idle_window = bfq_bfqq_idle_window(bfqq); ++ bic->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); ++ bic->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); ++ bic->was_in_burst_list = !hlist_unhashed(&bfqq->burst_list_node); ++ bic->saved_wr_coeff = bfqq->wr_coeff; ++ bic->saved_wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; ++ bic->saved_last_wr_start_finish = bfqq->last_wr_start_finish; ++ bic->saved_wr_cur_max_time = bfqq->wr_cur_max_time; ++ BUG_ON(time_is_after_jiffies(bfqq->last_wr_start_finish)); } static void bfq_get_bic_reference(struct bfq_queue *bfqq) -@@ -1562,6 +1968,40 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, +@@ -1561,6 +2035,40 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, if (bfq_bfqq_IO_bound(bfqq)) bfq_mark_bfqq_IO_bound(new_bfqq); bfq_clear_bfqq_IO_bound(bfqq); @@ -2343,12 +3208,12 @@ index d1f648d..3bc1f8b 100644 + new_bfqq->wr_coeff = bfqq->wr_coeff; + new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; + new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; ++ new_bfqq->wr_start_at_switch_to_srt = bfqq->wr_start_at_switch_to_srt; + if (bfq_bfqq_busy(new_bfqq)) -+ bfqd->wr_busy_queues++; ++ bfqd->wr_busy_queues++; + new_bfqq->entity.prio_changed = 1; + bfq_log_bfqq(bfqd, new_bfqq, -+ "wr starting after merge with %d, " -+ "rais_max_time %u", ++ "wr start after merge with %d, rais_max_time %u", + bfqq->pid, + jiffies_to_msecs(bfqq->wr_cur_max_time)); + } @@ -2366,7 +3231,7 @@ index d1f648d..3bc1f8b 100644 /* * Grab a reference to the bic, to prevent it from being destroyed * before being possibly touched by a bfq_split_bfqq(). -@@ -1588,18 +2028,6 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, +@@ -1587,30 +2095,19 @@ bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, bfq_put_queue(bfqq); } @@ -2382,10 +3247,39 @@ index d1f648d..3bc1f8b 100644 - } -} - - static int bfq_allow_merge(struct request_queue *q, struct request *rq, - struct bio *bio) +-static int bfq_allow_merge(struct request_queue *q, struct request *rq, +- struct bio *bio) ++static int bfq_allow_bio_merge(struct request_queue *q, struct request *rq, ++ struct bio *bio) { -@@ -1637,30 +2065,86 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, + struct bfq_data *bfqd = q->elevator->elevator_data; ++ bool is_sync = op_is_sync(bio->bi_opf); + struct bfq_io_cq *bic; + struct bfq_queue *bfqq, *new_bfqq; + + /* + * Disallow merge of a sync bio into an async request. + */ +- if (bfq_bio_sync(bio) && !rq_is_sync(rq)) +- return 0; ++ if (is_sync && !rq_is_sync(rq)) ++ return false; + + /* + * Lookup the bfqq that this bio will be queued with. 
Allow +@@ -1619,9 +2116,9 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, + */ + bic = bfq_bic_lookup(bfqd, current->io_context); + if (!bic) +- return 0; ++ return false; + +- bfqq = bic_to_bfqq(bic, bfq_bio_sync(bio)); ++ bfqq = bic_to_bfqq(bic, is_sync); + /* + * We take advantage of this function to perform an early merge + * of the queues of possible cooperating processes. +@@ -1636,30 +2133,111 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, * to decide whether bio and rq can be merged. */ bfqq = new_bfqq; @@ -2397,6 +3291,12 @@ index d1f648d..3bc1f8b 100644 return bfqq == RQ_BFQQ(rq); } ++static int bfq_allow_rq_merge(struct request_queue *q, struct request *rq, ++ struct request *next) ++{ ++ return RQ_BFQQ(rq) == RQ_BFQQ(next); ++} ++ +/* + * Set the maximum time for the in-service queue to consume its + * budget. This prevents seeky processes from lowering the throughput. @@ -2407,6 +3307,7 @@ index d1f648d..3bc1f8b 100644 + struct bfq_queue *bfqq) +{ + unsigned int timeout_coeff; ++ + if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) + timeout_coeff = 1; + else @@ -2437,9 +3338,10 @@ index d1f648d..3bc1f8b 100644 + BUG_ON(bfqq == bfqd->in_service_queue); + BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); + -+ if (bfqq->wr_coeff > 1 && ++ if (time_is_before_jiffies(bfqq->last_wr_start_finish) && ++ bfqq->wr_coeff > 1 && + bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && -+ time_is_before_jiffies(bfqq->budget_timeout)) { ++ time_is_before_jiffies(bfqq->budget_timeout)) { + /* + * For soft real-time queues, move the start + * of the weight-raising period forward by the @@ -2464,8 +3366,25 @@ index d1f648d..3bc1f8b 100644 + * not only expires, but also remains with no + * request. + */ -+ bfqq->last_wr_start_finish += jiffies - -+ bfqq->budget_timeout; ++ if (time_after(bfqq->budget_timeout, ++ bfqq->last_wr_start_finish)) ++ bfqq->last_wr_start_finish += ++ jiffies - bfqq->budget_timeout; ++ else ++ bfqq->last_wr_start_finish = jiffies; ++ ++ if (time_is_after_jiffies(bfqq->last_wr_start_finish)) { ++ pr_crit( ++ "BFQ WARNING:last %lu budget %lu jiffies %lu", ++ bfqq->last_wr_start_finish, ++ bfqq->budget_timeout, ++ jiffies); ++ pr_crit("diff %lu", jiffies - ++ max_t(unsigned long, ++ bfqq->last_wr_start_finish, ++ bfqq->budget_timeout)); ++ bfqq->last_wr_start_finish = jiffies; ++ } + } + + bfq_set_budget_timeout(bfqd, bfqq); @@ -2478,7 +3397,7 @@ index d1f648d..3bc1f8b 100644 bfqd->in_service_queue = bfqq; } -@@ -1676,31 +2160,6 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) +@@ -1675,36 +2253,11 @@ static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) return bfqq; } @@ -2510,58 +3429,19 @@ index d1f648d..3bc1f8b 100644 static void bfq_arm_slice_timer(struct bfq_data *bfqd) { struct bfq_queue *bfqq = bfqd->in_service_queue; -@@ -1725,62 +2184,34 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) - * being too ill-treated, grant them a small fraction of the - * assigned budget before reducing the waiting time to - * BFQ_MIN_TT. This happened to help reduce latency. 
-- */ -- sl = bfqd->bfq_slice_idle; -- /* -- * Unless the queue is being weight-raised or the scenario is + struct bfq_io_cq *bic; +- unsigned long sl; ++ u32 sl; + + BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); + +@@ -1728,119 +2281,366 @@ static void bfq_arm_slice_timer(struct bfq_data *bfqd) + sl = bfqd->bfq_slice_idle; + /* + * Unless the queue is being weight-raised or the scenario is - * asymmetric, grant only minimum idle time if the queue either - * has been seeky for long enough or has already proved to be - * constantly seeky. -- */ -- if (bfq_sample_valid(bfqq->seek_samples) && -- ((BFQQ_SEEKY(bfqq) && bfqq->entity.service > -- bfq_max_budget(bfqq->bfqd) / 8) || -- bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1 && -- bfq_symmetric_scenario(bfqd)) -- sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT)); -- else if (bfqq->wr_coeff > 1) -- sl = sl * 3; -- bfqd->last_idling_start = ktime_get(); -- mod_timer(&bfqd->idle_slice_timer, jiffies + sl); --#ifdef CONFIG_BFQ_GROUP_IOSCHED -- bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); --#endif -- bfq_log(bfqd, "arm idle: %u/%u ms", -- jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle)); --} -- --/* -- * Set the maximum time for the in-service queue to consume its -- * budget. This prevents seeky processes from lowering the disk -- * throughput (always guaranteed with a time slice scheme as in CFQ). -- */ --static void bfq_set_budget_timeout(struct bfq_data *bfqd) --{ -- struct bfq_queue *bfqq = bfqd->in_service_queue; -- unsigned int timeout_coeff; -- if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) -- timeout_coeff = 1; -- else -- timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; -- -- bfqd->last_budget_start = ktime_get(); -- -- bfq_clear_bfqq_budget_new(bfqq); -- bfqq->budget_timeout = jiffies + -- bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff; -+ */ -+ sl = bfqd->bfq_slice_idle; -+ /* -+ * Unless the queue is being weight-raised or the scenario is + * asymmetric, grant only minimum idle time if the queue + * is seeky. A long idling is preserved for a weight-raised + * queue, or, more in general, in an asymemtric scenario, @@ -2569,69 +3449,432 @@ index d1f648d..3bc1f8b 100644 + * its reserved share of the throughput (in particular, it is + * needed if the queue has a higher weight than some other + * queue). -+ */ + */ +- if (bfq_sample_valid(bfqq->seek_samples) && +- ((BFQQ_SEEKY(bfqq) && bfqq->entity.service > +- bfq_max_budget(bfqq->bfqd) / 8) || +- bfq_bfqq_constantly_seeky(bfqq)) && bfqq->wr_coeff == 1 && + if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && -+ bfq_symmetric_scenario(bfqd)) -+ sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT)); + bfq_symmetric_scenario(bfqd)) +- sl = min(sl, msecs_to_jiffies(BFQ_MIN_TT)); +- else if (bfqq->wr_coeff > 1) +- sl = sl * 3; ++ sl = min_t(u32, sl, BFQ_MIN_TT); ++ + bfqd->last_idling_start = ktime_get(); +- mod_timer(&bfqd->idle_slice_timer, jiffies + sl); +-#ifdef CONFIG_BFQ_GROUP_IOSCHED ++ hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl), ++ HRTIMER_MODE_REL); + bfqg_stats_set_start_idle_time(bfqq_group(bfqq)); +-#endif +- bfq_log(bfqd, "arm idle: %u/%u ms", +- jiffies_to_msecs(sl), jiffies_to_msecs(bfqd->bfq_slice_idle)); ++ bfq_log(bfqd, "arm idle: %ld/%ld ms", ++ sl / NSEC_PER_MSEC, bfqd->bfq_slice_idle / NSEC_PER_MSEC); + } + + /* +- * Set the maximum time for the in-service queue to consume its +- * budget. This prevents seeky processes from lowering the disk +- * throughput (always guaranteed with a time slice scheme as in CFQ). 
++ * In autotuning mode, max_budget is dynamically recomputed as the
++ * amount of sectors transferred in timeout at the estimated peak
++ * rate. This enables BFQ to utilize a full timeslice with a full
++ * budget, even if the in-service queue is served at peak rate. And
++ * this maximises throughput with sequential workloads.
+ */
-static void bfq_set_budget_timeout(struct bfq_data *bfqd)
+static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd)
 {
- struct bfq_queue *bfqq = bfqd->in_service_queue;
- unsigned int timeout_coeff;
++ return (u64)bfqd->peak_rate * USEC_PER_MSEC *
++ jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
+}
 
- if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
- timeout_coeff = 1;
- else
- timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight;
++/*
++ * Update parameters related to throughput and responsiveness, as a
++ * function of the estimated peak rate. See comments on
++ * bfq_calc_max_budget(), and on T_slow and T_fast arrays.
++ */
++static void update_thr_responsiveness_params(struct bfq_data *bfqd)
++{
++ int dev_type = blk_queue_nonrot(bfqd->queue);
++
++ if (bfqd->bfq_user_max_budget == 0) {
++ bfqd->bfq_max_budget =
++ bfq_calc_max_budget(bfqd);
++ BUG_ON(bfqd->bfq_max_budget < 0);
++ bfq_log(bfqd, "new max_budget = %d",
++ bfqd->bfq_max_budget);
++ }
 
- bfqd->last_budget_start = ktime_get();
++ if (bfqd->device_speed == BFQ_BFQD_FAST &&
++ bfqd->peak_rate < device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_SLOW;
++ bfqd->RT_prod = R_slow[dev_type] *
++ T_slow[dev_type];
++ } else if (bfqd->device_speed == BFQ_BFQD_SLOW &&
++ bfqd->peak_rate > device_speed_thresh[dev_type]) {
++ bfqd->device_speed = BFQ_BFQD_FAST;
++ bfqd->RT_prod = R_fast[dev_type] *
++ T_fast[dev_type];
++ }
 
- bfq_clear_bfqq_budget_new(bfqq);
- bfqq->budget_timeout = jiffies +
- bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] * timeout_coeff;
++ bfq_log(bfqd,
++"dev_type %s dev_speed_class = %s (%llu sects/sec), thresh %llu sects/sec",
++ dev_type == 0 ? "ROT" : "NONROT",
++ bfqd->device_speed == BFQ_BFQD_FAST ? "FAST" : "SLOW",
++ bfqd->device_speed == BFQ_BFQD_FAST ?
++ (USEC_PER_SEC*(u64)R_fast[dev_type])>>BFQ_RATE_SHIFT :
++ (USEC_PER_SEC*(u64)R_slow[dev_type])>>BFQ_RATE_SHIFT,
++ (USEC_PER_SEC*(u64)device_speed_thresh[dev_type])>>
++ BFQ_RATE_SHIFT);
++}
 
- bfq_log_bfqq(bfqd, bfqq, "set budget_timeout %u",
- jiffies_to_msecs(bfqd->bfq_timeout[bfq_bfqq_sync(bfqq)] *
- timeout_coeff));
+static void bfq_reset_rate_computation(struct bfq_data *bfqd, struct request *rq)
++{
++ if (rq != NULL) { /* new rq dispatch now, reset accordingly */
++ bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns();
++ bfqd->peak_rate_samples = 1;
++ bfqd->sequential_samples = 0;
++ bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size =
++ blk_rq_sectors(rq);
++ } else /* no new rq dispatched, just reset the number of samples */
++ bfqd->peak_rate_samples = 0; /* full re-init on next disp. */
++
++ bfq_log(bfqd,
++ "reset_rate_computation at end, sample %u/%u tot_sects %llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched);
 }
 
-/*
- * Move request from internal lists to the request queue dispatch list. 
-+ * Move request from internal lists to the dispatch list of the request queue - */ - static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) +- */ +-static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) ++static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) { - struct bfq_data *bfqd = q->elevator->elevator_data; - struct bfq_queue *bfqq = RQ_BFQQ(rq); +- struct bfq_queue *bfqq = RQ_BFQQ(rq); ++ u32 rate, weight, divisor; /* -@@ -1794,15 +2225,9 @@ static void bfq_dispatch_insert(struct request_queue *q, struct request *rq) - * incrementing bfqq->dispatched. +- * For consistency, the next instruction should have been executed +- * after removing the request from the queue and dispatching it. +- * We execute instead this instruction before bfq_remove_request() +- * (and hence introduce a temporary inconsistency), for efficiency. +- * In fact, in a forced_dispatch, this prevents two counters related +- * to bfqq->dispatched to risk to be uselessly decremented if bfqq +- * is not in service, and then to be incremented again after +- * incrementing bfqq->dispatched. ++ * For the convergence property to hold (see comments on ++ * bfq_update_peak_rate()) and for the assessment to be ++ * reliable, a minimum number of samples must be present, and ++ * a minimum amount of time must have elapsed. If not so, do ++ * not compute new rate. Just reset parameters, to get ready ++ * for a new evaluation attempt. */ - bfqq->dispatched++; -+ - bfq_remove_request(rq); - elv_dispatch_sort(q, rq); -- +- bfqq->dispatched++; +- bfq_remove_request(rq); +- elv_dispatch_sort(q, rq); ++ if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || ++ bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) { ++ bfq_log(bfqd, ++ "update_rate_reset: only resetting, delta_first %lluus samples %d", ++ bfqd->delta_from_first>>10, bfqd->peak_rate_samples); ++ goto reset_computation; ++ } + - if (bfq_bfqq_sync(bfqq)) - bfqd->sync_flight++; -#ifdef CONFIG_BFQ_GROUP_IOSCHED - bfqg_stats_update_dispatch(bfqq_group(bfqq), blk_rq_bytes(rq), - rq->cmd_flags); -#endif ++ /* ++ * If a new request completion has occurred after last ++ * dispatch, then, to approximate the rate at which requests ++ * have been served by the device, it is more precise to ++ * extend the observation interval to the last completion. ++ */ ++ bfqd->delta_from_first = ++ max_t(u64, bfqd->delta_from_first, ++ bfqd->last_completion - bfqd->first_dispatch); ++ ++ BUG_ON(bfqd->delta_from_first == 0); ++ /* ++ * Rate computed in sects/usec, and not sects/nsec, for ++ * precision issues. 
++ */
++ rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT,
++ div_u64(bfqd->delta_from_first, NSEC_PER_USEC));
++
++ bfq_log(bfqd,
++"update_rate_reset: tot_sects %llu delta_first %lluus rate %llu sects/s (%d)",
++ bfqd->tot_sectors_dispatched, bfqd->delta_from_first>>10,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ rate > 20<<BFQ_RATE_SHIFT);
++
++ /*
++ * Peak rate not updated if:
++ * - the percentage of sequential dispatches is below 3/4 of the
++ * total, and rate is below the current estimated peak rate
++ * - rate is unreasonably high (> 20M sectors/sec)
++ */
++ if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 &&
++ rate <= bfqd->peak_rate) ||
++ rate > 20<<BFQ_RATE_SHIFT) {
++ bfq_log(bfqd,
++ "update_rate_reset: goto reset, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ goto reset_computation;
++ } else {
++ bfq_log(bfqd,
++ "update_rate_reset: do update, samples %u/%u rate/peak %llu/%llu",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ ((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT),
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ }
++
++ /*
++ * We have to update the peak rate, at last! To this purpose,
++ * we use a low-pass filter. We compute the smoothing constant
++ * of the filter as a function of the 'weight' of the new
++ * measured rate.
++ *
++ * As can be seen in next formulas, we define this weight as a
++ * quantity proportional to how sequential the workload is,
++ * and to how long the observation time interval is.
++ *
++ * The weight runs from 0 to 8. The maximum value of the
++ * weight, 8, yields the minimum value for the smoothing
++ * constant. At this minimum value for the smoothing constant,
++ * the measured rate contributes for half of the next value of
++ * the estimated peak rate.
++ *
++ * So, the first step is to compute the weight as a function
++ * of how sequential the workload is. Note that the weight
++ * cannot reach 9, because bfqd->sequential_samples cannot
++ * become equal to bfqd->peak_rate_samples, which, in its
++ * turn, holds true because bfqd->sequential_samples is not
++ * incremented for the first sample.
++ */
++ weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples;
++
++ /*
++ * Second step: further refine the weight as a function of the
++ * duration of the observation interval.
++ */
++ weight = min_t(u32, 8,
++ div_u64(weight * bfqd->delta_from_first,
++ BFQ_RATE_REF_INTERVAL));
++
++ /*
++ * Divisor ranging from 10, for minimum weight, to 2, for
++ * maximum weight. 
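++ * (Worked example: a fully sequential workload observed for the
++ * whole reference interval gets weight 8, hence divisor 2, and
++ * the update below reduces to peak_rate/2 + rate/2; with weight 1
++ * the divisor is 9, so a new sample contributes only 1/9 of the
++ * next estimate.)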
++ */
++ divisor = 10 - weight;
++ BUG_ON(divisor == 0);
++
++ /*
++ * Finally, update peak rate:
++ *
++ * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor
++ */
++ bfqd->peak_rate *= divisor-1;
++ bfqd->peak_rate /= divisor;
++ rate /= divisor; /* smoothing constant alpha = 1/divisor */
++
++ bfq_log(bfqd,
++ "update_rate_reset: divisor %d tmp_peak_rate %llu tmp_rate %u",
++ divisor,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT),
++ (u32)((USEC_PER_SEC*(u64)rate)>>BFQ_RATE_SHIFT));
++
++ BUG_ON(bfqd->peak_rate == 0);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++ bfqd->peak_rate += rate;
++ update_thr_responsiveness_params(bfqd);
++ BUG_ON(bfqd->peak_rate > 20<<BFQ_RATE_SHIFT);
++
++reset_computation:
++ bfq_reset_rate_computation(bfqd, rq);
++}
++
++/*
++ * Update the read/write peak rate (the main quantity used for
++ * auto-tuning, see update_thr_responsiveness_params()).
++ *
++ * It is not trivial to estimate the peak rate (correctly): because of
++ * the presence of sw and hw queues between the scheduler and the
++ * device components that finally serve I/O requests, it is hard to
++ * say exactly when a given dispatched request is served inside the
++ * device, and for how long. As a consequence, it is hard to know
++ * precisely at what rate a given set of requests is actually served
++ * by the device.
++ *
++ * On the opposite end, the dispatch time of any request is trivially
++ * available, and, from this piece of information, the "dispatch rate"
++ * of requests can be immediately computed. So, the idea in the next
++ * function is to use what is known, namely request dispatch times,
++ * plus, when useful, request completion times, to estimate what is
++ * unknown, namely in-device request service rate.
++ *
++ * The main issue is that, because of the above facts, the rate at
++ * which a certain set of requests is dispatched over a certain time
++ * interval can vary greatly with respect to the rate at which the
++ * same requests are then served. But, since the size of any
++ * intermediate queue is limited, and the service scheme is lossless
++ * (no request is silently dropped), the following obvious convergence
++ * property holds: the number of requests dispatched MUST become
++ * closer and closer to the number of requests completed as the
++ * observation interval grows. This is the key property used to
++ * estimate the peak service rate as a function of the observed
++ * dispatch rate. The function assumes to be invoked on every request
++ * dispatch.
++ */
++static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq)
++{
++ u64 now_ns = ktime_get_ns();
++
++ if (bfqd->peak_rate_samples == 0) { /* first dispatch */
++ bfq_log(bfqd,
++ "update_peak_rate: goto reset, samples %d",
++ bfqd->peak_rate_samples);
++ bfq_reset_rate_computation(bfqd, rq);
++ goto update_last_values; /* will add one sample */
++ }

- rq = rq_entry_fifo(bfqq->fifo.next);
- if (bfq_bfqq_fifo_expire(bfqq))
- return NULL;

++ /*
++ * Device idle for very long: the observation interval lasting
++ * up to this dispatch cannot be a valid observation interval
++ * for computing a new peak rate (similarly to the late-
++ * completion event in bfq_completed_request()). Go to
++ * update_rate_and_reset to have the following three steps
++ * taken:
++ * - close the observation interval at the last (previous)
++ * request dispatch or completion
++ * - compute rate, if possible, for that observation interval
++ * - start a new observation interval with this dispatch
++ */
++ if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC &&
++ bfqd->rq_in_driver == 0) {
++ bfq_log(bfqd,
++"update_peak_rate: jumping to updating&resetting delta_last %lluus samples %d",
++ (now_ns - bfqd->last_dispatch)>>10,
++ bfqd->peak_rate_samples);
++ goto update_rate_and_reset;
++ }

- bfq_mark_bfqq_fifo_expire(bfqq);
++ /* Update sampling information */
++ bfqd->peak_rate_samples++;

- if (list_empty(&bfqq->fifo))
- return NULL;
++ if ((bfqd->rq_in_driver > 0 ||
++ now_ns - bfqd->last_completion < BFQ_MIN_TT)
++ && get_sdist(bfqd->last_position, rq) < BFQQ_SEEK_THR)
++ bfqd->sequential_samples++;

- rq = rq_entry_fifo(bfqq->fifo.next);
++ bfqd->tot_sectors_dispatched += blk_rq_sectors(rq);

- if (time_before(jiffies, rq->fifo_time))
- return NULL;
++ /* Reset max observed rq size every 32 dispatches */
++ if (likely(bfqd->peak_rate_samples % 32))
++ bfqd->last_rq_max_size =
++ max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size);
++ else
++ bfqd->last_rq_max_size = blk_rq_sectors(rq);

- return rq;
++ bfqd->delta_from_first = now_ns - bfqd->first_dispatch;
++
++ bfq_log(bfqd,
++ "update_peak_rate: added samples %u/%u tot_sects %llu delta_first %lluus",
++ bfqd->peak_rate_samples, bfqd->sequential_samples,
++ bfqd->tot_sectors_dispatched,
++ bfqd->delta_from_first>>10);
++
++ /* Target observation interval not yet reached, go on sampling */
++ if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL)
++ goto update_last_values;
++
++update_rate_and_reset:
++ bfq_update_rate_reset(bfqd, rq);
++update_last_values:
++ bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
++ bfqd->last_dispatch = now_ns;
++
++ bfq_log(bfqd,
++ "update_peak_rate: delta_first %lluus last_pos %llu peak_rate %llu",
++ (now_ns - bfqd->first_dispatch)>>10,
++ (unsigned long long) bfqd->last_position,
++ ((USEC_PER_SEC*(u64)bfqd->peak_rate)>>BFQ_RATE_SHIFT));
++ bfq_log(bfqd,
++ "update_peak_rate: samples at end %d", bfqd->peak_rate_samples);
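++ /*
++ * (Numerical illustration of the sampling above: 16384 sectors
++ * dispatched over an observation interval that, extended to the
++ * last completion, spans 64000 us yield a sampled rate of
++ * 16384/64000 = 0.256 sectors/usec, i.e. roughly 131 MB/s with
++ * 512-byte sectors; whether this sample moves the stored peak
++ * rate then depends on the sequential-sample ratio and on the
++ * low-pass filter in bfq_update_rate_reset().)
++ */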
++}

-static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
-{
- struct bfq_entity *entity = &bfqq->entity;
-
- return entity->budget - entity->service;
-}
-
+/*
+ * Move request from internal lists to the dispatch list of the request queue
+ */
+static void bfq_dispatch_insert(struct request_queue *q, struct request *rq)
+{
+ struct bfq_queue *bfqq = RQ_BFQQ(rq);
+
+ /*
+ * For consistency, the next instruction should have been executed
+ * after removing the request from the queue and dispatching it.
+ * We execute instead this instruction before bfq_remove_request()
+ * (and hence introduce a temporary inconsistency), for efficiency.
+ * In fact, in a forced_dispatch, this prevents two counters related
+ * to bfqq->dispatched to risk to be uselessly decremented if bfqq
+ * is not in service, and then to be incremented again after
+ * incrementing bfqq->dispatched.
+ */
+ bfqq->dispatched++;
+ bfq_update_peak_rate(q->elevator->elevator_data, rq);
+
+ bfq_remove_request(rq);
+ elv_dispatch_sort(q, rq);
+}

 static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
 BUG_ON(bfqq != bfqd->in_service_queue);

- __bfq_bfqd_reset_in_service(bfqd);
-
 /*
 * If this bfqq is shared between multiple processes, check
 * to make sure that those processes are still issuing I/Os
@@ -1851,20 +2651,30 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 bfq_mark_bfqq_split_coop(bfqq);

 if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
- /*
- * Overloading budget_timeout field to store the time
- * at which the queue remains with no backlog; used by
- * the weight-raising mechanism.
- */
- bfqq->budget_timeout = jiffies;
- bfq_del_bfqq_busy(bfqd, bfqq, 1);
+ if (bfqq->dispatched == 0)
+ /*
+ * Overloading budget_timeout field to store
+ * the time at which the queue remains with no
+ * backlog; used by the weight-raising
+ * mechanism.
+ */
+ bfqq->budget_timeout = jiffies;
+
++ bfq_del_bfqq_busy(bfqd, bfqq, true);
 } else {
- bfq_activate_bfqq(bfqd, bfqq);
+ bfq_requeue_bfqq(bfqd, bfqq);
 /*
 * Resort priority tree of potential close cooperators.
 */
 bfq_pos_tree_add_move(bfqd, bfqq);
 }
++
++ /*
++ * All in-service entities must have been properly deactivated
++ * or requeued before executing the next function, which
++ * resets all in-service entities as no more in service. 
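++ * (Both paths above satisfy this: either bfqq has just been
++ * removed from the busy queues via bfq_del_bfqq_busy(), or it
++ * has been put back in the queueing data structures via
++ * bfq_requeue_bfqq().)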
++ */ ++ __bfq_bfqd_reset_in_service(bfqd); + } + + /** +@@ -1883,10 +2693,19 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, struct request *next_rq; int budget, min_budget; @@ -2675,7 +3935,7 @@ index d1f648d..3bc1f8b 100644 bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); -@@ -1894,7 +2325,7 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +@@ -1895,7 +2714,7 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); @@ -2684,7 +3944,7 @@ index d1f648d..3bc1f8b 100644 switch (reason) { /* * Caveat: in all the following cases we trade latency -@@ -1936,14 +2367,10 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +@@ -1937,14 +2756,10 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, break; case BFQ_BFQQ_BUDGET_TIMEOUT: /* @@ -2703,7 +3963,7 @@ index d1f648d..3bc1f8b 100644 */ budget = min(budget * 2, bfqd->bfq_max_budget); break; -@@ -1960,17 +2387,49 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +@@ -1961,17 +2776,49 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, budget = min(budget * 4, bfqd->bfq_max_budget); break; case BFQ_BFQQ_NO_MORE_REQUESTS: @@ -2760,7 +4020,7 @@ index d1f648d..3bc1f8b 100644 */ budget = bfqd->bfq_max_budget; -@@ -1981,65 +2440,105 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, +@@ -1982,160 +2829,120 @@ static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); /* @@ -2795,39 +4055,34 @@ index d1f648d..3bc1f8b 100644 } -static unsigned long bfq_calc_max_budget(u64 peak_rate, u64 timeout) -+static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) - { +-{ - unsigned long max_budget; - - /* - * The max_budget calculated when autotuning is equal to the +- /* +- * The max_budget calculated when autotuning is equal to the - * amount of sectors transfered in timeout_sync at the -+ * amount of sectors transfered in timeout at the - * estimated peak rate. - */ +- * estimated peak rate. +- */ - max_budget = (unsigned long)(peak_rate * 1000 * - timeout >> BFQ_RATE_SHIFT); - - return max_budget; -+ return bfqd->peak_rate * 1000 * jiffies_to_msecs(bfqd->bfq_timeout) >> -+ BFQ_RATE_SHIFT; - } - +-} +- /* - * In addition to updating the peak rate, checks whether the process - * is "slow", and returns 1 if so. This slow flag is used, in addition - * to the budget timeout, to reduce the amount of service provided to - * seeky processes, and hence reduce their chances to lower the - * throughput. See the code for more details. -+ * Update the read peak rate (quantity used for auto-tuning) as a -+ * function of the rate at which bfqq has been served, and check -+ * whether the process associated with bfqq is "slow". Return true if -+ * the process is slow. The slow flag is used, in addition to the -+ * budget timeout, to reduce the amount of service provided to seeky -+ * processes, and hence reduce their chances to lower the -+ * throughput. More details in the body of the function. ++ * Return true if the process associated with bfqq is "slow". The slow ++ * flag is used, in addition to the budget timeout, to reduce the ++ * amount of service provided to seeky processes, and thus reduce ++ * their chances to lower the throughput. More details in the comments ++ * on the function bfq_bfqq_expire(). 
+ * -+ * An important observation is in order: with devices with internal ++ * An important observation is in order: as discussed in the comments ++ * on the function bfq_update_peak_rate(), with devices with internal + * queues, it is hard if ever possible to know when and for how long + * an I/O request is processed by the device (apart from the trivial + * I/O pattern where a new request is dispatched only after the @@ -2835,29 +4090,32 @@ index d1f648d..3bc1f8b 100644 + * the real rate at which the I/O requests of each bfq_queue are + * served. In fact, for an I/O scheduler like BFQ, serving a + * bfq_queue means just dispatching its requests during its service -+ * slot, i.e., until the budget of the queue is exhausted, or the -+ * queue remains idle, or, finally, a timeout fires. But, during the -+ * service slot of a bfq_queue, the device may be still processing -+ * requests of bfq_queues served in previous service slots. On the -+ * opposite end, the requests of the in-service bfq_queue may be -+ * completed after the service slot of the queue finishes. Anyway, -+ * unless more sophisticated solutions are used (where possible), the -+ * sum of the sizes of the requests dispatched during the service slot -+ * of a bfq_queue is probably the only approximation available for -+ * the service received by the bfq_queue during its service slot. And, -+ * as written above, this sum is the quantity used in this function to -+ * evaluate the peak rate. ++ * slot (i.e., until the budget of the queue is exhausted, or the ++ * queue remains idle, or, finally, a timeout fires). But, during the ++ * service slot of a bfq_queue, around 100 ms at most, the device may ++ * be even still processing requests of bfq_queues served in previous ++ * service slots. On the opposite end, the requests of the in-service ++ * bfq_queue may be completed after the service slot of the queue ++ * finishes. ++ * ++ * Anyway, unless more sophisticated solutions are used ++ * (where possible), the sum of the sizes of the requests dispatched ++ * during the service slot of a bfq_queue is probably the only ++ * approximation available for the service received by the bfq_queue ++ * during its service slot. And this sum is the quantity used in this ++ * function to evaluate the I/O speed of a process. */ - static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, +-static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bool compensate, enum bfqq_expiration reason) ++static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, + bool compensate, enum bfqq_expiration reason, + unsigned long *delta_ms) { - u64 bw, usecs, expected, timeout; - ktime_t delta; -+ u64 bw, bwdiv10, delta_usecs, delta_ms_tmp; +- int update = 0; + ktime_t delta_ktime; - int update = 0; ++ u32 delta_usecs; + bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ - if (!bfq_bfqq_sync(bfqq) || bfq_bfqq_budget_new(bfqq)) @@ -2871,133 +4129,110 @@ index d1f648d..3bc1f8b 100644 - delta = ktime_get(); - delta = ktime_sub(delta, bfqd->last_budget_start); - usecs = ktime_to_us(delta); +- +- /* Don't trust short/unrealistic values. */ +- if (usecs < 100 || usecs >= LONG_MAX) +- return false; +- +- /* +- * Calculate the bandwidth for the last slice. We use a 64 bit +- * value to store the peak rate, in sectors per usec in fixed +- * point math. We do so to have enough precision in the estimate +- * and to avoid overflows. 
+- */ +- bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT; +- do_div(bw, (unsigned long)usecs); + delta_ktime = ktime_get(); + delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); + delta_usecs = ktime_to_us(delta_ktime); - - /* Don't trust short/unrealistic values. */ -- if (usecs < 100 || usecs >= LONG_MAX) -- return false; ++ ++ /* don't trust short/unrealistic values. */ + if (delta_usecs < 1000 || delta_usecs >= LONG_MAX) { + if (blk_queue_nonrot(bfqd->queue)) -+ *delta_ms = BFQ_MIN_TT; /* give same worst-case -+ guarantees as -+ idling for seeky -+ */ -+ else /* Charge at least one seek */ -+ *delta_ms = jiffies_to_msecs(bfq_slice_idle); ++ /* ++ * give same worst-case guarantees as idling ++ * for seeky ++ */ ++ *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; ++ else /* charge at least one seek */ ++ *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; ++ ++ bfq_log(bfqd, "bfq_bfqq_is_slow: unrealistic %u", delta_usecs); ++ + return slow; + } -+ -+ delta_ms_tmp = delta_usecs; -+ do_div(delta_ms_tmp, 1000); -+ *delta_ms = delta_ms_tmp; - /* - * Calculate the bandwidth for the last slice. We use a 64 bit -@@ -2048,32 +2547,51 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, - * and to avoid overflows. - */ - bw = (u64)bfqq->entity.service << BFQ_RATE_SHIFT; -- do_div(bw, (unsigned long)usecs); -- - timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); -+ do_div(bw, (unsigned long)delta_usecs); ++ *delta_ms = delta_usecs / USEC_PER_MSEC; -+ bfq_log(bfqd, "measured bw = %llu sects/sec", -+ (1000000*bw)>>BFQ_RATE_SHIFT); /* - * Use only long (> 20ms) intervals to filter out spikes for - * the peak rate estimation. +- * Use only long (> 20ms) intervals to filter out spikes for +- * the peak rate estimation. ++ * Use only long (> 20ms) intervals to filter out excessive ++ * spikes in service rate estimation. */ - if (usecs > 20000) { -+ if (delta_usecs > 20000) { -+ bool fully_sequential = bfqq->seek_history == 0; -+ /* -+ * Soft real-time queues are not good candidates for -+ * evaluating bw, as they are likely to be slow even -+ * if sequential. 
-+ */ -+ bool non_soft_rt = bfqq->wr_coeff == 1 || -+ bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time; -+ bool consumed_large_budget = -+ reason == BFQ_BFQQ_BUDGET_EXHAUSTED && -+ bfqq->entity.budget >= bfqd->bfq_max_budget * 2 / 3; -+ bool served_for_long_time = -+ reason == BFQ_BFQQ_BUDGET_TIMEOUT || -+ consumed_large_budget; -+ -+ BUG_ON(bfqq->seek_history == 0 && -+ hweight32(bfqq->seek_history) != 0); -+ - if (bw > bfqd->peak_rate || +- if (bw > bfqd->peak_rate || - (!BFQQ_SEEKY(bfqq) && - reason == BFQ_BFQQ_BUDGET_TIMEOUT)) { - bfq_log(bfqd, "measured bw =%llu", bw); -+ (bfq_bfqq_sync(bfqq) && fully_sequential && non_soft_rt && -+ served_for_long_time)) { - /* - * To smooth oscillations use a low-pass filter with +- /* +- * To smooth oscillations use a low-pass filter with - * alpha=7/8, i.e., - * new_rate = (7/8) * old_rate + (1/8) * bw -+ * alpha=9/10, i.e., -+ * new_rate = (9/10) * old_rate + (1/10) * bw - */ +- */ - do_div(bw, 8); - if (bw == 0) - return 0; - bfqd->peak_rate *= 7; - do_div(bfqd->peak_rate, 8); - bfqd->peak_rate += bw; -+ bwdiv10 = bw; -+ do_div(bwdiv10, 10); -+ if (bwdiv10 == 0) -+ return false; /* bw too low to be used */ -+ bfqd->peak_rate *= 9; -+ do_div(bfqd->peak_rate, 10); -+ bfqd->peak_rate += bwdiv10; - update = 1; +- update = 1; - bfq_log(bfqd, "new peak_rate=%llu", bfqd->peak_rate); -+ bfq_log(bfqd, "new peak_rate = %llu sects/sec", -+ (1000000*bfqd->peak_rate)>>BFQ_RATE_SHIFT); - } - - update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1; -@@ -2086,9 +2604,8 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, - int dev_type = blk_queue_nonrot(bfqd->queue); - if (bfqd->bfq_user_max_budget == 0) { - bfqd->bfq_max_budget = +- } +- +- update |= bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES - 1; +- +- if (bfqd->peak_rate_samples < BFQ_PEAK_RATE_SAMPLES) +- bfqd->peak_rate_samples++; +- +- if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES && +- update) { +- int dev_type = blk_queue_nonrot(bfqd->queue); +- +- if (bfqd->bfq_user_max_budget == 0) { +- bfqd->bfq_max_budget = - bfq_calc_max_budget(bfqd->peak_rate, - timeout); - bfq_log(bfqd, "new max_budget=%d", -+ bfq_calc_max_budget(bfqd); -+ bfq_log(bfqd, "new max_budget = %d", - bfqd->bfq_max_budget); - } - if (bfqd->device_speed == BFQ_BFQD_FAST && -@@ -2102,38 +2619,35 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, - bfqd->RT_prod = R_fast[dev_type] * - T_fast[dev_type]; - } -+ bfq_log(bfqd, "dev_speed_class = %d (%d sects/sec), " -+ "thresh %d setcs/sec", -+ bfqd->device_speed, -+ bfqd->device_speed == BFQ_BFQD_FAST ? -+ (1000000*R_fast[dev_type])>>BFQ_RATE_SHIFT : -+ (1000000*R_slow[dev_type])>>BFQ_RATE_SHIFT, -+ (1000000*device_speed_thresh[dev_type])>> -+ BFQ_RATE_SHIFT); - } +- bfqd->bfq_max_budget); +- } +- if (bfqd->device_speed == BFQ_BFQD_FAST && +- bfqd->peak_rate < device_speed_thresh[dev_type]) { +- bfqd->device_speed = BFQ_BFQD_SLOW; +- bfqd->RT_prod = R_slow[dev_type] * +- T_slow[dev_type]; +- } else if (bfqd->device_speed == BFQ_BFQD_SLOW && +- bfqd->peak_rate > device_speed_thresh[dev_type]) { +- bfqd->device_speed = BFQ_BFQD_FAST; +- bfqd->RT_prod = R_fast[dev_type] * +- T_fast[dev_type]; +- } +- } ++ if (delta_usecs > 20000) { + /* -+ * Caveat: processes doing IO in the slower disk zones -+ * tend to be slow(er) even if not seeky. In this -+ * respect, the estimated peak rate is likely to be an -+ * average over the disk surface. 
Accordingly, to not -+ * be too harsh with unlucky processes, a process is -+ * deemed slow only if its bw has been lower than half -+ * of the estimated peak rate. ++ * Caveat for rotational devices: processes doing I/O ++ * in the slower disk zones tend to be slow(er) even ++ * if not seeky. In this respect, the estimated peak ++ * rate is likely to be an average over the disk ++ * surface. Accordingly, to not be too harsh with ++ * unlucky processes, a process is deemed slow only if ++ * its rate has been lower than half of the estimated ++ * peak rate. + */ -+ slow = bw < bfqd->peak_rate / 2; ++ slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; ++ bfq_log(bfqd, "bfq_bfqq_is_slow: relative rate %d/%d", ++ bfqq->entity.service, bfqd->bfq_max_budget); } - /* @@ -3019,12 +4254,7 @@ index d1f648d..3bc1f8b 100644 - * before the budget timeout expiration. - */ - expected = bw * 1000 * timeout >> BFQ_RATE_SHIFT; -+ bfq_log_bfqq(bfqd, bfqq, -+ "update_peak_rate: bw %llu sect/s, peak rate %llu, " -+ "slow %d", -+ (1000000*bw)>>BFQ_RATE_SHIFT, -+ (1000000*bfqd->peak_rate)>>BFQ_RATE_SHIFT, -+ bw < bfqd->peak_rate / 2); ++ bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); - /* - * Caveat: processes doing IO in the slower disk zones will @@ -3039,14 +4269,12 @@ index d1f648d..3bc1f8b 100644 } /* -@@ -2191,6 +2705,15 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -2193,20 +3000,35 @@ static bool bfq_update_peak_rate(struct bfq_data *bfqd, struct bfq_queue *bfqq, static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, struct bfq_queue *bfqq) { + bfq_log_bfqq(bfqd, bfqq, -+ "softrt_next_start: service_blkg %lu " -+ "soft_rate %u sects/sec" -+ "interval %u", ++"softrt_next_start: service_blkg %lu soft_rate %u sects/sec interval %u", + bfqq->service_from_backlogged, + bfqd->bfq_wr_max_softrt_rate, + jiffies_to_msecs(HZ * bfqq->service_from_backlogged / @@ -3055,7 +4283,8 @@ index d1f648d..3bc1f8b 100644 return max(bfqq->last_idle_bklogged + HZ * bfqq->service_from_backlogged / bfqd->bfq_wr_max_softrt_rate, -@@ -2198,13 +2721,21 @@ static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, +- jiffies + bfqq->bfqd->bfq_slice_idle + 4); ++ jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); } /* @@ -3064,25 +4293,25 @@ index d1f648d..3bc1f8b 100644 - * time_is_before_jiffies(). + * Return the farthest future time instant according to jiffies + * macros. -+ */ + */ +-static unsigned long bfq_infinity_from_now(unsigned long now) +static unsigned long bfq_greatest_from_now(void) -+{ + { +- return now + ULONG_MAX / 2; + return jiffies + MAX_JIFFY_OFFSET; +} + +/* + * Return the farthest past time instant according to jiffies + * macros. - */ --static unsigned long bfq_infinity_from_now(unsigned long now) ++ */ +static unsigned long bfq_smallest_from_now(void) - { -- return now + ULONG_MAX / 2; ++{ + return jiffies - MAX_JIFFY_OFFSET; } /** -@@ -2214,28 +2745,24 @@ static unsigned long bfq_infinity_from_now(unsigned long now) +@@ -2216,28 +3038,24 @@ static unsigned long bfq_infinity_from_now(unsigned long now) * @compensate: if true, compensate for the time spent idling. * @reason: the reason causing the expiration. 
* @@ -3128,33 +4357,23 @@ index d1f648d..3bc1f8b 100644 */ static void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, -@@ -2243,40 +2770,53 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2245,41 +3063,52 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, enum bfqq_expiration reason) { bool slow; + unsigned long delta = 0; + struct bfq_entity *entity = &bfqq->entity; -+ + BUG_ON(bfqq != bfqd->in_service_queue); /* - * Update disk peak rate for autotuning and check whether the -+ * Update device peak rate for autotuning and check whether the - * process is slow (see bfq_update_peak_rate). - */ -- slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason); -+ slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason, &delta); - - /* -- * As above explained, 'punish' slow (i.e., seeky), timed-out -- * and async queues, to favor sequential sync workloads. -- * -- * Processes doing I/O in the slower disk zones will tend to be -- * slow(er) even if not seeky. Hence, since the estimated peak -- * rate is actually an average over the disk surface, these -- * processes may timeout just for bad luck. To avoid punishing -- * them we do not charge a full budget to a process that -- * succeeded in consuming at least 2/3 of its budget. +- * process is slow (see bfq_update_peak_rate). ++ * Check whether the process is slow (see bfq_bfqq_is_slow). ++ */ ++ slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, reason, &delta); ++ ++ /* + * Increase service_from_backlogged before next statement, + * because the possible next invocation of + * bfq_bfqq_charge_time would likely inflate @@ -3163,17 +4382,22 @@ index d1f648d..3bc1f8b 100644 + * heuristic to correctly compute the bandwidth consumed by + * bfqq. */ -- if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT && -- bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)) -- bfq_bfqq_charge_full_budget(bfqq); +- slow = bfq_update_peak_rate(bfqd, bfqq, compensate, reason); + bfqq->service_from_backlogged += entity->service; -- bfqq->service_from_backlogged += bfqq->entity.service; -+ /* + /* +- * As above explained, 'punish' slow (i.e., seeky), timed-out +- * and async queues, to favor sequential sync workloads. + * As above explained, charge slow (typically seeky) and + * timed-out queues with the time and not the service + * received, to favor sequential workloads. -+ * + * +- * Processes doing I/O in the slower disk zones will tend to be +- * slow(er) even if not seeky. Hence, since the estimated peak +- * rate is actually an average over the disk surface, these +- * processes may timeout just for bad luck. To avoid punishing +- * them we do not charge a full budget to a process that +- * succeeded in consuming at least 2/3 of its budget. + * Processes doing I/O in the slower disk zones will tend to + * be slow(er) even if not seeky. Therefore, since the + * estimated peak rate is actually an average over the disk @@ -3183,7 +4407,12 @@ index d1f648d..3bc1f8b 100644 + * allows BFQ to preserve enough elasticity to still perform + * bandwidth, and not time, distribution with little unlucky + * or quasi-sequential processes. 
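+ * (Example: a slow queue that transferred only a few sectors
+ * during its service slot is charged as if it had received
+ * service for the whole slot duration, so it cannot increase its
+ * share of the throughput just by being slow; a queue that
+ * consumed at least 2/3 of its budget before timing out is
+ * instead still charged by actual service.)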
-+ */ + */ +- if (slow || (reason == BFQ_BFQQ_BUDGET_TIMEOUT && +- bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3)) +- bfq_bfqq_charge_full_budget(bfqq); +- +- bfqq->service_from_backlogged += bfqq->entity.service; + if (bfqq->wr_coeff == 1 && + (slow || + (reason == BFQ_BFQQ_BUDGET_TIMEOUT && @@ -3199,12 +4428,12 @@ index d1f648d..3bc1f8b 100644 + BUG_ON(bfqq->entity.budget < bfqq->entity.service); if (reason == BFQ_BFQQ_TOO_IDLE && -- bfqq->entity.service <= 2 * bfqq->entity.budget / 10 ) -+ entity->service <= 2 * entity->budget / 10 ) +- bfqq->entity.service <= 2 * bfqq->entity.budget / 10) ++ entity->service <= 2 * entity->budget / 10) bfq_clear_bfqq_IO_bound(bfqq); if (bfqd->low_latency && bfqq->wr_coeff == 1) -@@ -2285,19 +2825,23 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2288,19 +3117,23 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && RB_EMPTY_ROOT(&bfqq->sort_list)) { /* @@ -3236,7 +4465,7 @@ index d1f648d..3bc1f8b 100644 /* * The application is still waiting for the * completion of one or more requests: -@@ -2314,7 +2858,7 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2317,7 +3150,7 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, * happened to be in the past. */ bfqq->soft_rt_next_start = @@ -3245,7 +4474,7 @@ index d1f648d..3bc1f8b 100644 /* * Schedule an update of soft_rt_next_start to when * the task may be discovered to be isochronous. -@@ -2324,15 +2868,27 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2327,15 +3160,27 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, } bfq_log_bfqq(bfqd, bfqq, @@ -3275,7 +4504,7 @@ index d1f648d..3bc1f8b 100644 } /* -@@ -2342,20 +2898,17 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, +@@ -2345,20 +3190,17 @@ static void bfq_bfqq_expire(struct bfq_data *bfqd, */ static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) { @@ -3304,7 +4533,7 @@ index d1f648d..3bc1f8b 100644 static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) { bfq_log_bfqq(bfqq->bfqd, bfqq, -@@ -2397,10 +2950,12 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2400,10 +3242,12 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) { struct bfq_data *bfqd = bfqq->bfqd; bool idling_boosts_thr, idling_boosts_thr_without_issues, @@ -3318,16 +4547,7 @@ index d1f648d..3bc1f8b 100644 /* * The next variable takes into account the cases where idling * boosts the throughput. -@@ -2422,7 +2977,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) - */ - idling_boosts_thr = !bfqd->hw_tag || - (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) && -- bfq_bfqq_idle_window(bfqq)) ; -+ bfq_bfqq_idle_window(bfqq)); - - /* - * The value of the next variable, -@@ -2463,74 +3018,27 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2466,74 +3310,27 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) bfqd->wr_busy_queues == 0; /* @@ -3421,7 +4641,7 @@ index d1f648d..3bc1f8b 100644 * (i) each of these processes must get the same throughput as * the others; * (ii) all these processes have the same I/O pattern -@@ -2552,26 +3060,53 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2555,26 +3352,53 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * words, only if sub-condition (i) holds, then idling is * allowed, and the device tends to be prevented from queueing * many requests, possibly of several processes. 
The reason @@ -3495,7 +4715,7 @@ index d1f648d..3bc1f8b 100644 * * According to the above considerations, the next variable is * true (only) if sub-condition (i) holds. To compute the -@@ -2579,7 +3114,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2582,7 +3406,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * the function bfq_symmetric_scenario(), but also check * whether bfqq is being weight-raised, because * bfq_symmetric_scenario() does not take into account also @@ -3504,7 +4724,7 @@ index d1f648d..3bc1f8b 100644 * bfq_weights_tree_add()). * * As a side note, it is worth considering that the above -@@ -2601,17 +3136,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2604,17 +3428,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * bfqq. Such a case is when bfqq became active in a burst of * queue activations. Queues that became active during a large * burst benefit only from throughput, as discussed in the @@ -3527,13 +4747,15 @@ index d1f648d..3bc1f8b 100644 /* * We have now all the components we need to compute the return -@@ -2621,6 +3155,14 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2624,6 +3447,16 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * 2) idling either boosts the throughput (without issues), or * is necessary to preserve service guarantees. */ -+ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d " -+ "wr_busy %d boosts %d IO-bound %d guar %d", -+ bfq_bfqq_sync(bfqq), idling_boosts_thr, ++ bfq_log_bfqq(bfqd, bfqq, "may_idle: sync %d idling_boosts_thr %d", ++ bfq_bfqq_sync(bfqq), idling_boosts_thr); ++ ++ bfq_log_bfqq(bfqd, bfqq, ++ "may_idle: wr_busy %d boosts %d IO-bound %d guar %d", + bfqd->wr_busy_queues, + idling_boosts_thr_without_issues, + bfq_bfqq_IO_bound(bfqq), @@ -3542,7 +4764,7 @@ index d1f648d..3bc1f8b 100644 return bfq_bfqq_sync(bfqq) && (idling_boosts_thr_without_issues || idling_needed_for_service_guarantees); -@@ -2632,7 +3174,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) +@@ -2635,7 +3468,7 @@ static bool bfq_bfqq_may_idle(struct bfq_queue *bfqq) * 1) the queue must remain in service and cannot be expired, and * 2) the device must be idled to wait for the possible arrival of a new * request for the queue. @@ -3551,17 +4773,102 @@ index d1f648d..3bc1f8b 100644 * why performing device idling is the best choice to boost the throughput * and preserve service guarantees when bfq_bfqq_may_idle itself * returns true. -@@ -2698,9 +3240,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) +@@ -2665,18 +3498,33 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); + + if (bfq_may_expire_for_budg_timeout(bfqq) && +- !timer_pending(&bfqd->idle_slice_timer) && ++ !hrtimer_active(&bfqd->idle_slice_timer) && + !bfq_bfqq_must_idle(bfqq)) + goto expire; + ++check_queue: ++ /* ++ * This loop is rarely executed more than once. Even when it ++ * happens, it is much more convenient to re-execute this loop ++ * than to return NULL and trigger a new dispatch to get a ++ * request served. ++ */ + next_rq = bfqq->next_rq; + /* + * If bfqq has requests queued and it has enough budget left to + * serve them, keep the queue, otherwise expire it. 
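
The return statement of bfq_bfqq_may_idle(), visible in the hunks above, condenses the whole discussion: idling is worth it only for sync queues, and only when it either boosts throughput without side effects or is needed to keep the service guarantees of an asymmetric or weight-raised scenario. A compact restatement (names are mine):

    #include <stdbool.h>

    /* both sub-conditions are computed exactly as discussed above */
    static bool may_idle(bool sync,
                         bool idling_boosts_thr_without_issues,
                         bool idling_needed_for_service_guarantees)
    {
        return sync && (idling_boosts_thr_without_issues ||
                        idling_needed_for_service_guarantees);
    }
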
+ */ + if (next_rq) { ++ BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); ++ + if (bfq_serv_to_charge(next_rq, bfqq) > + bfq_bfqq_budget_left(bfqq)) { ++ /* ++ * Expire the queue for budget exhaustion, ++ * which makes sure that the next budget is ++ * enough to serve the next request, even if ++ * it comes from the fifo expired path. ++ */ + reason = BFQ_BFQQ_BUDGET_EXHAUSTED; + goto expire; + } else { +@@ -2685,7 +3533,8 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + * not disable disk idling even when a new request + * arrives. + */ +- if (timer_pending(&bfqd->idle_slice_timer)) { ++ if (bfq_bfqq_wait_request(bfqq)) { ++ BUG_ON(!hrtimer_active(&bfqd->idle_slice_timer)); + /* + * If we get here: 1) at least a new request + * has arrived but we have not disabled the +@@ -2700,10 +3549,8 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + * So we disable idling. */ bfq_clear_bfqq_wait_request(bfqq); - del_timer(&bfqd->idle_slice_timer); +- del_timer(&bfqd->idle_slice_timer); -#ifdef CONFIG_BFQ_GROUP_IOSCHED ++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); bfqg_stats_update_idle_time(bfqq_group(bfqq)); -#endif } goto keep_queue; } -@@ -2745,14 +3285,11 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -2714,7 +3561,7 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + * for a new request, or has requests waiting for a completion and + * may idle after their completion, then keep it anyway. + */ +- if (timer_pending(&bfqd->idle_slice_timer) || ++ if (hrtimer_active(&bfqd->idle_slice_timer) || + (bfqq->dispatched != 0 && bfq_bfqq_may_idle(bfqq))) { + bfqq = NULL; + goto keep_queue; +@@ -2725,9 +3572,16 @@ static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) + bfq_bfqq_expire(bfqd, bfqq, false, reason); + new_queue: + bfqq = bfq_set_in_service_queue(bfqd); +- bfq_log(bfqd, "select_queue: new queue %d returned", +- bfqq ? bfqq->pid : 0); ++ if (bfqq) { ++ bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); ++ goto check_queue; ++ } + keep_queue: ++ if (bfqq) ++ bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); ++ else ++ bfq_log(bfqd, "select_queue: no queue returned"); ++ + return bfqq; + } + +@@ -2736,6 +3590,9 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) + struct bfq_entity *entity = &bfqq->entity; + + if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ ++ BUG_ON(bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && ++ time_is_after_jiffies(bfqq->last_wr_start_finish)); ++ + bfq_log_bfqq(bfqd, bfqq, + "raising period dur %u/%u msec, old coeff %u, w %d(%d)", + jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), +@@ -2749,22 +3606,30 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); /* @@ -3574,21 +4881,84 @@ index d1f648d..3bc1f8b 100644 + * time has elapsed from the beginning of this + * weight-raising period, then end weight raising. 
*/ - if (bfq_bfqq_in_large_burst(bfqq) || +- if (bfq_bfqq_in_large_burst(bfqq) || - bfq_bfqq_cooperations(bfqq) >= bfqd->bfq_coop_thresh || - time_is_before_jiffies(bfqq->last_wr_start_finish + - bfqq->wr_cur_max_time)) { - bfqq->last_wr_start_finish = jiffies; -@@ -2811,13 +3348,29 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, - */ - if (!bfqd->rq_in_driver) - bfq_schedule_dispatch(bfqd); -+ BUG_ON(bfqq->entity.budget < bfqq->entity.service); - goto expire; +- time_is_before_jiffies(bfqq->last_wr_start_finish + +- bfqq->wr_cur_max_time)) { +- bfqq->last_wr_start_finish = jiffies; +- bfq_log_bfqq(bfqd, bfqq, +- "wrais ending at %lu, rais_max_time %u", +- bfqq->last_wr_start_finish, +- jiffies_to_msecs(bfqq->wr_cur_max_time)); ++ if (bfq_bfqq_in_large_burst(bfqq)) + bfq_bfqq_end_wr(bfqq); ++ else if (time_is_before_jiffies(bfqq->last_wr_start_finish + ++ bfqq->wr_cur_max_time)) { ++ if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || ++ time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + ++ bfq_wr_duration(bfqd))) ++ bfq_bfqq_end_wr(bfqq); ++ else { ++ /* switch back to interactive wr */ ++ bfqq->wr_coeff = bfqd->bfq_wr_coeff; ++ bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); ++ bfqq->last_wr_start_finish = ++ bfqq->wr_start_at_switch_to_srt; ++ BUG_ON(time_is_after_jiffies( ++ bfqq->last_wr_start_finish)); ++ bfqq->entity.prio_changed = 1; ++ bfq_log_bfqq(bfqd, bfqq, ++ "back to interactive wr"); ++ } + } } + /* Update weight both if it must be raised and if it must be lowered */ +@@ -2782,46 +3647,34 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, + struct bfq_queue *bfqq) + { + int dispatched = 0; +- struct request *rq; ++ struct request *rq = bfqq->next_rq; + unsigned long service_to_charge; + BUG_ON(RB_EMPTY_ROOT(&bfqq->sort_list)); +- +- /* Follow expired path, else get first next available. */ +- rq = bfq_check_fifo(bfqq); +- if (!rq) +- rq = bfqq->next_rq; ++ BUG_ON(!rq); + service_to_charge = bfq_serv_to_charge(rq, bfqq); + +- if (service_to_charge > bfq_bfqq_budget_left(bfqq)) { +- /* +- * This may happen if the next rq is chosen in fifo order +- * instead of sector order. The budget is properly +- * dimensioned to be always sufficient to serve the next +- * request only if it is chosen in sector order. The reason +- * is that it would be quite inefficient and little useful +- * to always make sure that the budget is large enough to +- * serve even the possible next rq in fifo order. +- * In fact, requests are seldom served in fifo order. +- * +- * Expire the queue for budget exhaustion, and make sure +- * that the next act_budget is enough to serve the next +- * request, even if it comes from the fifo expired path. +- */ +- bfqq->next_rq = rq; +- /* +- * Since this dispatch is failed, make sure that +- * a new one will be performed +- */ +- if (!bfqd->rq_in_driver) +- bfq_schedule_dispatch(bfqd); +- goto expire; +- } ++ BUG_ON(service_to_charge > bfq_bfqq_budget_left(bfqq)); ++ + BUG_ON(bfqq->entity.budget < bfqq->entity.service); - /* Finally, insert request into driver dispatch list. */ + +- /* Finally, insert request into driver dispatch list. 
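
The weight-raising bookkeeping in the hunk above gains one notable case: when a soft-real-time raising period runs out, the queue may fall back to the remainder of the interactive period it interrupted, instead of losing the raised weight outright. A simplified decision function (a plain counter stands in for jiffies, so the wraparound handling done by time_is_before_jiffies() is omitted; names are mine):

    enum wr_next { WR_KEEP, WR_END, WR_BACK_TO_INTERACTIVE };

    static enum wr_next wr_period_tick(unsigned long now,
                                       unsigned long last_start,
                                       unsigned long cur_max,
                                       unsigned long rt_max,
                                       unsigned long srt_switch,
                                       unsigned long interactive_dur,
                                       int in_large_burst)
    {
        if (in_large_burst)
            return WR_END;          /* burst queues lose raising     */
        if (now < last_start + cur_max)
            return WR_KEEP;         /* current period still running  */
        /*
         * Period over: only a soft-rt period whose underlying
         * interactive period still has time left falls back to it.
         */
        if (cur_max == rt_max && now < srt_switch + interactive_dur)
            return WR_BACK_TO_INTERACTIVE;
        return WR_END;
    }
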
*/ bfq_bfqq_served(bfqq, service_to_charge); + + BUG_ON(bfqq->entity.budget < bfqq->entity.service); @@ -3609,7 +4979,7 @@ index d1f648d..3bc1f8b 100644 bfq_update_wr_data(bfqd, bfqq); bfq_log_bfqq(bfqd, bfqq, -@@ -2833,9 +3386,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, +@@ -2837,9 +3690,7 @@ static int bfq_dispatch_request(struct bfq_data *bfqd, bfqd->in_service_bic = RQ_BIC(rq); } @@ -3620,7 +4990,7 @@ index d1f648d..3bc1f8b 100644 goto expire; return dispatched; -@@ -2881,8 +3432,8 @@ static int bfq_forced_dispatch(struct bfq_data *bfqd) +@@ -2885,8 +3736,8 @@ static int bfq_forced_dispatch(struct bfq_data *bfqd) st = bfq_entity_service_tree(&bfqq->entity); dispatched += __bfq_forced_dispatch_bfqq(bfqq); @@ -3630,7 +5000,7 @@ index d1f648d..3bc1f8b 100644 bfq_forget_idle(st); } -@@ -2895,9 +3446,9 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) +@@ -2899,37 +3750,37 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) { struct bfq_data *bfqd = q->elevator->elevator_data; struct bfq_queue *bfqq; @@ -3641,7 +5011,25 @@ index d1f648d..3bc1f8b 100644 if (bfqd->busy_queues == 0) return 0; -@@ -2908,21 +3459,7 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) + if (unlikely(force)) + return bfq_forced_dispatch(bfqd); + ++ /* ++ * Force device to serve one request at a time if ++ * strict_guarantees is true. Forcing this service scheme is ++ * currently the ONLY way to guarantee that the request ++ * service order enforced by the scheduler is respected by a ++ * queueing device. Otherwise the device is free even to make ++ * some unlucky request wait for as long as the device ++ * wishes. ++ * ++ * Of course, serving one request at at time may cause loss of ++ * throughput. ++ */ ++ if (bfqd->strict_guarantees && bfqd->rq_in_driver > 0) ++ return 0; ++ + bfqq = bfq_select_queue(bfqd); if (!bfqq) return 0; @@ -3662,9 +5050,13 @@ index d1f648d..3bc1f8b 100644 - return 0; + BUG_ON(bfqq->entity.budget < bfqq->entity.service); - bfq_clear_bfqq_wait_request(bfqq); - BUG_ON(timer_pending(&bfqd->idle_slice_timer)); -@@ -2933,6 +3470,8 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) +- bfq_clear_bfqq_wait_request(bfqq); +- BUG_ON(timer_pending(&bfqd->idle_slice_timer)); ++ BUG_ON(bfq_bfqq_wait_request(bfqq)); + + if (!bfq_dispatch_request(bfqd, bfqq)) + return 0; +@@ -2937,6 +3788,8 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) bfq_log_bfqq(bfqd, bfqq, "dispatched %s request", bfq_bfqq_sync(bfqq) ? 
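
The dispatch path above introduces the strict_guarantees service scheme: with the knob set, a new request is handed to the driver only once the previous one has completed, because a queueing device is otherwise free to reorder whatever it holds. The gate itself is tiny; restated standalone:

    /*
     * Returns nonzero when dispatching another request is allowed.
     * Serving one request at a time trades throughput for ordering.
     */
    static int dispatch_allowed(int strict_guarantees, int rq_in_driver)
    {
        return !(strict_guarantees && rq_in_driver > 0);
    }
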
"sync" : "async"); @@ -3673,7 +5065,7 @@ index d1f648d..3bc1f8b 100644 return 1; } -@@ -2944,23 +3483,22 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) +@@ -2948,23 +3801,21 @@ static int bfq_dispatch_requests(struct request_queue *q, int force) */ static void bfq_put_queue(struct bfq_queue *bfqq) { @@ -3698,11 +5090,10 @@ index d1f648d..3bc1f8b 100644 BUG_ON(bfqq->entity.tree); BUG_ON(bfq_bfqq_busy(bfqq)); - BUG_ON(bfqd->in_service_queue == bfqq); -+ BUG_ON(bfqq->bfqd->in_service_queue == bfqq); if (bfq_bfqq_sync(bfqq)) /* -@@ -2973,7 +3511,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) +@@ -2977,7 +3828,7 @@ static void bfq_put_queue(struct bfq_queue *bfqq) */ hlist_del_init(&bfqq->burst_list_node); @@ -3711,7 +5102,7 @@ index d1f648d..3bc1f8b 100644 kmem_cache_free(bfq_pool, bfqq); #ifdef CONFIG_BFQ_GROUP_IOSCHED -@@ -3007,8 +3545,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -3011,8 +3862,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_schedule_dispatch(bfqd); } @@ -3721,10 +5112,12 @@ index d1f648d..3bc1f8b 100644 bfq_put_cooperator(bfqq); -@@ -3019,26 +3556,7 @@ static void bfq_init_icq(struct io_cq *icq) - { - struct bfq_io_cq *bic = icq_to_bic(icq); +@@ -3021,28 +3871,7 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) + static void bfq_init_icq(struct io_cq *icq) + { +- struct bfq_io_cq *bic = icq_to_bic(icq); +- - bic->ttime.last_end_request = jiffies; - /* - * A newly created bic indicates that the process has just @@ -3745,11 +5138,11 @@ index d1f648d..3bc1f8b 100644 - * as needing weight raising. - */ - bic->wr_time_left = 1; -+ bic->ttime.last_end_request = bfq_smallest_from_now(); ++ icq_to_bic(icq)->ttime.last_end_request = ktime_get_ns() - (1ULL<<32); } static void bfq_exit_icq(struct io_cq *icq) -@@ -3046,21 +3564,21 @@ static void bfq_exit_icq(struct io_cq *icq) +@@ -3050,21 +3879,21 @@ static void bfq_exit_icq(struct io_cq *icq) struct bfq_io_cq *bic = icq_to_bic(icq); struct bfq_data *bfqd = bic_to_bfqd(bic); @@ -3778,26 +5171,27 @@ index d1f648d..3bc1f8b 100644 } } -@@ -3068,7 +3586,8 @@ static void bfq_exit_icq(struct io_cq *icq) +@@ -3072,8 +3901,8 @@ static void bfq_exit_icq(struct io_cq *icq) * Update the entity prio values; note that the new values will not * be used until the next (re)activation. 
*/ --static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +-static void +-bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) +static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, + struct bfq_io_cq *bic) { struct task_struct *tsk = current; int ioprio_class; -@@ -3100,7 +3619,7 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *b +@@ -3105,7 +3934,7 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) break; } - if (bfqq->new_ioprio < 0 || bfqq->new_ioprio >= IOPRIO_BE_NR) { + if (bfqq->new_ioprio >= IOPRIO_BE_NR) { - printk(KERN_CRIT "bfq_set_next_ioprio_data: new_ioprio %d\n", - bfqq->new_ioprio); + pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n", + bfqq->new_ioprio); BUG(); -@@ -3108,45 +3627,40 @@ static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *b +@@ -3113,45 +3942,40 @@ bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio); bfqq->entity.prio_changed = 1; @@ -3857,7 +5251,7 @@ index d1f648d..3bc1f8b 100644 } static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, -@@ -3155,8 +3669,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3160,8 +3984,9 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, RB_CLEAR_NODE(&bfqq->entity.rb_node); INIT_LIST_HEAD(&bfqq->fifo); INIT_HLIST_NODE(&bfqq->burst_list_node); @@ -3868,7 +5262,7 @@ index d1f648d..3bc1f8b 100644 bfqq->bfqd = bfqd; if (bic) -@@ -3166,6 +3681,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3171,6 +3996,7 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, if (!bfq_class_idle(bfqq)) bfq_mark_bfqq_idle_window(bfqq); bfq_mark_bfqq_sync(bfqq); @@ -3876,14 +5270,16 @@ index d1f648d..3bc1f8b 100644 } else bfq_clear_bfqq_sync(bfqq); bfq_mark_bfqq_IO_bound(bfqq); -@@ -3175,72 +3691,17 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3180,72 +4006,19 @@ static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfqq->pid = pid; bfqq->wr_coeff = 1; - bfqq->last_wr_start_finish = 0; -+ bfqq->last_wr_start_finish = bfq_smallest_from_now(); ++ bfqq->last_wr_start_finish = jiffies; ++ bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); + bfqq->budget_timeout = bfq_smallest_from_now(); + bfqq->split_time = bfq_smallest_from_now(); ++ /* * Set to the value for which bfqq will not be deemed as * soft rt when it becomes backlogged. 
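
bfq_set_next_ioprio_data() above derives entity.new_weight from the new ioprio via bfq_ioprio_to_weight(). That mapping is fixed later in this patch (in the bfq-sched.c portion) to be genuinely linear: weight = (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF. With the usual values (IOPRIO_BE_NR is 8; the coefficient being 10 is an assumption here, taken from the BFQ sources):

    #define IOPRIO_BE_NR                 8
    #define BFQ_WEIGHT_CONVERSION_COEFF 10   /* assumed value */

    /* best-effort ioprio 0 (highest) -> 80, ioprio 7 (lowest) -> 10 */
    static unsigned short ioprio_to_weight(int ioprio)
    {
        return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
    }
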
@@ -3934,7 +5330,7 @@ index d1f648d..3bc1f8b 100644 - - if (bfqq) { - bfq_init_bfqq(bfqd, bfqq, bic, current->pid, -- is_sync); +- is_sync); - bfq_init_entity(&bfqq->entity, bfqg); - bfq_log_bfqq(bfqd, bfqq, "allocated"); - } else { @@ -3955,7 +5351,7 @@ index d1f648d..3bc1f8b 100644 } static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, -@@ -3263,44 +3724,60 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, +@@ -3268,90 +4041,93 @@ static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, } static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, @@ -3976,7 +5372,7 @@ index d1f648d..3bc1f8b 100644 - struct bfq_group *bfqg; + rcu_read_lock(); + -+ bfqg = bfq_find_set_group(bfqd,bio_blkcg(bio)); ++ bfqg = bfq_find_set_group(bfqd, bio_blkcg(bio)); + if (!bfqg) { + bfqq = &bfqd->oom_bfqq; + goto out; @@ -3996,7 +5392,8 @@ index d1f648d..3bc1f8b 100644 - if (!bfqq) - bfqq = bfq_find_alloc_queue(bfqd, bio, is_sync, bic, gfp_mask); -+ bfqq = kmem_cache_alloc_node(bfq_pool, GFP_NOWAIT | __GFP_ZERO, ++ bfqq = kmem_cache_alloc_node(bfq_pool, ++ GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN, + bfqd->queue->node); + + if (bfqq) { @@ -4017,7 +5414,13 @@ index d1f648d..3bc1f8b 100644 - if (!is_sync && !(*async_bfqq)) { - atomic_inc(&bfqq->ref); + if (async_bfqq) { -+ bfqq->ref++; ++ bfqq->ref++; /* ++ * Extra group reference, w.r.t. sync ++ * queue. This extra reference is removed ++ * only if bfqq->bfqg disappears, to ++ * guarantee that this queue is not freed ++ * until its group goes away. ++ */ bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d", - bfqq, atomic_read(&bfqq->ref)); + bfqq, bfqq->ref); @@ -4034,14 +5437,33 @@ index d1f648d..3bc1f8b 100644 return bfqq; } -@@ -3316,37 +3793,21 @@ static void bfq_update_io_thinktime(struct bfq_data *bfqd, - bic->ttime.ttime_samples; + static void bfq_update_io_thinktime(struct bfq_data *bfqd, + struct bfq_io_cq *bic) + { +- unsigned long elapsed = jiffies - bic->ttime.last_end_request; +- unsigned long ttime = min(elapsed, 2UL * bfqd->bfq_slice_idle); ++ struct bfq_ttime *ttime = &bic->ttime; ++ u64 elapsed = ktime_get_ns() - bic->ttime.last_end_request; + +- bic->ttime.ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; +- bic->ttime.ttime_total = (7*bic->ttime.ttime_total + 256*ttime) / 8; +- bic->ttime.ttime_mean = (bic->ttime.ttime_total + 128) / +- bic->ttime.ttime_samples; ++ elapsed = min_t(u64, elapsed, 2 * bfqd->bfq_slice_idle); ++ ++ ttime->ttime_samples = (7*bic->ttime.ttime_samples + 256) / 8; ++ ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8); ++ ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, ++ ttime->ttime_samples); } -static void bfq_update_io_seektime(struct bfq_data *bfqd, - struct bfq_queue *bfqq, - struct request *rq) --{ ++static void ++bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, ++ struct request *rq) + { - sector_t sdist; - u64 total; - @@ -4060,32 +5482,24 @@ index d1f648d..3bc1f8b 100644 - sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*1024); - else - sdist = min(sdist, (bfqq->seek_mean * 4) + 2*1024*64); - +- - bfqq->seek_samples = (7*bfqq->seek_samples + 256) / 8; - bfqq->seek_total = (7*bfqq->seek_total + (u64)256*sdist) / 8; - total = bfqq->seek_total + (bfqq->seek_samples/2); - do_div(total, bfqq->seek_samples); - bfqq->seek_mean = (sector_t)total; -+static void -+bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, -+ struct request *rq) -+{ -+ sector_t sdist = 0; -+ if 
(bfqq->last_request_pos) { -+ if (bfqq->last_request_pos < blk_rq_pos(rq)) -+ sdist = blk_rq_pos(rq) - bfqq->last_request_pos; -+ else -+ sdist = bfqq->last_request_pos - blk_rq_pos(rq); -+ } - +- - bfq_log_bfqq(bfqd, bfqq, "dist=%llu mean=%llu", (u64)sdist, - (u64)bfqq->seek_mean); + bfqq->seek_history <<= 1; -+ bfqq->seek_history |= (sdist > BFQQ_SEEK_THR); ++ bfqq->seek_history |= ++ get_sdist(bfqq->last_request_pos, rq) > BFQQ_SEEK_THR && ++ (!blk_queue_nonrot(bfqd->queue) || ++ blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT); } /* -@@ -3364,7 +3825,8 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, +@@ -3369,7 +4145,8 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, return; /* Idle window just restored, statistics are meaningless. */ @@ -4095,7 +5509,7 @@ index d1f648d..3bc1f8b 100644 return; enable_idle = bfq_bfqq_idle_window(bfqq); -@@ -3404,22 +3866,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3409,22 +4186,13 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, bfq_update_io_thinktime(bfqd, bic); bfq_update_io_seektime(bfqd, bfqq, rq); @@ -4114,13 +5528,13 @@ index d1f648d..3bc1f8b 100644 bfq_log_bfqq(bfqd, bfqq, - "rq_enqueued: idle_window=%d (seeky %d, mean %llu)", - bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq), -- (long long unsigned)bfqq->seek_mean); +- (unsigned long long) bfqq->seek_mean); + "rq_enqueued: idle_window=%d (seeky %d)", + bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq)); bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); -@@ -3433,14 +3886,15 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3438,14 +4206,15 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, * is small and the queue is not to be expired, then * just exit. * @@ -4144,17 +5558,19 @@ index d1f648d..3bc1f8b 100644 */ if (small_req && !budget_timeout) return; -@@ -3453,9 +3907,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -3457,10 +4226,8 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, + * timer. 
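
The rewritten bfq_update_io_thinktime() above keeps an exponentially weighted average in fixed point: each update decays the running totals by 7/8 and mixes in the new sample scaled by 256, so ttime_samples converges to 256 while ttime_mean stays a plain nanosecond value. The same arithmetic in standalone C (the kernel's div_u64/div64_ul helpers become plain division):

    #include <stdint.h>

    struct ttime_avg {
        uint64_t samples;   /* scaled by 256            */
        uint64_t total;     /* nanoseconds, scaled by 256 */
        uint64_t mean;      /* nanoseconds              */
    };

    static void ttime_update(struct ttime_avg *t, uint64_t elapsed_ns,
                             uint64_t clamp_ns)
    {
        if (elapsed_ns > clamp_ns)   /* the kernel caps the sample   */
            elapsed_ns = clamp_ns;   /* at 2 * bfq_slice_idle        */
        t->samples = (7 * t->samples + 256) / 8;
        t->total   = (7 * t->total + 256 * elapsed_ns) / 8;
        t->mean    = (t->total + 128) / t->samples;  /* +128 rounds */
    }

The seekiness estimate right below it gets the same fixed-point flavor: seek_history is a shift register of per-request "seeky" bits rather than a running mean of distances.
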
*/ bfq_clear_bfqq_wait_request(bfqq); - del_timer(&bfqd->idle_slice_timer); +- del_timer(&bfqd->idle_slice_timer); -#ifdef CONFIG_BFQ_GROUP_IOSCHED ++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); bfqg_stats_update_idle_time(bfqq_group(bfqq)); -#endif /* * The queue is not empty, because a new request just -@@ -3499,27 +3951,19 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) +@@ -3504,28 +4271,20 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) */ new_bfqq->allocated[rq_data_dir(rq)]++; bfqq->allocated[rq_data_dir(rq)]--; @@ -4182,10 +5598,12 @@ index d1f648d..3bc1f8b 100644 - */ - if (bfqq->bic) - bfqq->bic->wr_time_left = 0; - rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; +- rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; ++ rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; list_add_tail(&rq->queuelist, &bfqq->fifo); -@@ -3528,8 +3972,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) + bfq_rq_enqueued(bfqd, bfqq, rq); +@@ -3533,8 +4292,8 @@ static void bfq_insert_request(struct request_queue *q, struct request *rq) static void bfq_update_hw_tag(struct bfq_data *bfqd) { @@ -4196,11 +5614,13 @@ index d1f648d..3bc1f8b 100644 if (bfqd->hw_tag == 1) return; -@@ -3555,48 +3999,45 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) +@@ -3560,48 +4319,85 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) { struct bfq_queue *bfqq = RQ_BFQQ(rq); struct bfq_data *bfqd = bfqq->bfqd; - bool sync = bfq_bfqq_sync(bfqq); ++ u64 now_ns; ++ u32 delta_us; - bfq_log_bfqq(bfqd, bfqq, "completed one req with %u sects left (%d)", - blk_rq_sectors(rq), sync); @@ -4217,8 +5637,10 @@ index d1f648d..3bc1f8b 100644 -#ifdef CONFIG_BFQ_GROUP_IOSCHED bfqg_stats_update_completion(bfqq_group(bfqq), rq_start_time_ns(rq), - rq_io_start_time_ns(rq), rq->cmd_flags); +- rq_io_start_time_ns(rq), rq->cmd_flags); -#endif ++ rq_io_start_time_ns(rq), ++ rq->cmd_flags); if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { + BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); @@ -4247,7 +5669,44 @@ index d1f648d..3bc1f8b 100644 - bfqd->sync_flight--; - RQ_BIC(rq)->ttime.last_end_request = jiffies; - } -+ RQ_BIC(rq)->ttime.last_end_request = jiffies; ++ now_ns = ktime_get_ns(); ++ ++ RQ_BIC(rq)->ttime.last_end_request = now_ns; ++ ++ /* ++ * Using us instead of ns, to get a reasonable precision in ++ * computing rate in next check. ++ */ ++ delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC); ++ ++ bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu", ++ delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size, ++ (USEC_PER_SEC* ++ (u64)((bfqd->last_rq_max_size<>BFQ_RATE_SHIFT, ++ (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT); ++ ++ /* ++ * If the request took rather long to complete, and, according ++ * to the maximum request size recorded, this completion latency ++ * implies that the request was certainly served at a very low ++ * rate (less than 1M sectors/sec), then the whole observation ++ * interval that lasts up to this time instant cannot be a ++ * valid time interval for computing a new peak rate. 
Invoke ++ * bfq_update_rate_reset to have the following three steps ++ * taken: ++ * - close the observation interval at the last (previous) ++ * request dispatch or completion ++ * - compute rate, if possible, for that observation interval ++ * - reset to zero samples, which will trigger a proper ++ * re-initialization of the observation interval on next ++ * dispatch ++ */ ++ if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && ++ (bfqd->last_rq_max_size<last_completion = now_ns; /* - * If we are waiting to discover whether the request pattern of the @@ -4265,7 +5724,7 @@ index d1f648d..3bc1f8b 100644 */ if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && RB_EMPTY_ROOT(&bfqq->sort_list)) -@@ -3608,10 +4049,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) +@@ -3613,10 +4409,7 @@ static void bfq_completed_request(struct request_queue *q, struct request *rq) * or if we want to idle in case it has no pending requests. */ if (bfqd->in_service_queue == bfqq) { @@ -4277,7 +5736,25 @@ index d1f648d..3bc1f8b 100644 bfq_arm_slice_timer(bfqd); goto out; } else if (bfq_may_expire_for_budg_timeout(bfqq)) -@@ -3682,14 +4120,14 @@ static void bfq_put_request(struct request *rq) +@@ -3646,7 +4439,7 @@ static int __bfq_may_queue(struct bfq_queue *bfqq) + return ELV_MQUEUE_MAY; + } + +-static int bfq_may_queue(struct request_queue *q, int rw) ++static int bfq_may_queue(struct request_queue *q, unsigned int op) + { + struct bfq_data *bfqd = q->elevator->elevator_data; + struct task_struct *tsk = current; +@@ -3663,7 +4456,7 @@ static int bfq_may_queue(struct request_queue *q, int rw) + if (!bic) + return ELV_MQUEUE_MAY; + +- bfqq = bic_to_bfqq(bic, rw_is_sync(rw)); ++ bfqq = bic_to_bfqq(bic, op_is_sync(op)); + if (bfqq) + return __bfq_may_queue(bfqq); + +@@ -3687,14 +4480,14 @@ static void bfq_put_request(struct request *rq) rq->elv.priv[1] = NULL; bfq_log_bfqq(bfqq->bfqd, bfqq, "put_request %p, %d", @@ -4294,7 +5771,7 @@ index d1f648d..3bc1f8b 100644 */ static struct bfq_queue * bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) -@@ -3727,11 +4165,8 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, +@@ -3732,11 +4525,8 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, unsigned long flags; bool split = false; @@ -4307,7 +5784,7 @@ index d1f648d..3bc1f8b 100644 if (!bic) goto queue_fail; -@@ -3741,23 +4176,47 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, +@@ -3746,23 +4536,47 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, new_queue: bfqq = bic_to_bfqq(bic, is_sync); if (!bfqq || bfqq == &bfqd->oom_bfqq) { @@ -4335,18 +5812,14 @@ index d1f648d..3bc1f8b 100644 + "large burst"); bfq_mark_bfqq_in_large_burst(bfqq); - else { -- bfq_clear_bfqq_in_large_burst(bfqq); -- if (bic->was_in_burst_list) -- hlist_add_head(&bfqq->burst_list_node, -- &bfqd->burst_list); + } else { + bfq_log_bfqq(bfqd, bfqq, + "set_request: clearing in " + "large burst"); -+ bfq_clear_bfqq_in_large_burst(bfqq); -+ if (bic->was_in_burst_list) -+ hlist_add_head(&bfqq->burst_list_node, -+ &bfqd->burst_list); + bfq_clear_bfqq_in_large_burst(bfqq); + if (bic->was_in_burst_list) + hlist_add_head(&bfqq->burst_list_node, + &bfqd->burst_list); } + bfqq->split_time = jiffies; } @@ -4362,7 +5835,7 @@ index d1f648d..3bc1f8b 100644 bfqq = bfq_split_bfqq(bic, bfqq); split = true; if (!bfqq) -@@ -3766,9 +4225,8 @@ new_queue: +@@ -3771,9 +4585,8 @@ static int bfq_set_request(struct request_queue *q, struct 
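
Two statements in the completion hunk above appear truncated in this copy, both times right after a '<', as if the text had passed through an HTML filter. Checked against the upstream v8 sources they read as follows; this is a reconstruction, not text present in this document:

    /* the rate-logging call: */
    bfq_log(bfqd, "rq_completed: delta %uus/%luus max_size %u rate %llu/%llu",
            delta_us, BFQ_MIN_TT/NSEC_PER_USEC, bfqd->last_rq_max_size,
            (USEC_PER_SEC*
            (u64)((bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us))
                    >>BFQ_RATE_SHIFT,
            (USEC_PER_SEC*(u64)(1UL<<(BFQ_RATE_SHIFT-10)))>>BFQ_RATE_SHIFT);

    /* and the slow-completion check that the comment documents: */
    if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC &&
       (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us <
                    1UL<<(BFQ_RATE_SHIFT - 10))
            bfq_update_rate_reset(bfqd, NULL);
    bfqd->last_completion = now_ns;

The threshold works out to 1/1024 of a sector per microsecond, i.e. roughly one million 512-byte sectors (about 500 MB) per second, which is the "1M sectors/sec" the comment refers to.
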
request *rq, } bfqq->allocated[rw]++; @@ -4374,7 +5847,7 @@ index d1f648d..3bc1f8b 100644 rq->elv.priv[0] = bic; rq->elv.priv[1] = bfqq; -@@ -3783,7 +4241,6 @@ new_queue: +@@ -3788,7 +4601,6 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { bfqq->bic = bic; if (split) { @@ -4382,7 +5855,7 @@ index d1f648d..3bc1f8b 100644 /* * If the queue has just been split from a shared * queue, restore the idle window and the possible -@@ -3793,6 +4250,9 @@ new_queue: +@@ -3798,6 +4610,9 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, } } @@ -4392,15 +5865,43 @@ index d1f648d..3bc1f8b 100644 spin_unlock_irqrestore(q->queue_lock, flags); return 0; -@@ -3872,6 +4332,7 @@ static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) +@@ -3824,9 +4639,10 @@ static void bfq_kick_queue(struct work_struct *work) + * Handler of the expiration of the timer running if the in-service queue + * is idling inside its time slice. + */ +-static void bfq_idle_slice_timer(unsigned long data) ++static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) + { +- struct bfq_data *bfqd = (struct bfq_data *)data; ++ struct bfq_data *bfqd = container_of(timer, struct bfq_data, ++ idle_slice_timer); + struct bfq_queue *bfqq; + unsigned long flags; + enum bfqq_expiration reason; +@@ -3844,6 +4660,8 @@ static void bfq_idle_slice_timer(unsigned long data) + */ + if (bfqq) { + bfq_log_bfqq(bfqd, bfqq, "slice_timer expired"); ++ bfq_clear_bfqq_wait_request(bfqq); ++ + if (bfq_bfqq_budget_timeout(bfqq)) + /* + * Also here the queue can be safely expired +@@ -3869,11 +4687,12 @@ static void bfq_idle_slice_timer(unsigned long data) + bfq_schedule_dispatch(bfqd); + + spin_unlock_irqrestore(bfqd->queue->queue_lock, flags); ++ return HRTIMER_NORESTART; + } + + static void bfq_shutdown_timer_wq(struct bfq_data *bfqd) + { +- del_timer_sync(&bfqd->idle_slice_timer); ++ hrtimer_cancel(&bfqd->idle_slice_timer); cancel_work_sync(&bfqd->unplug_work); } -+#ifdef CONFIG_BFQ_GROUP_IOSCHED - static void __bfq_put_async_bfqq(struct bfq_data *bfqd, - struct bfq_queue **bfqq_ptr) - { -@@ -3880,9 +4341,9 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd, +@@ -3885,9 +4704,9 @@ static void __bfq_put_async_bfqq(struct bfq_data *bfqd, bfq_log(bfqd, "put_async_bfqq: %p", bfqq); if (bfqq) { @@ -4412,24 +5913,38 @@ index d1f648d..3bc1f8b 100644 bfq_put_queue(bfqq); *bfqq_ptr = NULL; } -@@ -3904,6 +4365,7 @@ static void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) +@@ -3922,19 +4741,18 @@ static void bfq_exit_queue(struct elevator_queue *e) - __bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq); - } -+#endif + BUG_ON(bfqd->in_service_queue); + list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list) +- bfq_deactivate_bfqq(bfqd, bfqq, 0); ++ bfq_deactivate_bfqq(bfqd, bfqq, false, false); - static void bfq_exit_queue(struct elevator_queue *e) - { -@@ -3923,8 +4385,6 @@ static void bfq_exit_queue(struct elevator_queue *e) + spin_unlock_irq(q->queue_lock); bfq_shutdown_timer_wq(bfqd); - synchronize_rcu(); - - BUG_ON(timer_pending(&bfqd->idle_slice_timer)); +- BUG_ON(timer_pending(&bfqd->idle_slice_timer)); ++ BUG_ON(hrtimer_active(&bfqd->idle_slice_timer)); #ifdef CONFIG_BFQ_GROUP_IOSCHED -@@ -3973,11 +4433,14 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) + blkcg_deactivate_policy(q, &blkcg_policy_bfq); + #else ++ bfq_put_async_queues(bfqd, bfqd->root_group); + 
kfree(bfqd->root_group); + #endif + +@@ -3954,6 +4772,7 @@ static void bfq_init_root_group(struct bfq_group *root_group, + root_group->rq_pos_tree = RB_ROOT; + for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) + root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; ++ root_group->sched_data.bfq_class_idle_last_service = jiffies; + } + + static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +@@ -3978,11 +4797,14 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) * will not attempt to free it. */ bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0); @@ -4445,7 +5960,7 @@ index d1f648d..3bc1f8b 100644 /* * Trigger weight initialization, according to ioprio, at the * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio -@@ -3996,9 +4459,6 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +@@ -4001,13 +4823,10 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) goto out_free; bfq_init_root_group(bfqd->root_group, bfqd); bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group); @@ -4453,12 +5968,19 @@ index d1f648d..3bc1f8b 100644 - bfqd->active_numerous_groups = 0; -#endif - init_timer(&bfqd->idle_slice_timer); +- init_timer(&bfqd->idle_slice_timer); ++ hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC, ++ HRTIMER_MODE_REL); bfqd->idle_slice_timer.function = bfq_idle_slice_timer; -@@ -4023,20 +4483,19 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +- bfqd->idle_slice_timer.data = (unsigned long)bfqd; + + bfqd->queue_weights_tree = RB_ROOT; + bfqd->group_weights_tree = RB_ROOT; +@@ -4027,21 +4846,19 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) + bfqd->bfq_back_max = bfq_back_max; bfqd->bfq_back_penalty = bfq_back_penalty; bfqd->bfq_slice_idle = bfq_slice_idle; - bfqd->bfq_class_idle_last_service = 0; +- bfqd->bfq_class_idle_last_service = 0; - bfqd->bfq_max_budget_async_rq = bfq_max_budget_async_rq; - bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; - bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; @@ -4483,7 +6005,7 @@ index d1f648d..3bc1f8b 100644 bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300); bfqd->bfq_wr_max_time = 0; bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000); -@@ -4048,16 +4507,15 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) +@@ -4053,16 +4870,15 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) * video. 
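
The init path above completes the move from the jiffies-based timer API (init_timer, del_timer, timer_pending) to hrtimers, which is what lets slice_idle be expressed in nanoseconds throughout the patch. The same pattern in a minimal self-contained module of my own, for illustration:

    #include <linux/module.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer idle_timer;

    /* one-shot handler: do the work, do not rearm */
    static enum hrtimer_restart idle_fn(struct hrtimer *t)
    {
        pr_info("idle slice expired\n");
        return HRTIMER_NORESTART;
    }

    static int __init demo_init(void)
    {
        hrtimer_init(&idle_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        idle_timer.function = idle_fn;
        /* arm for 8 ms, relative to now */
        hrtimer_start(&idle_timer, ms_to_ktime(8), HRTIMER_MODE_REL);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        hrtimer_cancel(&idle_timer);  /* waits for a running handler */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");
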
*/ bfqd->wr_busy_queues = 0; @@ -4504,9 +6026,36 @@ index d1f648d..3bc1f8b 100644 bfqd->device_speed = BFQ_BFQD_FAST; return 0; -@@ -4161,10 +4619,8 @@ SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); +@@ -4088,7 +4904,7 @@ static int __init bfq_slab_setup(void) + + static ssize_t bfq_var_show(unsigned int var, char *page) + { +- return sprintf(page, "%d\n", var); ++ return sprintf(page, "%u\n", var); + } + + static ssize_t bfq_var_store(unsigned long *var, const char *page, +@@ -4159,21 +4975,21 @@ static ssize_t bfq_weights_show(struct elevator_queue *e, char *page) + static ssize_t __FUNC(struct elevator_queue *e, char *page) \ + { \ + struct bfq_data *bfqd = e->elevator_data; \ +- unsigned int __data = __VAR; \ +- if (__CONV) \ ++ u64 __data = __VAR; \ ++ if (__CONV == 1) \ + __data = jiffies_to_msecs(__data); \ ++ else if (__CONV == 2) \ ++ __data = div_u64(__data, NSEC_PER_MSEC); \ + return bfq_var_show(__data, (page)); \ + } +-SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 1); +-SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 1); ++SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); ++SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); + SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); - SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1); +-SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 1); ++SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); -SHOW_FUNCTION(bfq_max_budget_async_rq_show, - bfqd->bfq_max_budget_async_rq, 0); @@ -4517,52 +6066,129 @@ index d1f648d..3bc1f8b 100644 SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); SHOW_FUNCTION(bfq_wr_coeff_show, bfqd->bfq_wr_coeff, 0); SHOW_FUNCTION(bfq_wr_rt_max_time_show, bfqd->bfq_wr_rt_max_time, 1); -@@ -4199,10 +4655,6 @@ STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); +@@ -4183,6 +4999,17 @@ SHOW_FUNCTION(bfq_wr_min_inter_arr_async_show, bfqd->bfq_wr_min_inter_arr_async, + SHOW_FUNCTION(bfq_wr_max_softrt_rate_show, bfqd->bfq_wr_max_softrt_rate, 0); + #undef SHOW_FUNCTION + ++#define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ ++static ssize_t __FUNC(struct elevator_queue *e, char *page) \ ++{ \ ++ struct bfq_data *bfqd = e->elevator_data; \ ++ u64 __data = __VAR; \ ++ __data = div_u64(__data, NSEC_PER_USEC); \ ++ return bfq_var_show(__data, (page)); \ ++} ++USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); ++#undef USEC_SHOW_FUNCTION ++ + #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ + static ssize_t \ + __FUNC(struct elevator_queue *e, const char *page, size_t count) \ +@@ -4194,24 +5021,22 @@ __FUNC(struct elevator_queue *e, const char *page, size_t count) \ + __data = (MIN); \ + else if (__data > (MAX)) \ + __data = (MAX); \ +- if (__CONV) \ ++ if (__CONV == 1) \ + *(__PTR) = msecs_to_jiffies(__data); \ ++ else if (__CONV == 2) \ ++ *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ + else \ + *(__PTR) = __data; \ + return ret; \ + } + STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, +- INT_MAX, 1); ++ INT_MAX, 2); + STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, +- INT_MAX, 1); ++ INT_MAX, 2); + STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, INT_MAX, 0); - 
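
Because the tunables now live in nanoseconds internally, the sysfs macros above grow a second conversion mode: __CONV == 1 still converts jiffies to milliseconds, while __CONV == 2 divides nanosecond values down to milliseconds, and the new *_us attributes report microseconds. The arithmetic, checked against BFQ's traditional 8 ms slice_idle default (the default value is an assumption here):

    #include <stdio.h>
    #include <stdint.h>

    #define NSEC_PER_MSEC 1000000ULL
    #define NSEC_PER_USEC 1000ULL

    int main(void)
    {
        uint64_t slice_idle_ns = 8 * NSEC_PER_MSEC;

        printf("slice_idle    = %llu ms\n",
               (unsigned long long)(slice_idle_ns / NSEC_PER_MSEC));
        printf("slice_idle_us = %llu us\n",
               (unsigned long long)(slice_idle_ns / NSEC_PER_USEC));
        return 0;
    }
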
STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1); +-STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 1); -STORE_FUNCTION(bfq_max_budget_async_rq_store, &bfqd->bfq_max_budget_async_rq, - 1, INT_MAX, 0); -STORE_FUNCTION(bfq_timeout_async_store, &bfqd->bfq_timeout[BLK_RW_ASYNC], 0, - INT_MAX, 1); ++STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); STORE_FUNCTION(bfq_wr_coeff_store, &bfqd->bfq_wr_coeff, 1, INT_MAX, 0); STORE_FUNCTION(bfq_wr_max_time_store, &bfqd->bfq_wr_max_time, 0, INT_MAX, 1); STORE_FUNCTION(bfq_wr_rt_max_time_store, &bfqd->bfq_wr_rt_max_time, 0, INT_MAX, -@@ -4224,10 +4676,8 @@ static ssize_t bfq_weights_store(struct elevator_queue *e, +@@ -4224,6 +5049,23 @@ STORE_FUNCTION(bfq_wr_max_softrt_rate_store, &bfqd->bfq_wr_max_softrt_rate, 0, + INT_MAX, 0); + #undef STORE_FUNCTION - static unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd) - { ++#define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ ++static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ ++{ \ ++ struct bfq_data *bfqd = e->elevator_data; \ ++ unsigned long uninitialized_var(__data); \ ++ int ret = bfq_var_store(&__data, (page), count); \ ++ if (__data < (MIN)) \ ++ __data = (MIN); \ ++ else if (__data > (MAX)) \ ++ __data = (MAX); \ ++ *(__PTR) = (u64)__data * NSEC_PER_USEC; \ ++ return ret; \ ++} ++USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, ++ UINT_MAX); ++#undef USEC_STORE_FUNCTION ++ + /* do nothing for the moment */ + static ssize_t bfq_weights_store(struct elevator_queue *e, + const char *page, size_t count) +@@ -4231,16 +5073,6 @@ static ssize_t bfq_weights_store(struct elevator_queue *e, + return count; + } + +-static unsigned long bfq_estimated_max_budget(struct bfq_data *bfqd) +-{ - u64 timeout = jiffies_to_msecs(bfqd->bfq_timeout[BLK_RW_SYNC]); - - if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES) +- if (bfqd->peak_rate_samples >= BFQ_PEAK_RATE_SAMPLES) - return bfq_calc_max_budget(bfqd->peak_rate, timeout); -+ return bfq_calc_max_budget(bfqd); - else - return bfq_default_max_budget; - } -@@ -4252,6 +4702,10 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, +- else +- return bfq_default_max_budget; +-} +- + static ssize_t bfq_max_budget_store(struct elevator_queue *e, + const char *page, size_t count) + { +@@ -4249,7 +5081,7 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, + int ret = bfq_var_store(&__data, (page), count); + + if (__data == 0) +- bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); ++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); + else { + if (__data > INT_MAX) + __data = INT_MAX; +@@ -4261,6 +5093,10 @@ static ssize_t bfq_max_budget_store(struct elevator_queue *e, return ret; } -+/* ++/* + * Leaving this name to preserve name compatibility with cfq + * parameters, but this timeout is used for both sync and async. 
+ */ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, const char *page, size_t count) { -@@ -4264,13 +4718,31 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, +@@ -4273,9 +5109,27 @@ static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, else if (__data > INT_MAX) __data = INT_MAX; - bfqd->bfq_timeout[BLK_RW_SYNC] = msecs_to_jiffies(__data); + bfqd->bfq_timeout = msecs_to_jiffies(__data); if (bfqd->bfq_user_max_budget == 0) - bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); - - return ret; - } - +- bfqd->bfq_max_budget = bfq_estimated_max_budget(bfqd); ++ bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); ++ ++ return ret; ++} ++ +static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, + const char *page, size_t count) +{ @@ -4573,20 +6199,18 @@ index d1f648d..3bc1f8b 100644 + if (__data > 1) + __data = 1; + if (!bfqd->strict_guarantees && __data == 1 -+ && bfqd->bfq_slice_idle < msecs_to_jiffies(8)) -+ bfqd->bfq_slice_idle = msecs_to_jiffies(8); ++ && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) ++ bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; + + bfqd->strict_guarantees = __data; -+ -+ return ret; -+} -+ - static ssize_t bfq_low_latency_store(struct elevator_queue *e, - const char *page, size_t count) - { -@@ -4297,9 +4769,8 @@ static struct elv_fs_entry bfq_attrs[] = { + + return ret; + } +@@ -4305,10 +5159,10 @@ static struct elv_fs_entry bfq_attrs[] = { + BFQ_ATTR(back_seek_max), BFQ_ATTR(back_seek_penalty), BFQ_ATTR(slice_idle), ++ BFQ_ATTR(slice_idle_us), BFQ_ATTR(max_budget), - BFQ_ATTR(max_budget_async_rq), BFQ_ATTR(timeout_sync), @@ -4595,7 +6219,17 @@ index d1f648d..3bc1f8b 100644 BFQ_ATTR(low_latency), BFQ_ATTR(wr_coeff), BFQ_ATTR(wr_max_time), -@@ -4342,9 +4813,28 @@ static struct elevator_type iosched_bfq = { +@@ -4328,7 +5182,8 @@ static struct elevator_type iosched_bfq = { + #ifdef CONFIG_BFQ_GROUP_IOSCHED + .elevator_bio_merged_fn = bfq_bio_merged, + #endif +- .elevator_allow_merge_fn = bfq_allow_merge, ++ .elevator_allow_bio_merge_fn = bfq_allow_bio_merge, ++ .elevator_allow_rq_merge_fn = bfq_allow_rq_merge, + .elevator_dispatch_fn = bfq_dispatch_requests, + .elevator_add_req_fn = bfq_insert_request, + .elevator_activate_req_fn = bfq_activate_request, +@@ -4351,18 +5206,28 @@ static struct elevator_type iosched_bfq = { .elevator_owner = THIS_MODULE, }; @@ -4620,32 +6254,46 @@ index d1f648d..3bc1f8b 100644 static int __init bfq_init(void) { int ret; -+ char msg[50] = "BFQ I/O-scheduler: v8r3"; - - /* - * Can be 0 on HZ < 1000 setups. -@@ -4352,9 +4842,6 @@ static int __init bfq_init(void) - if (bfq_slice_idle == 0) - bfq_slice_idle = 1; - +- +- /* +- * Can be 0 on HZ < 1000 setups. +- */ +- if (bfq_slice_idle == 0) +- bfq_slice_idle = 1; +- - if (bfq_timeout_async == 0) - bfq_timeout_async = 1; -- ++ char msg[60] = "BFQ I/O-scheduler: v8r8"; + #ifdef CONFIG_BFQ_GROUP_IOSCHED ret = blkcg_policy_register(&blkcg_policy_bfq); - if (ret) -@@ -4370,23 +4857,34 @@ static int __init bfq_init(void) - * installed on the reference devices (see the comments before the - * definitions of the two arrays). +@@ -4375,27 +5240,46 @@ static int __init bfq_init(void) + goto err_pol_unreg; + + /* +- * Times to load large popular applications for the typical systems +- * installed on the reference devices (see the comments before the +- * definitions of the two arrays). 
++ * Times to load large popular applications for the typical ++ * systems installed on the reference devices (see the ++ * comments before the definitions of the next two ++ * arrays). Actually, we use slightly slower values, as the ++ * estimated peak rate tends to be smaller than the actual ++ * peak rate. The reason for this last fact is that estimates ++ * are computed over much shorter time intervals than the long ++ * intervals typically used for benchmarking. Why? First, to ++ * adapt more quickly to variations. Second, because an I/O ++ * scheduler cannot rely on a peak-rate-evaluation workload to ++ * be run for a long time. */ - T_slow[0] = msecs_to_jiffies(2600); - T_slow[1] = msecs_to_jiffies(1000); - T_fast[0] = msecs_to_jiffies(5500); - T_fast[1] = msecs_to_jiffies(2000); -+ T_slow[0] = msecs_to_jiffies(3500); -+ T_slow[1] = msecs_to_jiffies(1500); -+ T_fast[0] = msecs_to_jiffies(8000); -+ T_fast[1] = msecs_to_jiffies(3000); ++ T_slow[0] = msecs_to_jiffies(3500); /* actually 4 sec */ ++ T_slow[1] = msecs_to_jiffies(6000); /* actually 6.5 sec */ ++ T_fast[0] = msecs_to_jiffies(7000); /* actually 8 sec */ ++ T_fast[1] = msecs_to_jiffies(2500); /* actually 3 sec */ /* - * Thresholds that determine the switch between speed classes (see @@ -4679,50 +6327,145 @@ index d1f648d..3bc1f8b 100644 return 0; diff --git a/block/bfq-sched.c b/block/bfq-sched.c -index a64fec1..7d73b9d 100644 +index a5ed694..2e9dc59 100644 --- a/block/bfq-sched.c +++ b/block/bfq-sched.c -@@ -7,9 +7,11 @@ +@@ -7,28 +7,166 @@ * Copyright (C) 2008 Fabio Checconi * Paolo Valente * - * Copyright (C) 2010 Paolo Valente -+ * Copyright (C) 2016 Paolo Valente - */ - ++ * Copyright (C) 2015 Paolo Valente ++ * ++ * Copyright (C) 2016 Paolo Valente ++ */ ++ +static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); + - #ifdef CONFIG_BFQ_GROUP_IOSCHED - #define for_each_entity(entity) \ - for (; entity ; entity = entity->parent) -@@ -22,8 +24,6 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, - int extract, - struct bfq_data *bfqd); - --static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); -- - static void bfq_update_budget(struct bfq_entity *next_in_service) - { - struct bfq_entity *bfqg_entity; -@@ -48,6 +48,7 @@ static void bfq_update_budget(struct bfq_entity *next_in_service) - static int bfq_update_next_in_service(struct bfq_sched_data *sd) - { - struct bfq_entity *next_in_service; ++/** ++ * bfq_gt - compare two timestamps. ++ * @a: first ts. ++ * @b: second ts. ++ * ++ * Return @a > @b, dealing with wrapping correctly. ++ */ ++static int bfq_gt(u64 a, u64 b) ++{ ++ return (s64)(a - b) > 0; ++} ++ ++static struct bfq_entity *bfq_root_active_entity(struct rb_root *tree) ++{ ++ struct rb_node *node = tree->rb_node; ++ ++ return rb_entry(node, struct bfq_entity, rb_node); ++} ++ ++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd); ++ ++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service); ++ ++/** ++ * bfq_update_next_in_service - update sd->next_in_service ++ * @sd: sched_data for which to perform the update. ++ * @new_entity: if not NULL, pointer to the entity whose activation, ++ * requeueing or repositionig triggered the invocation of ++ * this function. ++ * ++ * This function is called to update sd->next_in_service, which, in ++ * its turn, may change as a consequence of the insertion or ++ * extraction of an entity into/from one of the active trees of ++ * sd. 
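
bfq_gt(), moved to the top of bfq-sched.c in the hunks above, is the classic wraparound-safe comparison: subtracting in unsigned arithmetic and testing the sign of the result stays correct across an overflow of the 64-bit virtual timestamps, as long as the two values are less than 2^63 apart. A quick demonstration:

    #include <stdio.h>
    #include <stdint.h>

    static int ts_gt(uint64_t a, uint64_t b)   /* bfq_gt(), restated */
    {
        return (int64_t)(a - b) > 0;
    }

    int main(void)
    {
        uint64_t before_wrap = UINT64_MAX - 5; /* about to overflow */
        uint64_t after_wrap  = 10;             /* already wrapped   */

        /* a naive '>' prints 0 here; the signed difference prints 1 */
        printf("naive %d, ts_gt %d\n",
               after_wrap > before_wrap, ts_gt(after_wrap, before_wrap));
        return 0;
    }
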
These insertions/extractions occur as a consequence of ++ * activations/deactivations of entities, with some activations being ++ * 'true' activations, and other activations being requeueings (i.e., ++ * implementing the second, requeueing phase of the mechanism used to ++ * reposition an entity in its active tree; see comments on ++ * __bfq_activate_entity and __bfq_requeue_entity for details). In ++ * both the last two activation sub-cases, new_entity points to the ++ * just activated or requeued entity. ++ * ++ * Returns true if sd->next_in_service changes in such a way that ++ * entity->parent may become the next_in_service for its parent ++ * entity. + */ ++static bool bfq_update_next_in_service(struct bfq_sched_data *sd, ++ struct bfq_entity *new_entity) ++{ ++ struct bfq_entity *next_in_service = sd->next_in_service; + struct bfq_queue *bfqq; - - if (sd->in_service_entity) - /* will update/requeue at the end of service */ -@@ -65,14 +66,29 @@ static int bfq_update_next_in_service(struct bfq_sched_data *sd) - - if (next_in_service) - bfq_update_budget(next_in_service); -+ else -+ goto exit; ++ bool parent_sched_may_change = false; ++ ++ /* ++ * If this update is triggered by the activation, requeueing ++ * or repositiong of an entity that does not coincide with ++ * sd->next_in_service, then a full lookup in the active tree ++ * can be avoided. In fact, it is enough to check whether the ++ * just-modified entity has a higher priority than ++ * sd->next_in_service, or, even if it has the same priority ++ * as sd->next_in_service, is eligible and has a lower virtual ++ * finish time than sd->next_in_service. If this compound ++ * condition holds, then the new entity becomes the new ++ * next_in_service. Otherwise no change is needed. ++ */ ++ if (new_entity && new_entity != sd->next_in_service) { ++ /* ++ * Flag used to decide whether to replace ++ * sd->next_in_service with new_entity. Tentatively ++ * set to true, and left as true if ++ * sd->next_in_service is NULL. ++ */ ++ bool replace_next = true; ++ ++ /* ++ * If there is already a next_in_service candidate ++ * entity, then compare class priorities or timestamps ++ * to decide whether to replace sd->service_tree with ++ * new_entity. ++ */ ++ if (next_in_service) { ++ unsigned int new_entity_class_idx = ++ bfq_class_idx(new_entity); ++ struct bfq_service_tree *st = ++ sd->service_tree + new_entity_class_idx; ++ ++ /* ++ * For efficiency, evaluate the most likely ++ * sub-condition first. 
++ */ ++ replace_next = ++ (new_entity_class_idx == ++ bfq_class_idx(next_in_service) ++ && ++ !bfq_gt(new_entity->start, st->vtime) ++ && ++ bfq_gt(next_in_service->finish, ++ new_entity->finish)) ++ || ++ new_entity_class_idx < ++ bfq_class_idx(next_in_service); ++ } ++ ++ if (replace_next) ++ next_in_service = new_entity; ++ } else /* invoked because of a deactivation: lookup needed */ ++ next_in_service = bfq_lookup_next_entity(sd); ++ ++ if (next_in_service) { ++ parent_sched_may_change = !sd->next_in_service || ++ bfq_update_parent_budget(next_in_service); ++ } ++ ++ sd->next_in_service = next_in_service; ++ ++ if (!next_in_service) ++ return parent_sched_may_change; + bfqq = bfq_entity_to_bfqq(next_in_service); + if (bfqq) + bfq_log_bfqq(bfqq->bfqd, bfqq, + "update_next_in_service: chosen this queue"); + #ifdef CONFIG_BFQ_GROUP_IOSCHED +-#define for_each_entity(entity) \ + else { + struct bfq_group *bfqg = + container_of(next_in_service, @@ -4731,24 +6474,165 @@ index a64fec1..7d73b9d 100644 + bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, + "update_next_in_service: chosen this entity"); + } -+exit: - return 1; ++#endif ++ return parent_sched_may_change; ++} ++ ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++/* both next loops stop at one of the child entities of the root group */ ++#define for_each_entity(entity) \ + for (; entity ; entity = entity->parent) + + #define for_each_entity_safe(entity, parent) \ + for (; entity && ({ parent = entity->parent; 1; }); entity = parent) + +- +-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, +- int extract, +- struct bfq_data *bfqd); +- +-static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); +- +-static void bfq_update_budget(struct bfq_entity *next_in_service) ++/* ++ * Returns true if this budget changes may let next_in_service->parent ++ * become the next_in_service entity for its parent entity. ++ */ ++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) + { + struct bfq_entity *bfqg_entity; + struct bfq_group *bfqg; + struct bfq_sched_data *group_sd; ++ bool ret = false; + + BUG_ON(!next_in_service); + +@@ -41,60 +179,68 @@ static void bfq_update_budget(struct bfq_entity *next_in_service) + * as it must never become an in-service entity. + */ + bfqg_entity = bfqg->my_entity; +- if (bfqg_entity) ++ if (bfqg_entity) { ++ if (bfqg_entity->budget > next_in_service->budget) ++ ret = true; + bfqg_entity->budget = next_in_service->budget; ++ } ++ ++ return ret; } - static void bfq_check_next_in_service(struct bfq_sched_data *sd, - struct bfq_entity *entity) +-static int bfq_update_next_in_service(struct bfq_sched_data *sd) ++/* ++ * This function tells whether entity stops being a candidate for next ++ * service, according to the following logic. ++ * ++ * This function is invoked for an entity that is about to be set in ++ * service. If such an entity is a queue, then the entity is no longer ++ * a candidate for next service (i.e, a candidate entity to serve ++ * after the in-service entity is expired). The function then returns ++ * true. ++ * ++ * In contrast, the entity could stil be a candidate for next service ++ * if it is not a queue, and has more than one child. In fact, even if ++ * one of its children is about to be set in service, other children ++ * may still be the next to serve. As a consequence, a non-queue ++ * entity is not a candidate for next-service only if it has only one ++ * child. And only if this condition holds, then the function returns ++ * true for a non-queue entity. 
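
The compound condition above decides whether a just-activated entity steals the cached next_in_service slot without a full tree lookup: it must belong to a strictly higher-priority class, or to the same class while being eligible (its start not after the tree's virtual time) and finishing earlier than the current candidate. Pulled out into a standalone predicate (signed-difference comparison as in bfq_gt(); names are mine):

    #include <stdint.h>

    static int ts_gt(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) > 0;
    }

    /* a lower class index means higher priority, as in the trees */
    static int replaces_candidate(unsigned int new_class, uint64_t new_start,
                                  uint64_t new_finish, unsigned int cur_class,
                                  uint64_t cur_finish, uint64_t vtime)
    {
        return new_class < cur_class ||
               (new_class == cur_class &&
                !ts_gt(new_start, vtime) && ts_gt(cur_finish, new_finish));
    }
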
++ */ ++static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) { -- BUG_ON(sd->next_in_service != entity); -+ WARN_ON(sd->next_in_service != entity); +- struct bfq_entity *next_in_service; ++ struct bfq_group *bfqg; + +- if (sd->in_service_entity) +- /* will update/requeue at the end of service */ +- return 0; ++ if (bfq_entity_to_bfqq(entity)) ++ return true; + +- /* +- * NOTE: this can be improved in many ways, such as returning +- * 1 (and thus propagating upwards the update) only when the +- * budget changes, or caching the bfqq that will be scheduled +- * next from this subtree. By now we worry more about +- * correctness than about performance... +- */ +- next_in_service = bfq_lookup_next_entity(sd, 0, NULL); +- sd->next_in_service = next_in_service; ++ bfqg = container_of(entity, struct bfq_group, entity); + +- if (next_in_service) +- bfq_update_budget(next_in_service); ++ BUG_ON(bfqg == ((struct bfq_data *)(bfqg->bfqd))->root_group); ++ BUG_ON(bfqg->active_entities == 0); ++ if (bfqg->active_entities == 1) ++ return true; + +- return 1; ++ return false; } - #else + +-static void bfq_check_next_in_service(struct bfq_sched_data *sd, +- struct bfq_entity *entity) +-{ +- BUG_ON(sd->next_in_service != entity); +-} +-#else ++#else /* CONFIG_BFQ_GROUP_IOSCHED */ #define for_each_entity(entity) \ -@@ -151,20 +167,35 @@ static u64 bfq_delta(unsigned long service, unsigned long weight) + for (; entity ; entity = NULL) + + #define for_each_entity_safe(entity, parent) \ + for (parent = NULL; entity ; entity = parent) + +-static int bfq_update_next_in_service(struct bfq_sched_data *sd) ++static bool bfq_update_parent_budget(struct bfq_entity *next_in_service) + { +- return 0; ++ return false; + } + +-static void bfq_check_next_in_service(struct bfq_sched_data *sd, +- struct bfq_entity *entity) ++static bool bfq_no_longer_next_in_service(struct bfq_entity *entity) + { ++ return true; + } + +-static void bfq_update_budget(struct bfq_entity *next_in_service) +-{ +-} +-#endif ++#endif /* CONFIG_BFQ_GROUP_IOSCHED */ + + /* + * Shift for timestamp calculations. This actually limits the maximum +@@ -105,18 +251,6 @@ static void bfq_update_budget(struct bfq_entity *next_in_service) + */ + #define WFQ_SERVICE_SHIFT 22 + +-/** +- * bfq_gt - compare two timestamps. +- * @a: first ts. +- * @b: second ts. +- * +- * Return @a > @b, dealing with wrapping correctly. 
+- */ +-static int bfq_gt(u64 a, u64 b) +-{ +- return (s64)(a - b) > 0; +-} +- + static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity) + { + struct bfq_queue *bfqq = NULL; +@@ -151,20 +285,36 @@ static u64 bfq_delta(unsigned long service, unsigned long weight) static void bfq_calc_finish(struct bfq_entity *entity, unsigned long service) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -- -+ unsigned long long start, finish, delta ; ++ unsigned long long start, finish, delta; + BUG_ON(entity->weight == 0); entity->finish = entity->start + @@ -4782,7 +6666,34 @@ index a64fec1..7d73b9d 100644 } } -@@ -386,8 +417,6 @@ static void bfq_active_insert(struct bfq_service_tree *st, +@@ -293,10 +443,26 @@ static void bfq_update_min(struct bfq_entity *entity, struct rb_node *node) + static void bfq_update_active_node(struct rb_node *node) + { + struct bfq_entity *entity = rb_entry(node, struct bfq_entity, rb_node); ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); + + entity->min_start = entity->start; + bfq_update_min(entity, node->rb_right); + bfq_update_min(entity, node->rb_left); ++ ++ if (bfqq) { ++ bfq_log_bfqq(bfqq->bfqd, bfqq, ++ "update_active_node: new min_start %llu", ++ ((entity->min_start>>10)*1000)>>12); ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++ } else { ++ struct bfq_group *bfqg = ++ container_of(entity, struct bfq_group, entity); ++ ++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, ++ "update_active_node: new min_start %llu", ++ ((entity->min_start>>10)*1000)>>12); ++#endif ++ } + } + + /** +@@ -386,8 +552,6 @@ static void bfq_active_insert(struct bfq_service_tree *st, BUG_ON(!bfqg); BUG_ON(!bfqd); bfqg->active_entities++; @@ -4791,16 +6702,16 @@ index a64fec1..7d73b9d 100644 } #endif } -@@ -399,7 +428,7 @@ static void bfq_active_insert(struct bfq_service_tree *st, +@@ -399,7 +563,7 @@ static void bfq_active_insert(struct bfq_service_tree *st, static unsigned short bfq_ioprio_to_weight(int ioprio) { BUG_ON(ioprio < 0 || ioprio >= IOPRIO_BE_NR); - return IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - ioprio; -+ return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF ; ++ return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF; } /** -@@ -422,9 +451,9 @@ static void bfq_get_entity(struct bfq_entity *entity) +@@ -422,9 +586,9 @@ static void bfq_get_entity(struct bfq_entity *entity) struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); if (bfqq) { @@ -4812,7 +6723,7 @@ index a64fec1..7d73b9d 100644 } } -@@ -499,10 +528,6 @@ static void bfq_active_extract(struct bfq_service_tree *st, +@@ -499,10 +663,6 @@ static void bfq_active_extract(struct bfq_service_tree *st, BUG_ON(!bfqd); BUG_ON(!bfqg->active_entities); bfqg->active_entities--; @@ -4823,7 +6734,13 @@ index a64fec1..7d73b9d 100644 } #endif } -@@ -552,7 +577,7 @@ static void bfq_forget_entity(struct bfq_service_tree *st, +@@ -547,12 +707,12 @@ static void bfq_forget_entity(struct bfq_service_tree *st, + + BUG_ON(!entity->on_st); + +- entity->on_st = 0; ++ entity->on_st = false; + st->wsum -= entity->weight; if (bfqq) { sd = entity->sched_data; bfq_log_bfqq(bfqq->bfqd, bfqq, "forget_entity: %p %d", @@ -4832,7 +6749,7 @@ index a64fec1..7d73b9d 100644 bfq_put_queue(bfqq); } } -@@ -602,7 +627,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, +@@ -602,7 +762,7 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, if (entity->prio_changed) { struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); @@ -4841,13 +6758,9 @@ index a64fec1..7d73b9d 100644 struct bfq_data *bfqd = NULL; 
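The bfq_calc_finish()/bfq_delta() hunks touched here implement the B-WF2Q+ timestamp rule F_i = S_i + budget/weight in fixed point, and the rewritten bfq_ioprio_to_weight() maps ioprio 0..7 onto weights 80..10. A minimal sketch of that arithmetic, using the same constants as the patch (everything outside those constants is illustrative):

```c
#include <stdio.h>
#include <stdint.h>

#define WFQ_SERVICE_SHIFT 22	/* fixed-point shift, as in the patch */
#define IOPRIO_BE_NR 8
#define BFQ_WEIGHT_CONVERSION_COEFF 10

/* service/weight in fixed point: what bfq_delta() computes */
static uint64_t delta(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << WFQ_SERVICE_SHIFT) / weight;
}

/* The rewritten mapping: ioprio 0 (highest) -> 80, ioprio 7 -> 10 */
static unsigned short ioprio_to_weight(int ioprio)
{
	return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF;
}

int main(void)
{
	uint64_t start = 1000, budget = 8192;
	unsigned short w = ioprio_to_weight(4);	/* default ioprio */

	/* F_i = S_i + budget/weight, as bfq_calc_finish() does */
	uint64_t finish = start + delta(budget, w);
	printf("weight=%u finish=%llu\n", w, (unsigned long long)finish);
	return 0;
}
```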
struct rb_root *root; #ifdef CONFIG_BFQ_GROUP_IOSCHED -@@ -628,12 +653,14 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, - if (entity->new_weight != entity->orig_weight) { - if (entity->new_weight < BFQ_MIN_WEIGHT || +@@ -630,7 +790,10 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, entity->new_weight > BFQ_MAX_WEIGHT) { -- printk(KERN_CRIT "update_weight_prio: " -- "new_weight %d\n", -+ pr_crit("update_weight_prio: new_weight %d\n", + pr_crit("update_weight_prio: new_weight %d\n", entity->new_weight); - BUG(); + if (entity->new_weight < BFQ_MIN_WEIGHT) @@ -4855,12 +6768,9 @@ index a64fec1..7d73b9d 100644 + else + entity->new_weight = BFQ_MAX_WEIGHT; } -- entity->orig_weight = entity->new_weight; -+ entity->orig_weight = entity->new_weight; + entity->orig_weight = entity->new_weight; if (bfqq) - bfqq->ioprio = - bfq_weight_to_ioprio(entity->orig_weight); -@@ -662,6 +689,13 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, +@@ -661,6 +824,13 @@ __bfq_entity_update_weight_prio(struct bfq_service_tree *old_st, * associated with its new weight. */ if (prev_weight != new_weight) { @@ -4874,7 +6784,7 @@ index a64fec1..7d73b9d 100644 root = bfqq ? &bfqd->queue_weights_tree : &bfqd->group_weights_tree; bfq_weights_tree_remove(bfqd, entity, root); -@@ -708,7 +742,7 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) +@@ -707,7 +877,7 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) st = bfq_entity_service_tree(entity); entity->service += served; @@ -4883,7 +6793,7 @@ index a64fec1..7d73b9d 100644 BUG_ON(st->wsum == 0); st->vtime += bfq_delta(served, st->wsum); -@@ -717,31 +751,69 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) +@@ -716,234 +886,574 @@ static void bfq_bfqq_served(struct bfq_queue *bfqq, int served) #ifdef CONFIG_BFQ_GROUP_IOSCHED bfqg_stats_set_start_empty_time(bfqq_group(bfqq)); #endif @@ -4937,107 +6847,34 @@ index a64fec1..7d73b9d 100644 + if (time_ms > 0 && time_ms < timeout_ms) + tot_serv_to_charge = + (bfqd->bfq_max_budget * time_ms) / timeout_ms; -+ + +- bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); + if (tot_serv_to_charge < entity->service) + tot_serv_to_charge = entity->service; -- bfq_log_bfqq(bfqq->bfqd, bfqq, "charge_full_budget"); +- bfq_bfqq_served(bfqq, entity->budget - entity->service); + bfq_log_bfqq(bfqq->bfqd, bfqq, + "charge_time: %lu/%u ms, %d/%d/%d sectors", + time_ms, timeout_ms, entity->service, + tot_serv_to_charge, entity->budget); - -- bfq_bfqq_served(bfqq, entity->budget - entity->service); ++ + /* Increase budget to avoid inconsistencies */ + if (tot_serv_to_charge > entity->budget) + entity->budget = tot_serv_to_charge; + + bfq_bfqq_served(bfqq, + max_t(int, 0, tot_serv_to_charge - entity->service)); - } - - /** - * __bfq_activate_entity - activate an entity. - * @entity: the entity being activated. -+ * @non_blocking_wait_rq: true if this entity was waiting for a request - * - * Called whenever an entity is activated, i.e., it is not active and one - * of its children receives a new request, or has to be reactivated due to -@@ -749,11 +821,16 @@ static void bfq_bfqq_charge_full_budget(struct bfq_queue *bfqq) - * service received if @entity is active) of the queue to calculate its - * timestamps. 
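The new bfq_bfqq_charge_time() above replaces full-budget charging with time-proportional charging: a queue that held the device for time_ms out of a timeout_ms budget window is charged that fraction of the maximum budget, but never less than the service it actually received. A toy version of the rule (function and parameter names here are illustrative, not the kernel's):

```c
#include <stdio.h>

/* Charge service proportional to the time the queue held the device,
 * instead of always charging a full budget. */
static int serv_to_charge(long time_ms, long timeout_ms,
			  int max_budget, int service_received)
{
	int tot = max_budget;	/* default: charge a full budget */

	if (time_ms > 0 && time_ms < timeout_ms)
		tot = (int)((long long)max_budget * time_ms / timeout_ms);
	if (tot < service_received)	/* never charge less than received */
		tot = service_received;
	return tot;
}

int main(void)
{
	/* held the device 30 ms of a 125 ms budget timeout */
	printf("%d\n", serv_to_charge(30, 125, 16384, 512)); /* 3932 */
	return 0;
}
```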
- */ --static void __bfq_activate_entity(struct bfq_entity *entity) -+static void __bfq_activate_entity(struct bfq_entity *entity, -+ bool non_blocking_wait_rq) - { - struct bfq_sched_data *sd = entity->sched_data; - struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++} ++ ++static void bfq_update_fin_time_enqueue(struct bfq_entity *entity, ++ struct bfq_service_tree *st, ++ bool backshifted) ++{ + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ bool backshifted = false; - -+ BUG_ON(!sd); -+ BUG_ON(!st); - if (entity == sd->in_service_entity) { - BUG_ON(entity->tree); - /* -@@ -771,45 +848,133 @@ static void __bfq_activate_entity(struct bfq_entity *entity) - * old start time. - */ - bfq_active_extract(st, entity); -- } else if (entity->tree == &st->idle) { -- /* -- * Must be on the idle tree, bfq_idle_extract() will -- * check for that. -- */ -- bfq_idle_extract(st, entity); -- entity->start = bfq_gt(st->vtime, entity->finish) ? -- st->vtime : entity->finish; - } else { -- /* -- * The finish time of the entity may be invalid, and -- * it is in the past for sure, otherwise the queue -- * would have been on the idle tree. -- */ -- entity->start = st->vtime; -- st->wsum += entity->weight; -- bfq_get_entity(entity); -+ unsigned long long min_vstart; - -- BUG_ON(entity->on_st); -- entity->on_st = 1; -+ /* See comments on bfq_fqq_update_budg_for_activation */ -+ if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { -+ backshifted = true; -+ min_vstart = entity->finish; -+ } else -+ min_vstart = st->vtime; ++ struct bfq_sched_data *sd = entity->sched_data; + -+ if (entity->tree == &st->idle) { -+ /* -+ * Must be on the idle tree, bfq_idle_extract() will -+ * check for that. -+ */ -+ bfq_idle_extract(st, entity); -+ entity->start = bfq_gt(min_vstart, entity->finish) ? -+ min_vstart : entity->finish; -+ } else { -+ /* -+ * The finish time of the entity may be invalid, and -+ * it is in the past for sure, otherwise the queue -+ * would have been on the idle tree. -+ */ -+ entity->start = min_vstart; -+ st->wsum += entity->weight; -+ bfq_get_entity(entity); -+ -+ BUG_ON(entity->on_st); -+ entity->on_st = 1; -+ } - } - - st = __bfq_entity_update_weight_prio(st, entity); - bfq_calc_finish(entity, entity->budget); ++ st = __bfq_entity_update_weight_prio(st, entity); ++ bfq_calc_finish(entity, entity->budget); + + /* + * If some queues enjoy backshifting for a while, then their @@ -5097,7 +6934,7 @@ index a64fec1..7d73b9d 100644 + } + } + - bfq_active_insert(st, entity); ++ bfq_active_insert(st, entity); + + if (bfqq) { + bfq_log_bfqq(bfqq->bfqd, bfqq, @@ -5113,70 +6950,466 @@ index a64fec1..7d73b9d 100644 + entity->start <= st->vtime ? "" : "non ", st); +#endif + } ++ BUG_ON(RB_EMPTY_ROOT(&st->active)); ++ BUG_ON(&st->active != &sd->service_tree->active && ++ &st->active != &(sd->service_tree+1)->active && ++ &st->active != &(sd->service_tree+2)->active); } /** - * bfq_activate_entity - activate an entity and its ancestors if necessary. - * @entity: the entity to activate. -+ * @non_blocking_wait_rq: true if this entity was waiting for a request +- * __bfq_activate_entity - activate an entity. ++ * __bfq_activate_entity - handle activation of entity. + * @entity: the entity being activated. ++ * @non_blocking_wait_rq: true if entity was waiting for a request ++ * ++ * Called for a 'true' activation, i.e., if entity is not active and ++ * one of its children receives a new request. * - * Activate @entity and all the entities on the path from it to the root. 
+- * Called whenever an entity is activated, i.e., it is not active and one +- * of its children receives a new request, or has to be reactivated due to +- * budget exhaustion. It uses the current budget of the entity (and the +- * service received if @entity is active) of the queue to calculate its +- * timestamps. ++ * Basically, this function updates the timestamps of entity and ++ * inserts entity into its active tree, ater possible extracting it ++ * from its idle tree. + */ +-static void __bfq_activate_entity(struct bfq_entity *entity) ++static void __bfq_activate_entity(struct bfq_entity *entity, ++ bool non_blocking_wait_rq) + { + struct bfq_sched_data *sd = entity->sched_data; + struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ bool backshifted = false; ++ unsigned long long min_vstart; + +- if (entity == sd->in_service_entity) { +- BUG_ON(entity->tree); +- /* +- * If we are requeueing the current entity we have +- * to take care of not charging to it service it has +- * not received. +- */ +- bfq_calc_finish(entity, entity->service); +- entity->start = entity->finish; +- sd->in_service_entity = NULL; +- } else if (entity->tree == &st->active) { +- /* +- * Requeueing an entity due to a change of some +- * next_in_service entity below it. We reuse the +- * old start time. +- */ +- bfq_active_extract(st, entity); +- } else if (entity->tree == &st->idle) { ++ BUG_ON(!sd); ++ BUG_ON(!st); ++ ++ /* See comments on bfq_fqq_update_budg_for_activation */ ++ if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { ++ backshifted = true; ++ min_vstart = entity->finish; ++ } else ++ min_vstart = st->vtime; ++ ++ if (entity->tree == &st->idle) { + /* + * Must be on the idle tree, bfq_idle_extract() will + * check for that. + */ + bfq_idle_extract(st, entity); +- entity->start = bfq_gt(st->vtime, entity->finish) ? +- st->vtime : entity->finish; ++ entity->start = bfq_gt(min_vstart, entity->finish) ? ++ min_vstart : entity->finish; + } else { + /* + * The finish time of the entity may be invalid, and + * it is in the past for sure, otherwise the queue + * would have been on the idle tree. + */ +- entity->start = st->vtime; ++ entity->start = min_vstart; + st->wsum += entity->weight; + bfq_get_entity(entity); + +- BUG_ON(entity->on_st); +- entity->on_st = 1; ++ BUG_ON(entity->on_st && bfqq); ++ ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++ if (entity->on_st && !bfqq) { ++ struct bfq_group *bfqg = ++ container_of(entity, struct bfq_group, ++ entity); ++ ++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, ++ bfqg, ++ "activate bug, class %d in_service %p", ++ bfq_class_idx(entity), sd->in_service_entity); ++ } ++#endif ++ BUG_ON(entity->on_st && !bfqq); ++ entity->on_st = true; + } + +- st = __bfq_entity_update_weight_prio(st, entity); +- bfq_calc_finish(entity, entity->budget); +- bfq_active_insert(st, entity); ++ bfq_update_fin_time_enqueue(entity, st, backshifted); + } + + /** +- * bfq_activate_entity - activate an entity and its ancestors if necessary. +- * @entity: the entity to activate. ++ * __bfq_requeue_entity - handle requeueing or repositioning of an entity. ++ * @entity: the entity being requeued or repositioned. ++ * ++ * Requeueing is needed if this entity stops being served, which ++ * happens if a leaf descendant entity has expired. On the other hand, ++ * repositioning is needed if the next_inservice_entity for the child ++ * entity has changed. See the comments inside the function for ++ * details. 
+ * +- * Activate @entity and all the entities on the path from it to the root. ++ * Basically, this function: 1) removes entity from its active tree if ++ * present there, 2) updates the timestamps of entity and 3) inserts ++ * entity back into its active tree (in the new, right position for ++ * the new values of the timestamps). */ -static void bfq_activate_entity(struct bfq_entity *entity) -+static void bfq_activate_entity(struct bfq_entity *entity, -+ bool non_blocking_wait_rq) ++static void __bfq_requeue_entity(struct bfq_entity *entity) ++{ ++ struct bfq_sched_data *sd = entity->sched_data; ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ ++ BUG_ON(!sd); ++ BUG_ON(!st); ++ ++ BUG_ON(entity != sd->in_service_entity && ++ entity->tree != &st->active); ++ ++ if (entity == sd->in_service_entity) { ++ /* ++ * We are requeueing the current in-service entity, ++ * which may have to be done for one of the following ++ * reasons: ++ * - entity represents the in-service queue, and the ++ * in-service queue is being requeued after an ++ * expiration; ++ * - entity represents a group, and its budget has ++ * changed because one of its child entities has ++ * just been either activated or requeued for some ++ * reason; the timestamps of the entity need then to ++ * be updated, and the entity needs to be enqueued ++ * or repositioned accordingly. ++ * ++ * In particular, before requeueing, the start time of ++ * the entity must be moved forward to account for the ++ * service that the entity has received while in ++ * service. This is done by the next instructions. The ++ * finish time will then be updated according to this ++ * new value of the start time, and to the budget of ++ * the entity. ++ */ ++ bfq_calc_finish(entity, entity->service); ++ entity->start = entity->finish; ++ BUG_ON(entity->tree && entity->tree != &st->active); ++ /* ++ * In addition, if the entity had more than one child ++ * when set in service, then was not extracted from ++ * the active tree. This implies that the position of ++ * the entity in the active tree may need to be ++ * changed now, because we have just updated the start ++ * time of the entity, and we will update its finish ++ * time in a moment (the requeueing is then, more ++ * precisely, a repositioning in this case). To ++ * implement this repositioning, we: 1) dequeue the ++ * entity here, 2) update the finish time and ++ * requeue the entity according to the new ++ * timestamps below. ++ */ ++ if (entity->tree) ++ bfq_active_extract(st, entity); ++ } else { /* The entity is already active, and not in service */ ++ /* ++ * In this case, this function gets called only if the ++ * next_in_service entity below this entity has ++ * changed, and this change has caused the budget of ++ * this entity to change, which, finally implies that ++ * the finish time of this entity must be ++ * updated. Such an update may cause the scheduling, ++ * i.e., the position in the active tree, of this ++ * entity to change. We handle this change by: 1) ++ * dequeueing the entity here, 2) updating the finish ++ * time and requeueing the entity according to the new ++ * timestamps below. This is the same approach as the ++ * non-extracted-entity sub-case above. 
++ */ ++ bfq_active_extract(st, entity); ++ } ++ ++ bfq_update_fin_time_enqueue(entity, st, false); ++} ++ ++static void __bfq_activate_requeue_entity(struct bfq_entity *entity, ++ struct bfq_sched_data *sd, ++ bool non_blocking_wait_rq) ++{ ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ ++ if (sd->in_service_entity == entity || entity->tree == &st->active) ++ /* ++ * in service or already queued on the active tree, ++ * requeue or reposition ++ */ ++ __bfq_requeue_entity(entity); ++ else ++ /* ++ * Not in service and not queued on its active tree: ++ * the activity is idle and this is a true activation. ++ */ ++ __bfq_activate_entity(entity, non_blocking_wait_rq); ++} ++ ++ ++/** ++ * bfq_activate_entity - activate or requeue an entity representing a bfq_queue, ++ * and activate, requeue or reposition all ancestors ++ * for which such an update becomes necessary. ++ * @entity: the entity to activate. ++ * @non_blocking_wait_rq: true if this entity was waiting for a request ++ * @requeue: true if this is a requeue, which implies that bfqq is ++ * being expired; thus ALL its ancestors stop being served and must ++ * therefore be requeued ++ */ ++static void bfq_activate_requeue_entity(struct bfq_entity *entity, ++ bool non_blocking_wait_rq, ++ bool requeue) { struct bfq_sched_data *sd; for_each_entity(entity) { - __bfq_activate_entity(entity); +- + BUG_ON(!entity); -+ __bfq_activate_entity(entity, non_blocking_wait_rq); - sd = entity->sched_data; - if (!bfq_update_next_in_service(sd)) -@@ -890,23 +1055,24 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) +- if (!bfq_update_next_in_service(sd)) +- /* +- * No need to propagate the activation to the +- * upper entities, as they will be updated when +- * the in-service entity is rescheduled. +- */ ++ __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq); ++ ++ BUG_ON(RB_EMPTY_ROOT(&sd->service_tree->active) && ++ RB_EMPTY_ROOT(&(sd->service_tree+1)->active) && ++ RB_EMPTY_ROOT(&(sd->service_tree+2)->active)); ++ ++ if (!bfq_update_next_in_service(sd, entity) && !requeue) { ++ BUG_ON(!sd->next_in_service); + break; ++ } ++ BUG_ON(!sd->next_in_service); + } + } - if (!__bfq_deactivate_entity(entity, requeue)) + /** + * __bfq_deactivate_entity - deactivate an entity from its service tree. + * @entity: the entity to deactivate. +- * @requeue: if false, the entity will not be put into the idle tree. +- * +- * Deactivate an entity, independently from its previous state. If the +- * entity was not on a service tree just return, otherwise if it is on +- * any scheduler tree, extract it from that tree, and if necessary +- * and if the caller did not specify @requeue, put it on the idle tree. ++ * @ins_into_idle_tree: if false, the entity will not be put into the ++ * idle tree. + * +- * Return %1 if the caller should update the entity hierarchy, i.e., +- * if the entity was in service or if it was the next_in_service for +- * its sched_data; return %0 otherwise. ++ * Deactivates an entity, independently from its previous state. Must ++ * be invoked only if entity is on a service tree. Extracts the entity ++ * from that tree, and if necessary and allowed, puts it on the idle ++ * tree. 
+ */ +-static int __bfq_deactivate_entity(struct bfq_entity *entity, int requeue) ++static bool __bfq_deactivate_entity(struct bfq_entity *entity, ++ bool ins_into_idle_tree) + { + struct bfq_sched_data *sd = entity->sched_data; +- struct bfq_service_tree *st; +- int was_in_service; +- int ret = 0; +- +- if (sd == NULL || !entity->on_st) /* never activated, or inactive */ +- return 0; ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ bool was_in_service = entity == sd->in_service_entity; + +- st = bfq_entity_service_tree(entity); +- was_in_service = entity == sd->in_service_entity; ++ if (!entity->on_st) { /* entity never activated, or already inactive */ ++ BUG_ON(entity == entity->sched_data->in_service_entity); ++ return false; ++ } + +- BUG_ON(was_in_service && entity->tree); ++ BUG_ON(was_in_service && entity->tree && entity->tree != &st->active); + +- if (was_in_service) { ++ if (was_in_service) + bfq_calc_finish(entity, entity->service); +- sd->in_service_entity = NULL; +- } else if (entity->tree == &st->active) ++ ++ if (entity->tree == &st->active) + bfq_active_extract(st, entity); +- else if (entity->tree == &st->idle) ++ else if (!was_in_service && entity->tree == &st->idle) + bfq_idle_extract(st, entity); + else if (entity->tree) + BUG(); + +- if (was_in_service || sd->next_in_service == entity) +- ret = bfq_update_next_in_service(sd); +- +- if (!requeue || !bfq_gt(entity->finish, st->vtime)) ++ if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime)) + bfq_forget_entity(st, entity); + else + bfq_idle_insert(st, entity); + +- BUG_ON(sd->in_service_entity == entity); +- BUG_ON(sd->next_in_service == entity); +- +- return ret; ++ return true; + } + + /** +- * bfq_deactivate_entity - deactivate an entity. ++ * bfq_deactivate_entity - deactivate an entity representing a bfq_queue. + * @entity: the entity to deactivate. +- * @requeue: true if the entity can be put on the idle tree ++ * @ins_into_idle_tree: true if the entity can be put on the idle tree + */ +-static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) ++static void bfq_deactivate_entity(struct bfq_entity *entity, ++ bool ins_into_idle_tree, ++ bool expiration) + { + struct bfq_sched_data *sd; +- struct bfq_entity *parent; ++ struct bfq_entity *parent = NULL; + + for_each_entity_safe(entity, parent) { + sd = entity->sched_data; + +- if (!__bfq_deactivate_entity(entity, requeue)) ++ BUG_ON(sd == NULL); /* ++ * It would mean that this is the ++ * root group. ++ */ ++ ++ BUG_ON(expiration && entity != sd->in_service_entity); ++ ++ BUG_ON(entity != sd->in_service_entity && ++ entity->tree == ++ &bfq_entity_service_tree(entity)->active && ++ !sd->next_in_service); ++ ++ if (!__bfq_deactivate_entity(entity, ins_into_idle_tree)) { /* - * The parent entity is still backlogged, and - * we don't need to update it as it is still - * in service. -+ * next_in_service has not been changed, so -+ * no upwards update is needed ++ * Entity is not any tree any more, so, this ++ * deactivation is a no-op, and there is ++ * nothing to change for upper-level entities ++ * (in case of expiration, this can never ++ * happen). */ - break; +- break; ++ BUG_ON(expiration); /* ++ * entity cannot be already out of ++ * any tree ++ */ ++ return; ++ } - if (sd->next_in_service) +- if (sd->next_in_service) ++ if (sd->next_in_service == entity) /* - * The parent entity is still backlogged and - * the budgets on the path towards the root - * need to be updated. 
-+ * The parent entity is still backlogged, -+ * because next_in_service is not NULL, and -+ * next_in_service has been updated (see -+ * comment on the body of the above if): -+ * upwards update of the schedule is needed. ++ * entity was the next_in_service entity, ++ * then, since entity has just been ++ * deactivated, a new one must be found. */ - goto update; +- goto update; ++ bfq_update_next_in_service(sd, NULL); ++ ++ if (sd->next_in_service) { ++ /* ++ * The parent entity is still backlogged, ++ * because next_in_service is not NULL. So, no ++ * further upwards deactivation must be ++ * performed. Yet, next_in_service has ++ * changed. Then the schedule does need to be ++ * updated upwards. ++ */ ++ BUG_ON(sd->next_in_service == entity); ++ break; ++ } /* - * If we reach there the parent is no more backlogged and - * we want to propagate the dequeue upwards. -+ * If we get here, then the parent is no more backlogged and -+ * we want to propagate the deactivation upwards. ++ * If we get here, then the parent is no more ++ * backlogged and we need to propagate the ++ * deactivation upwards. Thus let the loop go on. */ - requeue = 1; - } -@@ -916,9 +1082,23 @@ static void bfq_deactivate_entity(struct bfq_entity *entity, int requeue) - update: +- requeue = 1; +- } + +- return; ++ /* ++ * Also let parent be queued into the idle tree on ++ * deactivation, to preserve service guarantees, and ++ * assuming that who invoked this function does not ++ * need parent entities too to be removed completely. ++ */ ++ ins_into_idle_tree = true; ++ } + +-update: ++ /* ++ * If the deactivation loop is fully executed, then there are ++ * no more entities to touch and next loop is not executed at ++ * all. Otherwise, requeue remaining entities if they are ++ * about to stop receiving service, or reposition them if this ++ * is not the case. ++ */ entity = parent; for_each_entity(entity) { - __bfq_activate_entity(entity); + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ __bfq_activate_entity(entity, false); ++ ++ /* ++ * Invoke __bfq_requeue_entity on entity, even if ++ * already active, to requeue/reposition it in the ++ * active tree (because sd->next_in_service has ++ * changed) ++ */ ++ __bfq_requeue_entity(entity); sd = entity->sched_data; +- if (!bfq_update_next_in_service(sd)) ++ BUG_ON(expiration && sd->in_service_entity != entity); ++ + if (bfqq) + bfq_log_bfqq(bfqq->bfqd, bfqq, + "invoking udpdate_next for this queue"); @@ -5190,33 +7423,195 @@ index a64fec1..7d73b9d 100644 + "invoking udpdate_next for this entity"); + } +#endif - if (!bfq_update_next_in_service(sd)) ++ if (!bfq_update_next_in_service(sd, entity) && ++ !expiration) ++ /* ++ * next_in_service unchanged or not causing ++ * any change in entity->parent->sd, and no ++ * requeueing needed for expiration: stop ++ * here. ++ */ break; } -@@ -997,10 +1177,11 @@ left: - * Update the virtual time in @st and return the first eligible entity - * it contains. + } + + /** +- * bfq_update_vtime - update vtime if necessary. ++ * bfq_calc_vtime_jump - compute the value to which the vtime should jump, ++ * if needed, to have at least one entity eligible. + * @st: the service tree to act upon. + * +- * If necessary update the service tree vtime to have at least one +- * eligible entity, skipping to its start time. Assumes that the +- * active tree of the device is not empty. 
+- * +- * NOTE: this hierarchical implementation updates vtimes quite often, +- * we may end up with reactivated processes getting timestamps after a +- * vtime skip done because we needed a ->first_active entity on some +- * intermediate node. ++ * Assumes that st is not empty. + */ +-static void bfq_update_vtime(struct bfq_service_tree *st) ++static u64 bfq_calc_vtime_jump(struct bfq_service_tree *st) + { +- struct bfq_entity *entry; +- struct rb_node *node = st->active.rb_node; ++ struct bfq_entity *root_entity = bfq_root_active_entity(&st->active); ++ ++ if (bfq_gt(root_entity->min_start, st->vtime)) { ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(root_entity); + +- entry = rb_entry(node, struct bfq_entity, rb_node); +- if (bfq_gt(entry->min_start, st->vtime)) { +- st->vtime = entry->min_start; ++ if (bfqq) ++ bfq_log_bfqq(bfqq->bfqd, bfqq, ++ "calc_vtime_jump: new value %llu", ++ root_entity->min_start); ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++ else { ++ struct bfq_group *bfqg = ++ container_of(root_entity, struct bfq_group, ++ entity); ++ ++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, ++ "calc_vtime_jump: new value %llu", ++ root_entity->min_start); ++ } ++#endif ++ return root_entity->min_start; ++ } ++ return st->vtime; ++} ++ ++static void bfq_update_vtime(struct bfq_service_tree *st, u64 new_value) ++{ ++ if (new_value > st->vtime) { ++ st->vtime = new_value; + bfq_forget_idle(st); + } + } +@@ -952,6 +1462,7 @@ static void bfq_update_vtime(struct bfq_service_tree *st) + * bfq_first_active_entity - find the eligible entity with + * the smallest finish time + * @st: the service tree to select from. ++ * @vtime: the system virtual to use as a reference for eligibility + * + * This function searches the first schedulable entity, starting from the + * root of the tree and going on the left every time on this side there is +@@ -959,7 +1470,8 @@ static void bfq_update_vtime(struct bfq_service_tree *st) + * the right is followed only if a) the left subtree contains no eligible + * entities and b) no eligible entity has been found yet. + */ +-static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) ++static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st, ++ u64 vtime) + { + struct bfq_entity *entry, *first = NULL; + struct rb_node *node = st->active.rb_node; +@@ -967,15 +1479,15 @@ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) + while (node) { + entry = rb_entry(node, struct bfq_entity, rb_node); + left: +- if (!bfq_gt(entry->start, st->vtime)) ++ if (!bfq_gt(entry->start, vtime)) + first = entry; + +- BUG_ON(bfq_gt(entry->min_start, st->vtime)); ++ BUG_ON(bfq_gt(entry->min_start, vtime)); + + if (node->rb_left) { + entry = rb_entry(node->rb_left, + struct bfq_entity, rb_node); +- if (!bfq_gt(entry->min_start, st->vtime)) { ++ if (!bfq_gt(entry->min_start, vtime)) { + node = node->rb_left; + goto left; + } +@@ -993,31 +1505,84 @@ static struct bfq_entity *bfq_first_active_entity(struct bfq_service_tree *st) + * __bfq_lookup_next_entity - return the first eligible entity in @st. + * @st: the service tree. + * +- * Update the virtual time in @st and return the first eligible entity +- * it contains. 
++ * If there is no in-service entity for the sched_data st belongs to, ++ * then return the entity that will be set in service if: ++ * 1) the parent entity this st belongs to is set in service; ++ * 2) no entity belonging to such parent entity undergoes a state change ++ * that would influence the timestamps of the entity (e.g., becomes idle, ++ * becomes backlogged, changes its budget, ...). ++ * ++ * In this first case, update the virtual time in @st too (see the ++ * comments on this update inside the function). ++ * ++ * In constrast, if there is an in-service entity, then return the ++ * entity that would be set in service if not only the above ++ * conditions, but also the next one held true: the currently ++ * in-service entity, on expiration, ++ * 1) gets a finish time equal to the current one, or ++ * 2) is not eligible any more, or ++ * 3) is idle. */ -static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, - bool force) +static struct bfq_entity * -+__bfq_lookup_next_entity(struct bfq_service_tree *st, bool force) ++__bfq_lookup_next_entity(struct bfq_service_tree *st, bool in_service ++#if 0 ++ , bool force ++#endif ++ ) { - struct bfq_entity *entity, *new_next_in_service = NULL; +- struct bfq_entity *entity, *new_next_in_service = NULL; ++ struct bfq_entity *entity ++#if 0 ++ , *new_next_in_service = NULL ++#endif ++ ; ++ u64 new_vtime; + struct bfq_queue *bfqq; if (RB_EMPTY_ROOT(&st->active)) return NULL; -@@ -1009,6 +1190,24 @@ static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, - entity = bfq_first_active_entity(st); - BUG_ON(bfq_gt(entity->start, st->vtime)); +- bfq_update_vtime(st); +- entity = bfq_first_active_entity(st); +- BUG_ON(bfq_gt(entity->start, st->vtime)); ++ /* ++ * Get the value of the system virtual time for which at ++ * least one entity is eligible. ++ */ ++ new_vtime = bfq_calc_vtime_jump(st); + + /* +- * If the chosen entity does not match with the sched_data's +- * next_in_service and we are forcedly serving the IDLE priority +- * class tree, bubble up budget update. ++ * If there is no in-service entity for the sched_data this ++ * active tree belongs to, then push the system virtual time ++ * up to the value that guarantees that at least one entity is ++ * eligible. If, instead, there is an in-service entity, then ++ * do not make any such update, because there is already an ++ * eligible entity, namely the in-service one (even if the ++ * entity is not on st, because it was extracted when set in ++ * service). 
+ */ +- if (unlikely(force && entity != entity->sched_data->next_in_service)) { +- new_next_in_service = entity; +- for_each_entity(new_next_in_service) +- bfq_update_budget(new_next_in_service); ++ if (!in_service) ++ bfq_update_vtime(st, new_vtime); ++ ++ entity = bfq_first_active_entity(st, new_vtime); ++ BUG_ON(bfq_gt(entity->start, new_vtime)); ++ ++ /* Log some information */ + bfqq = bfq_entity_to_bfqq(entity); + if (bfqq) + bfq_log_bfqq(bfqq->bfqd, bfqq, + "__lookup_next: start %llu vtime %llu st %p", + ((entity->start>>10)*1000)>>12, -+ ((st->vtime>>10)*1000)>>12, st); ++ ((new_vtime>>10)*1000)>>12, st); +#ifdef CONFIG_BFQ_GROUP_IOSCHED + else { + struct bfq_group *bfqg = @@ -5225,69 +7620,115 @@ index a64fec1..7d73b9d 100644 + bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, + "__lookup_next: start %llu vtime %llu st %p", + ((entity->start>>10)*1000)>>12, -+ ((st->vtime>>10)*1000)>>12, st); ++ ((new_vtime>>10)*1000)>>12, st); + } ++#endif ++ ++ BUG_ON(!entity); + + return entity; + } +@@ -1025,50 +1590,81 @@ static struct bfq_entity *__bfq_lookup_next_entity(struct bfq_service_tree *st, + /** + * bfq_lookup_next_entity - return the first eligible entity in @sd. + * @sd: the sched_data. +- * @extract: if true the returned entity will be also extracted from @sd. + * +- * NOTE: since we cache the next_in_service entity at each level of the +- * hierarchy, the complexity of the lookup can be decreased with +- * absolutely no effort just returning the cached next_in_service value; +- * we prefer to do full lookups to test the consistency of * the data +- * structures. ++ * This function is invoked when there has been a change in the trees ++ * for sd, and we need know what is the new next entity after this ++ * change. + */ +-static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, +- int extract, +- struct bfq_data *bfqd) ++static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd) + { + struct bfq_service_tree *st = sd->service_tree; +- struct bfq_entity *entity; +- int i = 0; +- +- BUG_ON(sd->in_service_entity); ++ struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); ++ struct bfq_entity *entity = NULL; ++ struct bfq_queue *bfqq; ++ int class_idx = 0; + +- if (bfqd && +- jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) { +- entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1, +- true); +- if (entity) { +- i = BFQ_IOPRIO_CLASSES - 1; +- bfqd->bfq_class_idle_last_service = jiffies; +- sd->next_in_service = entity; +- } ++ BUG_ON(!sd); ++ BUG_ON(!st); ++ /* ++ * Choose from idle class, if needed to guarantee a minimum ++ * bandwidth to this class (and if there is some active entity ++ * in idle class). This should also mitigate ++ * priority-inversion problems in case a low priority task is ++ * holding file system resources. 
++ */ ++ if (time_is_before_jiffies(sd->bfq_class_idle_last_service + ++ BFQ_CL_IDLE_TIMEOUT)) { ++ if (!RB_EMPTY_ROOT(&idle_class_st->active)) ++ class_idx = BFQ_IOPRIO_CLASSES - 1; ++ /* About to be served if backlogged, or not yet backlogged */ ++ sd->bfq_class_idle_last_service = jiffies; + } +- for (; i < BFQ_IOPRIO_CLASSES; i++) { +- entity = __bfq_lookup_next_entity(st + i, false); +- if (entity) { +- if (extract) { +- bfq_check_next_in_service(sd, entity); +- bfq_active_extract(st + i, entity); +- sd->in_service_entity = entity; +- sd->next_in_service = NULL; +- } ++ ++ /* ++ * Find the next entity to serve for the highest-priority ++ * class, unless the idle class needs to be served. ++ */ ++ for (; class_idx < BFQ_IOPRIO_CLASSES; class_idx++) { ++ entity = __bfq_lookup_next_entity(st + class_idx, ++ sd->in_service_entity); ++ ++ if (entity) + break; +- } + } + ++ BUG_ON(!entity && ++ (!RB_EMPTY_ROOT(&st->active) || !RB_EMPTY_ROOT(&(st+1)->active) || ++ !RB_EMPTY_ROOT(&(st+2)->active))); ++ ++ if (!entity) ++ return NULL; ++ ++ /* Log some information */ ++ bfqq = bfq_entity_to_bfqq(entity); ++ if (bfqq) ++ bfq_log_bfqq(bfqq->bfqd, bfqq, "chosen from st %p %d", ++ st + class_idx, class_idx); ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++ else { ++ struct bfq_group *bfqg = ++ container_of(entity, struct bfq_group, entity); ++ ++ bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, ++ "chosen from st %p %d", ++ st + class_idx, class_idx); + } +#endif + - /* - * If the chosen entity does not match with the sched_data's - * next_in_service and we are forcedly serving the IDLE priority -@@ -1045,10 +1244,28 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, - BUG_ON(sd->in_service_entity); - - if (bfqd && -- jiffies - bfqd->bfq_class_idle_last_service > BFQ_CL_IDLE_TIMEOUT) { -+ jiffies - bfqd->bfq_class_idle_last_service > -+ BFQ_CL_IDLE_TIMEOUT) { - entity = __bfq_lookup_next_entity(st + BFQ_IOPRIO_CLASSES - 1, - true); - if (entity) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "idle chosen from st %p %d", -+ st + BFQ_IOPRIO_CLASSES - 1, -+ BFQ_IOPRIO_CLASSES - 1) ; -+#ifdef CONFIG_BFQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg(bfqd, bfqg, -+ "idle chosen from st %p %d", -+ st + BFQ_IOPRIO_CLASSES - 1, -+ BFQ_IOPRIO_CLASSES - 1) ; -+ } -+#endif - i = BFQ_IOPRIO_CLASSES - 1; - bfqd->bfq_class_idle_last_service = jiffies; - sd->next_in_service = entity; -@@ -1057,6 +1274,24 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, - for (; i < BFQ_IOPRIO_CLASSES; i++) { - entity = __bfq_lookup_next_entity(st + i, false); - if (entity) { -+ if (bfqd != NULL) { -+ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); -+ if (bfqq) -+ bfq_log_bfqq(bfqd, bfqq, -+ "chosen from st %p %d", -+ st + i, i) ; -+#ifdef CONFIG_BFQ_GROUP_IOSCHED -+ else { -+ struct bfq_group *bfqg = -+ container_of(entity, struct bfq_group, entity); -+ -+ bfq_log_bfqg(bfqd, bfqg, -+ "chosen from st %p %d", -+ st + i, i) ; -+ } -+#endif -+ } -+ - if (extract) { - bfq_check_next_in_service(sd, entity); - bfq_active_extract(st + i, entity); -@@ -1070,6 +1305,13 @@ static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, return entity; } @@ -5301,10 +7742,19 @@ index a64fec1..7d73b9d 100644 /* * Get next queue for service. 
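The time_is_before_jiffies() check above grants the idle class a dispatch once BFQ_CL_IDLE_TIMEOUT has elapsed, which bounds how long low-priority queues can be starved. A rough userspace analogue of the same pattern, using a monotonic millisecond clock instead of jiffies (the timeout value is illustrative):

```c
#include <stdio.h>
#include <time.h>
#include <stdbool.h>

#define CL_IDLE_TIMEOUT_MS 2000	/* illustrative; the kernel uses jiffies */

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000L + ts.tv_nsec / 1000000L;
}

/* Grant the idle class one dispatch whenever it has waited too long. */
static bool idle_class_due(long *last_service_ms)
{
	long now = now_ms();

	if (now - *last_service_ms > CL_IDLE_TIMEOUT_MS) {
		*last_service_ms = now;	/* about to be served */
		return true;
	}
	return false;
}

int main(void)
{
	long last = now_ms() - 3000;	/* pretend idle class waited 3 s */

	printf("idle class due: %d\n", idle_class_due(&last));
	return 0;
}
```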
*/ -@@ -1086,7 +1328,36 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) +@@ -1083,58 +1679,208 @@ static struct bfq_queue *bfq_get_next_queue(struct bfq_data *bfqd) + if (bfqd->busy_queues == 0) + return NULL; ++ /* ++ * Traverse the path from the root to the leaf entity to ++ * serve. Set in service all the entities visited along the ++ * way. ++ */ sd = &bfqd->root_group->sched_data; for (; sd ; sd = entity->my_sched_data) { +- entity = bfq_lookup_next_entity(sd, 1, bfqd); +- BUG_ON(!entity); +#ifdef CONFIG_BFQ_GROUP_IOSCHED + if (entity) { + struct bfq_group *bfqg = @@ -5312,13 +7762,96 @@ index a64fec1..7d73b9d 100644 + + bfq_log_bfqg(bfqd, bfqg, + "get_next_queue: lookup in this group"); -+ } else ++ if (!sd->next_in_service) ++ pr_crit("get_next_queue: lookup in this group"); ++ } else { + bfq_log_bfqg(bfqd, bfqd->root_group, + "get_next_queue: lookup in root group"); ++ if (!sd->next_in_service) ++ pr_crit("get_next_queue: lookup in root group"); ++ } +#endif + - entity = bfq_lookup_next_entity(sd, 1, bfqd); ++ BUG_ON(!sd->next_in_service); + ++ /* ++ * WARNING. We are about to set the in-service entity ++ * to sd->next_in_service, i.e., to the (cached) value ++ * returned by bfq_lookup_next_entity(sd) the last ++ * time it was invoked, i.e., the last time when the ++ * service order in sd changed as a consequence of the ++ * activation or deactivation of an entity. In this ++ * respect, if we execute bfq_lookup_next_entity(sd) ++ * in this very moment, it may, although with low ++ * probability, yield a different entity than that ++ * pointed to by sd->next_in_service. This rare event ++ * happens in case there was no CLASS_IDLE entity to ++ * serve for sd when bfq_lookup_next_entity(sd) was ++ * invoked for the last time, while there is now one ++ * such entity. ++ * ++ * If the above event happens, then the scheduling of ++ * such entity in CLASS_IDLE is postponed until the ++ * service of the sd->next_in_service entity ++ * finishes. In fact, when the latter is expired, ++ * bfq_lookup_next_entity(sd) gets called again, ++ * exactly to update sd->next_in_service. ++ */ ++ ++ /* Make next_in_service entity become in_service_entity */ ++ entity = sd->next_in_service; ++ sd->in_service_entity = entity; ++ ++ /* ++ * Reset the accumulator of the amount of service that ++ * the entity is about to receive. ++ */ + entity->service = 0; ++ ++ /* ++ * If entity is no longer a candidate for next ++ * service, then we extract it from its active tree, ++ * for the following reason. To further boost the ++ * throughput in some special case, BFQ needs to know ++ * which is the next candidate entity to serve, while ++ * there is already an entity in service. In this ++ * respect, to make it easy to compute/update the next ++ * candidate entity to serve after the current ++ * candidate has been set in service, there is a case ++ * where it is necessary to extract the current ++ * candidate from its service tree. Such a case is ++ * when the entity just set in service cannot be also ++ * a candidate for next service. Details about when ++ * this conditions holds are reported in the comments ++ * on the function bfq_no_longer_next_in_service() ++ * invoked below. 
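The long comment above stresses that bfq_get_next_queue() no longer performs a fresh lookup at every level: it simply follows the cached sd->next_in_service pointers from the root scheduler data down to a leaf queue, marking each visited entity as in service along the way. A toy mirror of that traversal (structures heavily simplified, names illustrative):

```c
#include <stdio.h>
#include <stddef.h>

struct entity;
struct sched_data {
	struct entity *in_service;
	struct entity *next_in_service;
};
struct entity {
	const char *name;
	struct sched_data *my_sched_data;	/* NULL for a leaf queue */
};

/* Follow the cached choices down the hierarchy, no per-level lookup. */
static struct entity *set_in_service_path(struct sched_data *root)
{
	struct sched_data *sd;
	struct entity *e = NULL;

	for (sd = root; sd; sd = e->my_sched_data) {
		e = sd->next_in_service;	/* cached candidate */
		sd->in_service = e;
	}
	return e;	/* the leaf: the queue to serve */
}

int main(void)
{
	struct sched_data leaf_sd = { NULL, NULL };
	struct entity queue = { "bfqq", NULL };
	struct entity group = { "bfqg", &leaf_sd };
	struct sched_data root = { NULL, &group };

	leaf_sd.next_in_service = &queue;
	printf("serving: %s\n", set_in_service_path(&root)->name);
	return 0;
}
```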
++ */ ++ if (bfq_no_longer_next_in_service(entity)) ++ bfq_active_extract(bfq_entity_service_tree(entity), ++ entity); ++ ++ /* ++ * For the same reason why we may have just extracted ++ * entity from its active tree, we may need to update ++ * next_in_service for the sched_data of entity too, ++ * regardless of whether entity has been extracted. ++ * In fact, even if entity has not been extracted, a ++ * descendant entity may get extracted. Such an event ++ * would cause a change in next_in_service for the ++ * level of the descendant entity, and thus possibly ++ * back to upper levels. ++ * ++ * We cannot perform the resulting needed update ++ * before the end of this loop, because, to know which ++ * is the correct next-to-serve candidate entity for ++ * each level, we need first to find the leaf entity ++ * to set in service. In fact, only after we know ++ * which is the next-to-serve leaf entity, we can ++ * discover whether the parent entity of the leaf ++ * entity becomes the next-to-serve, and so on. ++ */ ++ ++ /* Log some information */ + bfqq = bfq_entity_to_bfqq(entity); + if (bfqq) + bfq_log_bfqq(bfqd, bfqq, @@ -5335,27 +7868,84 @@ index a64fec1..7d73b9d 100644 + } +#endif + - BUG_ON(!entity); - entity->service = 0; } -@@ -1113,9 +1384,7 @@ static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, + ++ BUG_ON(!entity); + bfqq = bfq_entity_to_bfqq(entity); + BUG_ON(!bfqq); + ++ /* ++ * We can finally update all next-to-serve entities along the ++ * path from the leaf entity just set in service to the root. ++ */ ++ for_each_entity(entity) { ++ struct bfq_sched_data *sd = entity->sched_data; ++ ++ if(!bfq_update_next_in_service(sd, NULL)) ++ break; ++ } ++ + return bfqq; + } + + static void __bfq_bfqd_reset_in_service(struct bfq_data *bfqd) + { ++ struct bfq_entity *entity = &bfqd->in_service_queue->entity; ++ + if (bfqd->in_service_bic) { + put_io_context(bfqd->in_service_bic->icq.ioc); + bfqd->in_service_bic = NULL; + } + ++ bfq_clear_bfqq_wait_request(bfqd->in_service_queue); ++ hrtimer_try_to_cancel(&bfqd->idle_slice_timer); + bfqd->in_service_queue = NULL; +- del_timer(&bfqd->idle_slice_timer); ++ ++ /* ++ * When this function is called, all in-service entities have ++ * been properly deactivated or requeued, so we can safely ++ * execute the final step: reset in_service_entity along the ++ * path from entity to the root. 
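__bfq_bfqd_reset_in_service() relies on for_each_entity() walking the parent pointers up to the root, so that in_service_entity can be cleared at every level once the queue has been deactivated or requeued. A compact sketch of that upward walk (toy types, not the kernel's):

```c
#include <stdio.h>
#include <stddef.h>

struct entity {
	struct entity *parent;
	struct entity **in_service_slot;	/* stand-in for sched_data */
};

/* Same shape as the kernel macro: walk from a leaf up to the root. */
#define for_each_entity(e) for (; (e); (e) = (e)->parent)

static void reset_in_service(struct entity *entity)
{
	for_each_entity(entity)
		*entity->in_service_slot = NULL;
}

int main(void)
{
	struct entity *leaf_slot = NULL, *root_slot = NULL;
	struct entity root = { NULL, &root_slot };
	struct entity leaf = { &root, &leaf_slot };

	leaf_slot = &leaf;	/* both levels currently in service */
	root_slot = &root;
	reset_in_service(&leaf);
	printf("cleared: %d %d\n", leaf_slot == NULL, root_slot == NULL);
	return 0;
}
```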
++ */ ++ for_each_entity(entity) ++ entity->sched_data->in_service_entity = NULL; + } + + static void bfq_deactivate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, +- int requeue) ++ bool ins_into_idle_tree, bool expiration) { struct bfq_entity *entity = &bfqq->entity; - if (bfqq == bfqd->in_service_queue) - __bfq_bfqd_reset_in_service(bfqd); - -+ BUG_ON(bfqq == bfqd->in_service_queue); - bfq_deactivate_entity(entity, requeue); +- bfq_deactivate_entity(entity, requeue); ++ bfq_deactivate_entity(entity, ins_into_idle_tree, expiration); } -@@ -1123,12 +1392,11 @@ static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) + static void bfq_activate_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) { struct bfq_entity *entity = &bfqq->entity; ++ struct bfq_service_tree *st = bfq_entity_service_tree(entity); ++ ++ BUG_ON(bfqq == bfqd->in_service_queue); ++ BUG_ON(entity->tree != &st->active && entity->tree != &st->idle && ++ entity->on_st); - bfq_activate_entity(entity); -+ bfq_activate_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq)); ++ bfq_activate_requeue_entity(entity, bfq_bfqq_non_blocking_wait_rq(bfqq), ++ false); + bfq_clear_bfqq_non_blocking_wait_rq(bfqq); ++} ++ ++static void bfq_requeue_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) ++{ ++ struct bfq_entity *entity = &bfqq->entity; ++ ++ bfq_activate_requeue_entity(entity, false, ++ bfqq == bfqd->in_service_queue); } -#ifdef CONFIG_BFQ_GROUP_IOSCHED @@ -5364,15 +7954,17 @@ index a64fec1..7d73b9d 100644 /* * Called when the bfqq no longer has requests pending, remove it from -@@ -1139,6 +1407,7 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, +- * the service tree. ++ * the service tree. As a special case, it can be invoked during an ++ * expiration. + */ + static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, +- int requeue) ++ bool expiration) { BUG_ON(!bfq_bfqq_busy(bfqq)); BUG_ON(!RB_EMPTY_ROOT(&bfqq->sort_list)); -+ BUG_ON(bfqq == bfqd->in_service_queue); - - bfq_log_bfqq(bfqd, bfqq, "del from busy"); - -@@ -1147,27 +1416,20 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, +@@ -1146,27 +1892,20 @@ static void bfq_del_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq, BUG_ON(bfqd->busy_queues == 0); bfqd->busy_queues--; @@ -5398,15 +7990,16 @@ index a64fec1..7d73b9d 100644 bfqg_stats_update_dequeue(bfqq_group(bfqq)); -#endif +- bfq_deactivate_bfqq(bfqd, bfqq, requeue); + BUG_ON(bfqq->entity.budget < 0); -+ - bfq_deactivate_bfqq(bfqd, bfqq, requeue); ++ ++ bfq_deactivate_bfqq(bfqd, bfqq, true, expiration); + + BUG_ON(bfqq->entity.budget < 0); } /* -@@ -1185,16 +1447,11 @@ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) +@@ -1184,16 +1923,11 @@ static void bfq_add_bfqq_busy(struct bfq_data *bfqd, struct bfq_queue *bfqq) bfq_mark_bfqq_busy(bfqq); bfqd->busy_queues++; @@ -5426,17 +8019,28 @@ index a64fec1..7d73b9d 100644 bfqd->wr_busy_queues++; } diff --git a/block/bfq.h b/block/bfq.h -index f73c942..49d28b9 100644 +index fcce855..2a2bc30 100644 --- a/block/bfq.h +++ b/block/bfq.h @@ -1,5 +1,5 @@ /* - * BFQ-v7r11 for 4.5.0: data structures and common functions prototypes. -+ * BFQ-v8r3 for 4.7.0: data structures and common functions prototypes. ++ * BFQ v8r8 for 4.10.0: data structures and common functions prototypes. 
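The bfq.h changes that follow describe one independent service tree per I/O priority class (RT, BE, IDLE) inside each bfq_sched_data, indexed the way bfq_entity_service_tree() does with st + idx. A minimal sketch of that layout (names illustrative):

```c
#include <stdio.h>

enum { CLASS_RT, CLASS_BE, CLASS_IDLE, IOPRIO_CLASSES };

struct service_tree { int nr_active; };
struct sched_data { struct service_tree service_tree[IOPRIO_CLASSES]; };

/* One scheduler per class; select it by class index, as in the patch. */
static struct service_tree *tree_for(struct sched_data *sd, int class_idx)
{
	return sd->service_tree + class_idx;
}

int main(void)
{
	static struct sched_data sd;	/* zero-initialized */

	tree_for(&sd, CLASS_BE)->nr_active++;
	printf("BE active: %d\n", sd.service_tree[CLASS_BE].nr_active);
	return 0;
}
```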
* * Based on ideas and code from CFQ: * Copyright (C) 2003 Jens Axboe -@@ -28,20 +28,21 @@ +@@ -7,7 +7,9 @@ + * Copyright (C) 2008 Fabio Checconi + * Paolo Valente + * +- * Copyright (C) 2010 Paolo Valente ++ * Copyright (C) 2015 Paolo Valente ++ * ++ * Copyright (C) 2017 Paolo Valente + */ + + #ifndef _BFQ_H +@@ -28,20 +30,21 @@ #define BFQ_DEFAULT_QUEUE_IOPRIO 4 @@ -5465,7 +8069,7 @@ index f73c942..49d28b9 100644 * * Each service tree represents a B-WF2Q+ scheduler on its own. Each * ioprio_class has its own independent scheduler, and so its own -@@ -49,27 +50,28 @@ struct bfq_entity; +@@ -49,27 +52,28 @@ struct bfq_entity; * of the containing bfqd. */ struct bfq_service_tree { @@ -5476,8 +8080,8 @@ index f73c942..49d28b9 100644 - struct bfq_entity *first_idle; - struct bfq_entity *last_idle; -+ struct bfq_entity *first_idle; /* idle entity with minimum F_i */ -+ struct bfq_entity *last_idle; /* idle entity with maximum F_i */ ++ struct bfq_entity *first_idle; /* idle entity with minimum F_i */ ++ struct bfq_entity *last_idle; /* idle entity with maximum F_i */ - u64 vtime; + u64 vtime; /* scheduler virtual time */ @@ -5504,7 +8108,7 @@ index f73c942..49d28b9 100644 * * The supported ioprio_classes are the same as in CFQ, in descending * priority order, IOPRIO_CLASS_RT, IOPRIO_CLASS_BE, IOPRIO_CLASS_IDLE. -@@ -79,48 +81,29 @@ struct bfq_service_tree { +@@ -79,48 +83,32 @@ struct bfq_service_tree { * All the fields are protected by the queue lock of the containing bfqd. */ struct bfq_sched_data { @@ -5514,6 +8118,9 @@ index f73c942..49d28b9 100644 struct bfq_entity *next_in_service; + /* array of service trees, one per ioprio_class */ struct bfq_service_tree service_tree[BFQ_IOPRIO_CLASSES]; ++ /* last time CLASS_IDLE was served */ ++ unsigned long bfq_class_idle_last_service; ++ }; /** @@ -5562,7 +8169,7 @@ index f73c942..49d28b9 100644 * * A bfq_entity is used to represent either a bfq_queue (leaf node in the * cgroup hierarchy) or a bfq_group into the upper level scheduler. Each -@@ -147,27 +130,52 @@ struct bfq_weight_counter { +@@ -147,27 +135,52 @@ struct bfq_weight_counter { * containing bfqd. */ struct bfq_entity { @@ -5571,11 +8178,12 @@ index f73c942..49d28b9 100644 + /* pointer to the weight counter associated with this entity */ struct bfq_weight_counter *weight_counter; +- int on_st; + /* -+ * flag, true if the entity is on a tree (either the active or -+ * the idle one of its service_tree). ++ * Flag, true if the entity is on a tree (either the active or ++ * the idle one of its service_tree) or is in service. + */ - int on_st; ++ bool on_st; - u64 finish; - u64 start; @@ -5600,7 +8208,7 @@ index f73c942..49d28b9 100644 + /* budget, used also to calculate F_i: F_i = S_i + @budget / @weight */ + int budget; + -+ unsigned int weight; /* weight of the queue */ ++ unsigned int weight; /* weight of the queue */ + unsigned int new_weight; /* next weight if a change is in progress */ + + /* original weight, used to implement weight boosting */ @@ -5621,7 +8229,7 @@ index f73c942..49d28b9 100644 int prio_changed; }; -@@ -175,56 +183,6 @@ struct bfq_group; +@@ -175,56 +188,6 @@ struct bfq_group; /** * struct bfq_queue - leaf schedulable entity. 
@@ -5678,7 +8286,7 @@ index f73c942..49d28b9 100644 * * A bfq_queue is a leaf request queue; it can be associated with an * io_context or more, if it is async or shared between cooperating -@@ -235,117 +193,163 @@ struct bfq_group; +@@ -235,117 +198,175 @@ struct bfq_group; * All the fields are protected by the queue lock of the containing bfqd. */ struct bfq_queue { @@ -5794,6 +8402,10 @@ index f73c942..49d28b9 100644 + * last transition from idle to backlogged. + */ unsigned long service_from_backlogged; ++ /* ++ * Value of wr start time when switching to soft rt ++ */ ++ unsigned long wr_start_at_switch_to_srt; + + unsigned long split_time; /* time of last split */ }; @@ -5806,11 +8418,11 @@ index f73c942..49d28b9 100644 */ struct bfq_ttime { - unsigned long last_end_request; -+ unsigned long last_end_request; /* completion time of last request */ ++ u64 last_end_request; /* completion time of last request */ + -+ unsigned long ttime_total; /* total process thinktime */ ++ u64 ttime_total; /* total process thinktime */ + unsigned long ttime_samples; /* number of thinktime samples */ -+ unsigned long ttime_mean; /* average process thinktime */ ++ u64 ttime_mean; /* average process thinktime */ - unsigned long ttime_total; - unsigned long ttime_samples; @@ -5883,13 +8495,20 @@ index f73c942..49d28b9 100644 + * with another cooperating queue. + */ bool was_in_burst_list; -- + - unsigned int cooperations; - unsigned int failed_cooperations; ++ /* ++ * Similar to previous fields: save wr information. ++ */ ++ unsigned long saved_wr_coeff; ++ unsigned long saved_last_wr_start_finish; ++ unsigned long saved_wr_start_at_switch_to_srt; ++ unsigned int saved_wr_cur_max_time; }; enum bfq_device_speed { -@@ -354,224 +358,216 @@ enum bfq_device_speed { +@@ -354,224 +375,232 @@ enum bfq_device_speed { }; /** @@ -6000,10 +8619,10 @@ index f73c942..49d28b9 100644 - * @last_ins_in_burst. - * @burst_size: number of queues in the current burst of queue activations. - * @bfq_large_burst_thresh: maximum burst size above which the current -- * queue-activation burst is deemed as 'large'. +- * queue-activation burst is deemed as 'large'. - * @large_burst: true if a large queue-activation burst is in progress. - * @burst_list: head of the burst list (as for the above fields, more details -- * in the comments to the function bfq_handle_burst). +- * in the comments to the function bfq_handle_burst). - * @low_latency: if set to true, low-latency heuristics are enabled. - * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised - * queue is multiplied. @@ -6083,11 +8702,12 @@ index f73c942..49d28b9 100644 + /* number of budgets assigned */ int budgets_assigned; +- struct timer_list idle_slice_timer; + /* + * Timer set when idling (waiting) for the next request from + * the queue in service. 
+ */ - struct timer_list idle_slice_timer; ++ struct hrtimer idle_slice_timer; + /* delayed work to restart dispatching on the request queue */ struct work_struct unplug_work; @@ -6099,14 +8719,33 @@ index f73c942..49d28b9 100644 + /* on-disk position of the last served request */ sector_t last_position; ++ /* time of last request completion (ns) */ ++ u64 last_completion; ++ ++ /* time of first rq dispatch in current observation interval (ns) */ ++ u64 first_dispatch; ++ /* time of last rq dispatch in current observation interval (ns) */ ++ u64 last_dispatch; ++ + /* beginning of the last budget */ ktime_t last_budget_start; + /* beginning of the last idle slice */ ktime_t last_idling_start; -+ /* number of samples used to calculate @peak_rate */ ++ ++ /* number of samples in current observation interval */ int peak_rate_samples; -+ /* peak transfer rate observed for a budget */ - u64 peak_rate; +- u64 peak_rate; ++ /* num of samples of seq dispatches in current observation interval */ ++ u32 sequential_samples; ++ /* total num of sectors transferred in current observation interval */ ++ u64 tot_sectors_dispatched; ++ /* max rq size seen during current observation interval (sectors) */ ++ u32 last_rq_max_size; ++ /* time elapsed from first dispatch in current observ. interval (us) */ ++ u64 delta_from_first; ++ /* current estimate of device peak rate */ ++ u32 peak_rate; ++ + /* maximum budget allotted to a bfq_queue before rescheduling */ int bfq_max_budget; @@ -6115,19 +8754,20 @@ index f73c942..49d28b9 100644 + /* list of all the bfq_queues idle on the device */ struct list_head idle_list; +- unsigned int bfq_fifo_expire[2]; + /* + * Timeout for async/sync requests; when it fires, requests + * are served in fifo order. + */ - unsigned int bfq_fifo_expire[2]; ++ u64 bfq_fifo_expire[2]; + /* weight of backward seeks wrt forward ones */ unsigned int bfq_back_penalty; + /* maximum allowed backward seek */ unsigned int bfq_back_max; +- unsigned int bfq_slice_idle; +- u64 bfq_class_idle_last_service; + /* maximum idling time */ - unsigned int bfq_slice_idle; -+ /* last time CLASS_IDLE was served */ - u64 bfq_class_idle_last_service; ++ u32 bfq_slice_idle; + /* user-configured max budget value (0 for auto-tuning) */ int bfq_user_max_budget; @@ -6250,7 +8890,7 @@ index f73c942..49d28b9 100644 BFQ_BFQQ_FLAG_IO_bound, /* * bfqq has timed-out at least once * having consumed at most 2/10 of -@@ -581,17 +577,12 @@ enum bfqq_state_flags { +@@ -581,17 +610,12 @@ enum bfqq_state_flags { * bfqq activated in a large burst, * see comments to bfq_handle_burst. */ @@ -6269,7 +8909,7 @@ index f73c942..49d28b9 100644 }; #define BFQ_BFQQ_FNS(name) \ -@@ -608,25 +599,53 @@ static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ +@@ -608,28 +632,94 @@ static int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ return ((bfqq)->flags & (1 << BFQ_BFQQ_FLAG_##name)) != 0; \ } @@ -6294,6 +8934,43 @@ index f73c942..49d28b9 100644 /* Logging facilities. */ -#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ - blk_add_trace_msg((bfqd)->queue, "bfq%d " fmt, (bfqq)->pid, ##args) ++#ifdef CONFIG_BFQ_REDIRECT_TO_CONSOLE ++#ifdef CONFIG_BFQ_GROUP_IOSCHED ++static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); ++static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); ++ ++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) 
do { \ ++ char __pbuf[128]; \ ++ \ ++ assert_spin_locked((bfqd)->queue->queue_lock); \ ++ blkg_path(bfqg_to_blkg(bfqq_group(bfqq)), __pbuf, sizeof(__pbuf)); \ ++ pr_crit("bfq%d%c %s " fmt "\n", \ ++ (bfqq)->pid, \ ++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ ++ __pbuf, ##args); \ ++} while (0) ++ ++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do { \ ++ char __pbuf[128]; \ ++ \ ++ blkg_path(bfqg_to_blkg(bfqg), __pbuf, sizeof(__pbuf)); \ ++ pr_crit("%s " fmt "\n", __pbuf, ##args); \ ++} while (0) ++ ++#else /* CONFIG_BFQ_GROUP_IOSCHED */ ++ ++#define bfq_log_bfqq(bfqd, bfqq, fmt, args...) \ ++ pr_crit("bfq%d%c " fmt "\n", (bfqq)->pid, \ ++ bfq_bfqq_sync((bfqq)) ? 'S' : 'A', \ ++ ##args) ++#define bfq_log_bfqg(bfqd, bfqg, fmt, args...) do {} while (0) ++ ++#endif /* CONFIG_BFQ_GROUP_IOSCHED */ ++ ++#define bfq_log(bfqd, fmt, args...) \ ++ pr_crit("bfq " fmt "\n", ##args) ++ ++#else /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ +#ifdef CONFIG_BFQ_GROUP_IOSCHED +static struct bfq_group *bfqq_group(struct bfq_queue *bfqq); +static struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg); @@ -6328,7 +9005,11 @@ index f73c942..49d28b9 100644 #define bfq_log(bfqd, fmt, args...) \ blk_add_trace_msg((bfqd)->queue, "bfq " fmt, ##args) -@@ -640,15 +659,12 @@ enum bfqq_expiration { ++#endif /* CONFIG_BFQ_REDIRECT_TO_CONSOLE */ + + /* Expiration reasons. */ + enum bfqq_expiration { +@@ -640,15 +730,12 @@ enum bfqq_expiration { BFQ_BFQQ_BUDGET_TIMEOUT, /* budget took too long to be used */ BFQ_BFQQ_BUDGET_EXHAUSTED, /* budget consumed */ BFQ_BFQQ_NO_MORE_REQUESTS, /* the queue has no more requests */ @@ -6346,7 +9027,7 @@ index f73c942..49d28b9 100644 /* number of ios merged */ struct blkg_rwstat merged; /* total time spent on device in ns, may not be accurate w/ queueing */ -@@ -657,12 +673,8 @@ struct bfqg_stats { +@@ -657,12 +744,8 @@ struct bfqg_stats { struct blkg_rwstat wait_time; /* number of IOs queued up */ struct blkg_rwstat queued; @@ -6359,7 +9040,7 @@ index f73c942..49d28b9 100644 /* sum of number of ios queued across all samples */ struct blkg_stat avg_queue_size_sum; /* count of samples taken for average */ -@@ -680,8 +692,10 @@ struct bfqg_stats { +@@ -680,8 +763,10 @@ struct bfqg_stats { uint64_t start_idle_time; uint64_t start_empty_time; uint16_t flags; @@ -6370,7 +9051,7 @@ index f73c942..49d28b9 100644 /* * struct bfq_group_data - per-blkcg storage for the blkio subsystem. * -@@ -692,7 +706,7 @@ struct bfq_group_data { +@@ -692,7 +777,7 @@ struct bfq_group_data { /* must be the first member */ struct blkcg_policy_data pd; @@ -6379,7 +9060,7 @@ index f73c942..49d28b9 100644 }; /** -@@ -712,7 +726,7 @@ struct bfq_group_data { +@@ -712,7 +797,7 @@ struct bfq_group_data { * unused for the root group. Used to know whether there * are groups with more than one active @bfq_entity * (see the comments to the function @@ -6388,7 +9069,7 @@ index f73c942..49d28b9 100644 * @rq_pos_tree: rbtree sorted by next_request position, used when * determining if two or more queues have interleaving * requests (see bfq_find_close_cooperator()). 
-@@ -745,7 +759,6 @@ struct bfq_group { +@@ -745,7 +830,6 @@ struct bfq_group { struct rb_root rq_pos_tree; struct bfqg_stats stats; @@ -6396,12 +9077,26 @@ index f73c942..49d28b9 100644 }; #else -@@ -767,11 +780,25 @@ bfq_entity_service_tree(struct bfq_entity *entity) +@@ -761,17 +845,38 @@ struct bfq_group { + + static struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity); + ++static unsigned int bfq_class_idx(struct bfq_entity *entity) ++{ ++ struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); ++ ++ return bfqq ? bfqq->ioprio_class - 1 : ++ BFQ_DEFAULT_GRP_CLASS - 1; ++} ++ + static struct bfq_service_tree * + bfq_entity_service_tree(struct bfq_entity *entity) + { struct bfq_sched_data *sched_data = entity->sched_data; struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity); - unsigned int idx = bfqq ? bfqq->ioprio_class - 1 : +- unsigned int idx = bfqq ? bfqq->ioprio_class - 1 : - BFQ_DEFAULT_GRP_CLASS; -+ BFQ_DEFAULT_GRP_CLASS - 1; ++ unsigned int idx = bfq_class_idx(entity); BUG_ON(idx >= BFQ_IOPRIO_CLASSES); BUG_ON(sched_data == NULL); @@ -6409,7 +9104,7 @@ index f73c942..49d28b9 100644 + if (bfqq) + bfq_log_bfqq(bfqq->bfqd, bfqq, + "entity_service_tree %p %d", -+ sched_data->service_tree + idx, idx) ; ++ sched_data->service_tree + idx, idx); +#ifdef CONFIG_BFQ_GROUP_IOSCHED + else { + struct bfq_group *bfqg = @@ -6417,13 +9112,13 @@ index f73c942..49d28b9 100644 + + bfq_log_bfqg((struct bfq_data *)bfqg->bfqd, bfqg, + "entity_service_tree %p %d", -+ sched_data->service_tree + idx, idx) ; ++ sched_data->service_tree + idx, idx); + } +#endif return sched_data->service_tree + idx; } -@@ -791,47 +818,6 @@ static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) +@@ -791,47 +896,6 @@ static struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) return bic->icq.q->elevator->elevator_data; } @@ -6471,7 +9166,7 @@ index f73c942..49d28b9 100644 #ifdef CONFIG_BFQ_GROUP_IOSCHED static struct bfq_group *bfq_bfqq_to_bfqg(struct bfq_queue *bfqq) -@@ -857,11 +843,13 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); +@@ -857,11 +921,13 @@ static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio); static void bfq_put_queue(struct bfq_queue *bfqq); static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, @@ -6488,5 +9183,5 @@ index f73c942..49d28b9 100644 #endif /* _BFQ_H */ -- -1.9.1 +2.10.0 diff --git a/patches/0009-Implement-min-and-msec-hrtimeout-un-interruptible-sc.patch b/patches/0009-Implement-min-and-msec-hrtimeout-un-interruptible-sc.patch new file mode 100644 index 0000000..ba027c0 --- /dev/null +++ b/patches/0009-Implement-min-and-msec-hrtimeout-un-interruptible-sc.patch @@ -0,0 +1,111 @@ +From a7fb2842267fd275cae9cf44dd3037469f75eeef Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Tue, 1 Nov 2016 12:54:20 +1100 +Subject: [PATCH 09/25] Implement min and msec hrtimeout un/interruptible + schedule timeout variants with a lower resolution of 1ms to work around low + Hz time resolutions. 
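The win from these helpers is easiest to see on a low-HZ build: at HZ=100 one jiffy is 10ms, so any jiffy-based sleep shorter than that silently rounds up to a full tick. A minimal usage sketch of what the patch below enables; my_flush_fifo() is a hypothetical caller, not code from the patch:

#include <linux/sched.h>
#include <linux/errno.h>

/* Poll a device flag, sleeping roughly 2ms per iteration via hrtimer. */
static int my_flush_fifo(volatile int *fifo_empty)
{
	while (!*fifo_empty) {
		/* Nonzero return means the sleep was cut short. */
		if (schedule_msec_hrtimeout_interruptible(2) &&
		    signal_pending(current))
			return -ERESTARTSYS;	/* interrupted by a signal */
	}
	return 0;
}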
+
+---
+ include/linux/sched.h |  6 ++++++
+ kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 77 insertions(+)
+
+diff --git a/include/linux/sched.h b/include/linux/sched.h
+index d752ef6..46544f4 100644
+--- a/include/linux/sched.h
++++ b/include/linux/sched.h
+@@ -457,6 +457,12 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
+ extern signed long schedule_timeout_killable(signed long timeout);
+ extern signed long schedule_timeout_uninterruptible(signed long timeout);
+ extern signed long schedule_timeout_idle(signed long timeout);
++
++extern signed long schedule_msec_hrtimeout(signed long timeout);
++extern signed long schedule_min_hrtimeout(void);
++extern signed long schedule_msec_hrtimeout_interruptible(signed long timeout);
++extern signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout);
++
+ asmlinkage void schedule(void);
+ extern void schedule_preempt_disabled(void);
+
+diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
+index c6ecedd..a47f5b3 100644
+--- a/kernel/time/hrtimer.c
++++ b/kernel/time/hrtimer.c
+@@ -1796,3 +1796,74 @@ int __sched schedule_hrtimeout(ktime_t *expires,
+ 	return schedule_hrtimeout_range(expires, 0, mode);
+ }
+ EXPORT_SYMBOL_GPL(schedule_hrtimeout);
++
++/*
++ * As per schedule_hrtimeout but takes a millisecond value and returns how
++ * many milliseconds are left.
++ */
++signed long __sched schedule_msec_hrtimeout(signed long timeout)
++{
++	struct hrtimer_sleeper t;
++	int delta, secs, jiffs;
++	ktime_t expires;
++
++	if (!timeout) {
++		__set_current_state(TASK_RUNNING);
++		return 0;
++	}
++
++	jiffs = msecs_to_jiffies(timeout);
++	/*
++	 * If regular timer resolution is adequate or hrtimer resolution is not
++	 * (yet) better than Hz, as would occur during startup, use regular
++	 * timers.
++	 */
++	if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
++		return schedule_timeout(jiffs);
++
++	secs = timeout / 1000;
++	delta = (timeout % 1000) * NSEC_PER_MSEC;
++	expires = ktime_set(secs, delta);
++
++	hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++	hrtimer_set_expires_range_ns(&t.timer, expires, delta);
++
++	hrtimer_init_sleeper(&t, current);
++
++	hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
++
++	if (likely(t.task))
++		schedule();
++
++	hrtimer_cancel(&t.timer);
++	destroy_hrtimer_on_stack(&t.timer);
++
++	__set_current_state(TASK_RUNNING);
++
++	expires = hrtimer_expires_remaining(&t.timer);
++	timeout = ktime_to_ms(expires);
++	return timeout < 0 ?
0 : timeout; ++} ++ ++EXPORT_SYMBOL(schedule_msec_hrtimeout); ++ ++signed long __sched schedule_min_hrtimeout(void) ++{ ++ return schedule_msec_hrtimeout(1); ++} ++ ++EXPORT_SYMBOL(schedule_min_hrtimeout); ++ ++signed long __sched schedule_msec_hrtimeout_interruptible(signed long timeout) ++{ ++ __set_current_state(TASK_INTERRUPTIBLE); ++ return schedule_msec_hrtimeout(timeout); ++} ++EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible); ++ ++signed long __sched schedule_msec_hrtimeout_uninterruptible(signed long timeout) ++{ ++ __set_current_state(TASK_UNINTERRUPTIBLE); ++ return schedule_msec_hrtimeout(timeout); ++} ++EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible); +-- +2.9.3 + diff --git a/patches/0010-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch b/patches/0010-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch new file mode 100644 index 0000000..ee79d88 --- /dev/null +++ b/patches/0010-Special-case-calls-of-schedule_timeout-1-to-use-the-.patch @@ -0,0 +1,48 @@ +From a4f3820228ebab3d5d480d720fecebd3f7e71771 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Sat, 5 Nov 2016 09:27:36 +1100 +Subject: [PATCH 10/25] Special case calls of schedule_timeout(1) to use the + min hrtimeout of 1ms, working around low Hz resolutions. + +--- + kernel/time/timer.c | 15 +++++++++++++-- + 1 file changed, 13 insertions(+), 2 deletions(-) + +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index ef3128f..3f72c13 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -1750,6 +1750,17 @@ signed long __sched schedule_timeout(signed long timeout) + + expire = timeout + jiffies; + ++ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ /* ++ * Special case 1 as being a request for the minimum timeout ++ * and use highres timers to timeout after 1ms to workaround ++ * the granularity of low Hz tick timers. ++ */ ++ if (!schedule_min_hrtimeout()) ++ return 0; ++ goto out_timeout; ++ } ++ + setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); + __mod_timer(&timer, expire, false); + schedule(); +@@ -1757,10 +1768,10 @@ signed long __sched schedule_timeout(signed long timeout) + + /* Remove the timer from the object tracker */ + destroy_timer_on_stack(&timer); +- ++out_timeout: + timeout = expire - jiffies; + +- out: ++out: + return timeout < 0 ? 0 : timeout; + } + EXPORT_SYMBOL(schedule_timeout); +-- +2.9.3 + diff --git a/patches/0011-Convert-msleep-to-use-hrtimers-when-active.patch b/patches/0011-Convert-msleep-to-use-hrtimers-when-active.patch new file mode 100644 index 0000000..6eafa9a --- /dev/null +++ b/patches/0011-Convert-msleep-to-use-hrtimers-when-active.patch @@ -0,0 +1,54 @@ +From 534bc9d3e559420eaf57771f48d2c2f549dcc4d2 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Fri, 4 Nov 2016 09:25:54 +1100 +Subject: [PATCH 11/25] Convert msleep to use hrtimers when active. + +--- + kernel/time/timer.c | 24 ++++++++++++++++++++++-- + 1 file changed, 22 insertions(+), 2 deletions(-) + +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 3f72c13..bc53598 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -1892,7 +1892,19 @@ void __init init_timers(void) + */ + void msleep(unsigned int msecs) + { +- unsigned long timeout = msecs_to_jiffies(msecs) + 1; ++ int jiffs = msecs_to_jiffies(msecs); ++ unsigned long timeout; ++ ++ /* ++ * Use high resolution timers where the resolution of tick based ++ * timers is inadequate. 
++ */ ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ while (msecs) ++ msecs = schedule_msec_hrtimeout_uninterruptible(msecs); ++ return; ++ } ++ timeout = msecs_to_jiffies(msecs) + 1; + + while (timeout) + timeout = schedule_timeout_uninterruptible(timeout); +@@ -1906,7 +1918,15 @@ EXPORT_SYMBOL(msleep); + */ + unsigned long msleep_interruptible(unsigned int msecs) + { +- unsigned long timeout = msecs_to_jiffies(msecs) + 1; ++ int jiffs = msecs_to_jiffies(msecs); ++ unsigned long timeout; ++ ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ while (msecs && !signal_pending(current)) ++ msecs = schedule_msec_hrtimeout_interruptible(msecs); ++ return msecs; ++ } ++ timeout = msecs_to_jiffies(msecs) + 1; + + while (timeout && !signal_pending(current)) + timeout = schedule_timeout_interruptible(timeout); +-- +2.9.3 + diff --git a/patches/0012-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch b/patches/0012-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch new file mode 100644 index 0000000..14be4c1 --- /dev/null +++ b/patches/0012-Replace-all-schedule-timeout-1-with-schedule_min_hrt.patch @@ -0,0 +1,226 @@ +From 8fef7b75352d874af02881de3493f2ce2d47a341 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Mon, 20 Feb 2017 13:28:30 +1100 +Subject: [PATCH 12/25] Replace all schedule timeout(1) with + schedule_min_hrtimeout() + +--- + drivers/block/swim.c | 6 +++--- + drivers/char/ipmi/ipmi_msghandler.c | 2 +- + drivers/char/ipmi/ipmi_ssif.c | 2 +- + drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 +- + drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 2 +- + drivers/mfd/ucb1x00-core.c | 2 +- + drivers/misc/sgi-xp/xpc_channel.c | 2 +- + drivers/net/caif/caif_hsi.c | 2 +- + drivers/ntb/test/ntb_perf.c | 2 +- + drivers/staging/comedi/drivers/ni_mio_common.c | 2 +- + fs/afs/vlocation.c | 2 +- + fs/btrfs/extent-tree.c | 2 +- + fs/btrfs/inode-map.c | 2 +- + sound/usb/line6/pcm.c | 2 +- + 14 files changed, 16 insertions(+), 16 deletions(-) + +diff --git a/drivers/block/swim.c b/drivers/block/swim.c +index b5afd49..7d09955 100644 +--- a/drivers/block/swim.c ++++ b/drivers/block/swim.c +@@ -332,7 +332,7 @@ static inline void swim_motor(struct swim __iomem *base, + if (swim_readbit(base, MOTOR_ON)) + break; + current->state = TASK_INTERRUPTIBLE; +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + } else if (action == OFF) { + swim_action(base, MOTOR_OFF); +@@ -351,7 +351,7 @@ static inline void swim_eject(struct swim __iomem *base) + if (!swim_readbit(base, DISK_IN)) + break; + current->state = TASK_INTERRUPTIBLE; +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + swim_select(base, RELAX); + } +@@ -375,7 +375,7 @@ static inline int swim_step(struct swim __iomem *base) + for (wait = 0; wait < HZ; wait++) { + + current->state = TASK_INTERRUPTIBLE; +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + + swim_select(base, RELAX); + if (!swim_readbit(base, STEP)) +diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c +index 92e53ac..a2418e7 100644 +--- a/drivers/char/ipmi/ipmi_msghandler.c ++++ b/drivers/char/ipmi/ipmi_msghandler.c +@@ -2953,7 +2953,7 @@ static void cleanup_smi_msgs(ipmi_smi_t intf) + /* Current message first, to preserve order */ + while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) { + /* Wait for the message to clear out. */ +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + + /* No need for locks, the interface is down. 
*/ +diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c +index cca6e5b..fd3c7da 100644 +--- a/drivers/char/ipmi/ipmi_ssif.c ++++ b/drivers/char/ipmi/ipmi_ssif.c +@@ -1185,7 +1185,7 @@ static int ssif_remove(struct i2c_client *client) + + /* make sure the driver is not looking for flags any more. */ + while (ssif_info->ssif_state != SSIF_NORMAL) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + + ssif_info->stopping = true; + del_timer_sync(&ssif_info->retry_timer); +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +index b6a0806..b5b02cf 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c +@@ -235,7 +235,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv, + DRM_ERROR("SVGA device lockup.\n"); + break; + } +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + if (interruptible && signal_pending(current)) { + ret = -ERESTARTSYS; + break; +diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +index 0c7e172..4c1555c 100644 +--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c ++++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c +@@ -156,7 +156,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv, + break; + } + if (lazy) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + else if ((++count & 0x0F) == 0) { + /** + * FIXME: Use schedule_hr_timeout here for +diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c +index d6fb2e1..7ac951b 100644 +--- a/drivers/mfd/ucb1x00-core.c ++++ b/drivers/mfd/ucb1x00-core.c +@@ -253,7 +253,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync) + break; + /* yield to other processes */ + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + + return UCB_ADC_DAT(val); +diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c +index 128d561..38e68e9 100644 +--- a/drivers/misc/sgi-xp/xpc_channel.c ++++ b/drivers/misc/sgi-xp/xpc_channel.c +@@ -837,7 +837,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch) + + atomic_inc(&ch->n_on_msg_allocate_wq); + prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE); +- ret = schedule_timeout(1); ++ ret = schedule_min_hrtimeout(); + finish_wait(&ch->msg_allocate_wq, &wait); + atomic_dec(&ch->n_on_msg_allocate_wq); + +diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c +index ddabce7..67fb5ce 100644 +--- a/drivers/net/caif/caif_hsi.c ++++ b/drivers/net/caif/caif_hsi.c +@@ -944,7 +944,7 @@ static void cfhsi_wake_down(struct work_struct *work) + break; + + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + retry--; + } + +diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c +index 434e1d4..2f9543b 100644 +--- a/drivers/ntb/test/ntb_perf.c ++++ b/drivers/ntb/test/ntb_perf.c +@@ -308,7 +308,7 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src, + if (unlikely((jiffies - last_sleep) > 5 * HZ)) { + last_sleep = jiffies; + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + + if (unlikely(kthread_should_stop())) +diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c +index b2e3828..beae38b 100644 +--- a/drivers/staging/comedi/drivers/ni_mio_common.c ++++ b/drivers/staging/comedi/drivers/ni_mio_common.c +@@ -4655,7 +4655,7 @@ static int cs5529_wait_for_idle(struct 
comedi_device *dev) + if ((status & NI67XX_CAL_STATUS_BUSY) == 0) + break; + set_current_state(TASK_INTERRUPTIBLE); +- if (schedule_timeout(1)) ++ if (schedule_min_hrtimeout()) + return -EIO; + } + if (i == timeout) { +diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c +index 45a8639..855d08e 100644 +--- a/fs/afs/vlocation.c ++++ b/fs/afs/vlocation.c +@@ -129,7 +129,7 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl, + if (vl->upd_busy_cnt > 1) { + /* second+ BUSY - sleep a little bit */ + set_current_state(TASK_UNINTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } + continue; + } +diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c +index dcd2e79..16bf891 100644 +--- a/fs/btrfs/extent-tree.c ++++ b/fs/btrfs/extent-tree.c +@@ -5952,7 +5952,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) + + if (flush != BTRFS_RESERVE_NO_FLUSH && + btrfs_transaction_in_commit(fs_info)) +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + + if (delalloc_lock) + mutex_lock(&BTRFS_I(inode)->delalloc_mutex); +diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c +index 144b119..03d2e8e 100644 +--- a/fs/btrfs/inode-map.c ++++ b/fs/btrfs/inode-map.c +@@ -89,7 +89,7 @@ static int caching_kthread(void *data) + btrfs_release_path(path); + root->ino_cache_progress = last; + up_read(&fs_info->commit_root_sem); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + goto again; + } else + continue; +diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c +index fab53f5..fda1ab5 100644 +--- a/sound/usb/line6/pcm.c ++++ b/sound/usb/line6/pcm.c +@@ -131,7 +131,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm, + if (!alive) + break; + set_current_state(TASK_UNINTERRUPTIBLE); +- schedule_timeout(1); ++ schedule_min_hrtimeout(); + } while (--timeout > 0); + if (alive) + dev_err(line6pcm->line6->ifcdev, +-- +2.9.3 + diff --git a/patches/0013-Change-all-schedule_timeout-with-msecs_to_jiffies-po.patch b/patches/0013-Change-all-schedule_timeout-with-msecs_to_jiffies-po.patch new file mode 100644 index 0000000..78e0322 --- /dev/null +++ b/patches/0013-Change-all-schedule_timeout-with-msecs_to_jiffies-po.patch @@ -0,0 +1,397 @@ +From 56e8b01452fbb6c1aa85b0a52fbd352fddf7e959 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Mon, 20 Feb 2017 13:29:16 +1100 +Subject: [PATCH 13/25] Change all schedule_timeout with msecs_to_jiffies + potentially under 50ms to use schedule_msec_hrtimeout. 
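The 50ms cutoff in this subject line matches the fallback inside schedule_msec_hrtimeout() from patch 0009: the helper only uses an hrtimer when the request is 4 jiffies or less, so at HZ=100 only sleeps of 40ms and under actually take the high-resolution path. The doubled parentheses in the converted calls, e.g. schedule_msec_hrtimeout((10)), are harmless leftovers of the mechanical substitution. The before/after pattern, as a sketch; my_poll_delay() is hypothetical:

#include <linux/sched.h>
#include <linux/jiffies.h>

static void my_poll_delay(void)
{
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(10)); /* before: ~0 to 10ms at HZ=100 */

	set_current_state(TASK_INTERRUPTIBLE);
	schedule_msec_hrtimeout(10);            /* after: tracks the requested 10ms */
}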
+ +--- + drivers/bluetooth/hci_qca.c | 2 +- + drivers/char/snsc.c | 4 ++-- + drivers/media/pci/ivtv/ivtv-ioctl.c | 2 +- + drivers/media/pci/ivtv/ivtv-streams.c | 2 +- + drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +- + drivers/net/usb/lan78xx.c | 2 +- + drivers/net/usb/usbnet.c | 2 +- + drivers/scsi/fnic/fnic_scsi.c | 4 ++-- + drivers/scsi/snic/snic_scsi.c | 2 +- + drivers/staging/lustre/lnet/lnet/lib-eq.c | 2 +- + drivers/staging/rts5208/rtsx.c | 2 +- + drivers/staging/speakup/speakup_acntpc.c | 4 ++-- + drivers/staging/speakup/speakup_apollo.c | 2 +- + drivers/staging/speakup/speakup_decext.c | 2 +- + drivers/staging/speakup/speakup_decpc.c | 2 +- + drivers/staging/speakup/speakup_dectlk.c | 2 +- + drivers/staging/speakup/speakup_dtlk.c | 4 ++-- + drivers/staging/speakup/speakup_keypc.c | 4 ++-- + drivers/staging/speakup/synth.c | 2 +- + drivers/staging/unisys/visornic/visornic_main.c | 6 +++--- + drivers/target/target_core_user.c | 2 +- + drivers/video/fbdev/omap/hwa742.c | 2 +- + drivers/video/fbdev/pxafb.c | 2 +- + 23 files changed, 30 insertions(+), 30 deletions(-) + +diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c +index 05c2307..6954d29 100644 +--- a/drivers/bluetooth/hci_qca.c ++++ b/drivers/bluetooth/hci_qca.c +@@ -880,7 +880,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) + * then host can communicate with new baudrate to controller + */ + set_current_state(TASK_UNINTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS)); ++ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS)); + set_current_state(TASK_INTERRUPTIBLE); + + return 0; +diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c +index ec07f0e..3410b46 100644 +--- a/drivers/char/snsc.c ++++ b/drivers/char/snsc.c +@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos) + add_wait_queue(&sd->sd_rq, &wait); + spin_unlock_irqrestore(&sd->sd_rlock, flags); + +- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT)); ++ schedule_msec_hrtimeout((SCDRV_TIMEOUT)); + + remove_wait_queue(&sd->sd_rq, &wait); + if (signal_pending(current)) { +@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf, + add_wait_queue(&sd->sd_wq, &wait); + spin_unlock_irqrestore(&sd->sd_wlock, flags); + +- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT)); ++ schedule_msec_hrtimeout((SCDRV_TIMEOUT)); + + remove_wait_queue(&sd->sd_wq, &wait); + if (signal_pending(current)) { +diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c +index 2dc4b20..8e061cf 100644 +--- a/drivers/media/pci/ivtv/ivtv-ioctl.c ++++ b/drivers/media/pci/ivtv/ivtv-ioctl.c +@@ -1151,7 +1151,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std) + TASK_UNINTERRUPTIBLE); + if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100) + break; +- schedule_timeout(msecs_to_jiffies(25)); ++ schedule_msec_hrtimeout((25)); + } + finish_wait(&itv->vsync_waitq, &wait); + mutex_lock(&itv->serialize_lock); +diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c +index d27c6df..e9ffc4e 100644 +--- a/drivers/media/pci/ivtv/ivtv-streams.c ++++ b/drivers/media/pci/ivtv/ivtv-streams.c +@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end) + while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) && + time_before(jiffies, + then + msecs_to_jiffies(2000))) { +- schedule_timeout(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout((10)); + } + + /* To convert jiffies to ms, we must multiply by 
1000 +diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c +index 838545c..34f8972 100644 +--- a/drivers/net/can/usb/peak_usb/pcan_usb.c ++++ b/drivers/net/can/usb/peak_usb/pcan_usb.c +@@ -250,7 +250,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff) + } else { + /* the PCAN-USB needs time to init */ + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT)); ++ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT)); + } + + return err; +diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c +index 08f8703..3b3bc86 100644 +--- a/drivers/net/usb/lan78xx.c ++++ b/drivers/net/usb/lan78xx.c +@@ -2544,7 +2544,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev) + while (!skb_queue_empty(&dev->rxq) && + !skb_queue_empty(&dev->txq) && + !skb_queue_empty(&dev->done)) { +- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); ++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + netif_dbg(dev, ifdown, dev->net, + "waited for %d urb completions\n", temp); +diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c +index 3de65ea..f8a4b18 100644 +--- a/drivers/net/usb/usbnet.c ++++ b/drivers/net/usb/usbnet.c +@@ -767,7 +767,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q) + spin_lock_irqsave(&q->lock, flags); + while (!skb_queue_empty(q)) { + spin_unlock_irqrestore(&q->lock, flags); +- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS)); ++ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS)); + set_current_state(TASK_UNINTERRUPTIBLE); + spin_lock_irqsave(&q->lock, flags); + } +diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c +index adb3d58..de73e78 100644 +--- a/drivers/scsi/fnic/fnic_scsi.c ++++ b/drivers/scsi/fnic/fnic_scsi.c +@@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic) + + /* wait for io cmpl */ + while (atomic_read(&fnic->in_flight)) +- schedule_timeout(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout((1)); + + spin_lock_irqsave(&fnic->wq_copy_lock[0], flags); + +@@ -2201,7 +2201,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, + } + } + +- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov)); ++ schedule_msec_hrtimeout((2 * fnic->config.ed_tov)); + + /* walk again to check, if IOs are still pending in fw */ + if (fnic_is_abts_pending(fnic, lr_sc)) +diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c +index abada16..0bf30dc 100644 +--- a/drivers/scsi/snic/snic_scsi.c ++++ b/drivers/scsi/snic/snic_scsi.c +@@ -2356,7 +2356,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc) + + /* Wait for all the IOs that are entered in Qcmd */ + while (atomic_read(&snic->ios_inflight)) +- schedule_timeout(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout((1)); + + ret = snic_issue_hba_reset(snic, sc); + if (ret) { +diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c +index d05c6cc..3f62b6f 100644 +--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c ++++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c +@@ -328,7 +328,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock) + schedule(); + } else { + now = jiffies; +- schedule_timeout(msecs_to_jiffies(tms)); ++ schedule_msec_hrtimeout((tms)); + tms -= jiffies_to_msecs(jiffies - now); + if (tms < 0) /* no more wait but may have new event */ + tms = 0; +diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c +index 68d75d0..aef88c4 100644 
+--- a/drivers/staging/rts5208/rtsx.c ++++ b/drivers/staging/rts5208/rtsx.c +@@ -537,7 +537,7 @@ static int rtsx_polling_thread(void *__dev) + + for (;;) { + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL)); ++ schedule_msec_hrtimeout((POLLING_INTERVAL)); + + /* lock the device pointers */ + mutex_lock(&dev->dev_mutex); +diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c +index efb791b..fd02fb2 100644 +--- a/drivers/staging/speakup/speakup_acntpc.c ++++ b/drivers/staging/speakup/speakup_acntpc.c +@@ -204,7 +204,7 @@ static void do_catch_up(struct spk_synth *synth) + full_time_val = full_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout((full_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth) + jiffy_delta_val = jiffy_delta->u.n.value; + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + jiff_max = jiffies+jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c +index 3cbc8a7..3c17854 100644 +--- a/drivers/staging/speakup/speakup_apollo.c ++++ b/drivers/staging/speakup/speakup_apollo.c +@@ -172,7 +172,7 @@ static void do_catch_up(struct spk_synth *synth) + outb(UART_MCR_DTR, speakup_info.port_tts + UART_MCR); + outb(UART_MCR_DTR | UART_MCR_RTS, + speakup_info.port_tts + UART_MCR); +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout((full_time_val)); + continue; + } + if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { +diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c +index 1a5cf3d..fa2b4e1 100644 +--- a/drivers/staging/speakup/speakup_decext.c ++++ b/drivers/staging/speakup/speakup_decext.c +@@ -186,7 +186,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (synth_full() || !spk_serial_out(ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c +index d6479bd..f7554bf 100644 +--- a/drivers/staging/speakup/speakup_decpc.c ++++ b/drivers/staging/speakup/speakup_decpc.c +@@ -403,7 +403,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (dt_sendchar(ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c +index 7646567..639192e 100644 +--- a/drivers/staging/speakup/speakup_dectlk.c ++++ b/drivers/staging/speakup/speakup_dectlk.c +@@ -251,7 +251,7 @@ static void do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = 0x0D; + if (synth_full_val || !spk_serial_out(ch)) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c +index 38aa401..1640519 
100644 +--- a/drivers/staging/speakup/speakup_dtlk.c ++++ b/drivers/staging/speakup/speakup_dtlk.c +@@ -217,7 +217,7 @@ static void do_catch_up(struct spk_synth *synth) + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -233,7 +233,7 @@ static void do_catch_up(struct spk_synth *synth) + delay_time_val = delay_time->u.n.value; + jiffy_delta_val = jiffy_delta->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + jiff_max = jiffies + jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c +index 5e2170b..30b5df7 100644 +--- a/drivers/staging/speakup/speakup_keypc.c ++++ b/drivers/staging/speakup/speakup_keypc.c +@@ -206,7 +206,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags); + full_time_val = full_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); + if (synth_full()) { +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout((full_time_val)); + continue; + } + set_current_state(TASK_RUNNING); +@@ -239,7 +239,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags); + jiffy_delta_val = jiffy_delta->u.n.value; + delay_time_val = delay_time->u.n.value; + spin_unlock_irqrestore(&speakup_info.spinlock, flags); +- schedule_timeout(msecs_to_jiffies(delay_time_val)); ++ schedule_msec_hrtimeout((delay_time_val)); + jiff_max = jiffies+jiffy_delta_val; + } + } +diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c +index a61c02b..14299e5 100644 +--- a/drivers/staging/speakup/synth.c ++++ b/drivers/staging/speakup/synth.c +@@ -120,7 +120,7 @@ void spk_do_catch_up(struct spk_synth *synth) + if (ch == '\n') + ch = synth->procspeech; + if (!spk_serial_out(ch)) { +- schedule_timeout(msecs_to_jiffies(full_time_val)); ++ schedule_msec_hrtimeout((full_time_val)); + continue; + } + if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) { +diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c +index c1f674f..4f30a7a 100644 +--- a/drivers/staging/unisys/visornic/visornic_main.c ++++ b/drivers/staging/unisys/visornic/visornic_main.c +@@ -468,7 +468,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout) + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- wait += schedule_timeout(msecs_to_jiffies(10)); ++ wait += schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + } + +@@ -479,7 +479,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout) + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- schedule_timeout(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + if (atomic_read(&devdata->usage)) + break; +@@ -611,7 +611,7 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout) + } + set_current_state(TASK_INTERRUPTIBLE); + spin_unlock_irqrestore(&devdata->priv_lock, flags); +- wait += schedule_timeout(msecs_to_jiffies(10)); ++ wait += schedule_msec_hrtimeout((10)); + spin_lock_irqsave(&devdata->priv_lock, flags); + } + +diff 
--git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c +index 8041710..f907a81 100644 +--- a/drivers/target/target_core_user.c ++++ b/drivers/target/target_core_user.c +@@ -451,7 +451,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd) + + pr_debug("sleeping for ring space\n"); + spin_unlock_irq(&udev->cmdr_lock); +- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT)); ++ ret = schedule_msec_hrtimeout((TCMU_TIME_OUT)); + finish_wait(&udev->wait_cmdr, &__wait); + if (!ret) { + pr_warn("tcmu: command timed out\n"); +diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c +index a4ee65b..cf38bcb 100644 +--- a/drivers/video/fbdev/omap/hwa742.c ++++ b/drivers/video/fbdev/omap/hwa742.c +@@ -926,7 +926,7 @@ static void hwa742_resume(void) + if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7)) + break; + set_current_state(TASK_UNINTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(5)); ++ schedule_msec_hrtimeout((5)); + } + hwa742_set_update_mode(hwa742.update_mode_before_suspend); + } +diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c +index ef73f14..7b5483b 100644 +--- a/drivers/video/fbdev/pxafb.c ++++ b/drivers/video/fbdev/pxafb.c +@@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg) + mutex_unlock(&fbi->ctrlr_lock); + + set_current_state(TASK_INTERRUPTIBLE); +- schedule_timeout(msecs_to_jiffies(30)); ++ schedule_msec_hrtimeout((30)); + } + + pr_debug("%s(): task ending\n", __func__); +-- +2.9.3 + diff --git a/patches/0014-Replace-all-calls-to-schedule_timeout_interruptible-.patch b/patches/0014-Replace-all-calls-to-schedule_timeout_interruptible-.patch new file mode 100644 index 0000000..2159cb1 --- /dev/null +++ b/patches/0014-Replace-all-calls-to-schedule_timeout_interruptible-.patch @@ -0,0 +1,325 @@ +From af94b59651831b7e176ce8cb98441bdccb87eac0 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Mon, 20 Feb 2017 13:30:07 +1100 +Subject: [PATCH 14/25] Replace all calls to schedule_timeout_interruptible of + potentially under 50ms to use schedule_msec_hrtimeout_interruptible. 
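What the interruptible variant guarantees callers, and what the hunks below rely on: the sleep ends early when a signal arrives, and the return value is roughly the milliseconds still outstanding. A sketch under that assumption; my_sample_window() and its -EINTR choice are hypothetical, mirroring how the fam15h_power hunk bails out on a nonzero leftover:

#include <linux/sched.h>
#include <linux/errno.h>

static int my_sample_window(unsigned int period_ms)
{
	signed long leftover;

	leftover = schedule_msec_hrtimeout_interruptible(period_ms);
	if (leftover)
		return -EINTR;	/* woken early, period not fully elapsed */
	return 0;
}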
+ +--- + drivers/hwmon/fam15h_power.c | 2 +- + drivers/iio/light/tsl2563.c | 6 +----- + drivers/media/i2c/msp3400-driver.c | 4 ++-- + drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++--- + drivers/media/radio/radio-mr800.c | 2 +- + drivers/media/radio/radio-tea5777.c | 2 +- + drivers/media/radio/tea575x.c | 2 +- + drivers/misc/panel.c | 2 +- + drivers/parport/ieee1284.c | 2 +- + drivers/parport/ieee1284_ops.c | 2 +- + drivers/platform/x86/intel_ips.c | 8 ++++---- + net/core/pktgen.c | 2 +- + sound/soc/codecs/wm8350.c | 12 ++++++------ + sound/soc/codecs/wm8900.c | 2 +- + sound/soc/codecs/wm9713.c | 4 ++-- + 15 files changed, 27 insertions(+), 31 deletions(-) + +diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c +index 15aa49d..991e8a7 100644 +--- a/drivers/hwmon/fam15h_power.c ++++ b/drivers/hwmon/fam15h_power.c +@@ -238,7 +238,7 @@ static ssize_t acc_show_power(struct device *dev, + prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu]; + } + +- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period)); ++ leftover = schedule_msec_hrtimeout_interruptible((data->power_period)); + if (leftover) + return 0; + +diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c +index 04598ae..a8c095d 100644 +--- a/drivers/iio/light/tsl2563.c ++++ b/drivers/iio/light/tsl2563.c +@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip) + default: + delay = 402; + } +- /* +- * TODO: Make sure that we wait at least required delay but why we +- * have to extend it one tick more? +- */ +- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2); ++ schedule_msec_hrtimeout_interruptible(delay + 1); + } + + static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc) +diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c +index 201a9800..5cebabc 100644 +--- a/drivers/media/i2c/msp3400-driver.c ++++ b/drivers/media/i2c/msp3400-driver.c +@@ -184,7 +184,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr) + break; + dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err, + dev, addr); +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + if (err == 3) { + dev_warn(&client->dev, "resetting chip, sound will go off.\n"); +@@ -225,7 +225,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val) + break; + dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err, + dev, addr); +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + if (err == 3) { + dev_warn(&client->dev, "resetting chip, sound will go off.\n"); +diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c +index f752f39..23372af6 100644 +--- a/drivers/media/pci/ivtv/ivtv-gpio.c ++++ b/drivers/media/pci/ivtv/ivtv-gpio.c +@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv) + curout = (curout & ~0xF) | 1; + write_reg(curout, IVTV_REG_GPIO_OUT); + /* We could use something else for smaller time */ +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + curout |= 2; + write_reg(curout, IVTV_REG_GPIO_OUT); + curdir &= ~0x80; +@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value) + curout = read_reg(IVTV_REG_GPIO_OUT); + curout &= ~(1 << itv->card->xceive_pin); + write_reg(curout, IVTV_REG_GPIO_OUT); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ 
schedule_msec_hrtimeout_interruptible((1)); + + curout |= 1 << itv->card->xceive_pin; + write_reg(curout, IVTV_REG_GPIO_OUT); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + return 0; + } + +diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c +index c2927fd..bdee269 100644 +--- a/drivers/media/radio/radio-mr800.c ++++ b/drivers/media/radio/radio-mr800.c +@@ -382,7 +382,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv, + retval = -ENODATA; + break; + } +- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) { ++ if (schedule_msec_hrtimeout_interruptible((10))) { + retval = -ERESTARTSYS; + break; + } +diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c +index 83fe7ab..aaae5fa 100644 +--- a/drivers/media/radio/radio-tea5777.c ++++ b/drivers/media/radio/radio-tea5777.c +@@ -249,7 +249,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait) + } + + if (wait) { +- if (schedule_timeout_interruptible(msecs_to_jiffies(wait))) ++ if (schedule_msec_hrtimeout_interruptible((wait))) + return -ERESTARTSYS; + } + +diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c +index 4dc2067..29f4416 100644 +--- a/drivers/media/radio/tea575x.c ++++ b/drivers/media/radio/tea575x.c +@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea, + for (;;) { + if (time_after(jiffies, timeout)) + break; +- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) { ++ if (schedule_msec_hrtimeout_interruptible((10))) { + /* some signal arrived, stop search */ + tea->val &= ~TEA575X_BIT_SEARCH; + snd_tea575x_set_freq(tea); +diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c +index 6030ac5..f0c1a101 100644 +--- a/drivers/misc/panel.c ++++ b/drivers/misc/panel.c +@@ -760,7 +760,7 @@ static void long_sleep(int ms) + if (in_interrupt()) + mdelay(ms); + else +- schedule_timeout_interruptible(msecs_to_jiffies(ms)); ++ schedule_msec_hrtimeout_interruptible((ms)); + } + + /* +diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c +index f9fd4b3..00ad2f3 100644 +--- a/drivers/parport/ieee1284.c ++++ b/drivers/parport/ieee1284.c +@@ -215,7 +215,7 @@ int parport_wait_peripheral(struct parport *port, + /* parport_wait_event didn't time out, but the + * peripheral wasn't actually ready either. + * Wait for another 10ms. */ +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + } + } + +diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c +index c0e7d21..e1b4fd4 100644 +--- a/drivers/parport/ieee1284_ops.c ++++ b/drivers/parport/ieee1284_ops.c +@@ -536,7 +536,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port, + /* Yield the port for a while. 
*/ + if (count && dev->port->irq != PARPORT_IRQ_NONE) { + parport_release (dev); +- schedule_timeout_interruptible(msecs_to_jiffies(40)); ++ schedule_msec_hrtimeout_interruptible((40)); + parport_claim_or_block (dev); + } + else +diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c +index 55663b3..0363fed 100644 +--- a/drivers/platform/x86/intel_ips.c ++++ b/drivers/platform/x86/intel_ips.c +@@ -812,7 +812,7 @@ static int ips_adjust(void *data) + ips_gpu_lower(ips); + + sleep: +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD)); + } while (!kthread_should_stop()); + + dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n"); +@@ -991,7 +991,7 @@ static int ips_monitor(void *data) + seqno_timestamp = get_jiffies_64(); + + old_cpu_power = thm_readl(THM_CEC); +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + + /* Collect an initial average */ + for (i = 0; i < IPS_SAMPLE_COUNT; i++) { +@@ -1018,7 +1018,7 @@ static int ips_monitor(void *data) + mchp_samples[i] = mchp; + } + +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + if (kthread_should_stop()) + break; + } +@@ -1045,7 +1045,7 @@ static int ips_monitor(void *data) + * us to reduce the sample frequency if the CPU and GPU are idle. + */ + old_cpu_power = thm_readl(THM_CEC); +- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); ++ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD)); + last_sample_period = IPS_SAMPLE_PERIOD; + + setup_deferrable_timer_on_stack(&timer, monitor_timeout, +diff --git a/net/core/pktgen.c b/net/core/pktgen.c +index 8e69ce4..0227415 100644 +--- a/net/core/pktgen.c ++++ b/net/core/pktgen.c +@@ -1992,7 +1992,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname) + mutex_unlock(&pktgen_thread_lock); + pr_debug("%s: waiting for %s to disappear....\n", + __func__, ifname); +- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try)); ++ schedule_msec_hrtimeout_interruptible((msec_per_try)); + mutex_lock(&pktgen_thread_lock); + + if (++i >= max_tries) { +diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c +index 2efc5b4..3e3248c 100644 +--- a/sound/soc/codecs/wm8350.c ++++ b/sound/soc/codecs/wm8350.c +@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work) + out2->ramp == WM8350_RAMP_UP) { + /* delay is longer over 0dB as increases are larger */ + if (i >= WM8350_OUTn_0dB) +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (2)); + else +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (1)); + } else + udelay(50); /* doesn't matter if we delay longer */ +@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec, + (platform->dis_out4 << 6)); + + /* wait for discharge */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + cap_discharge_msecs)); + +@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec, + WM8350_VBUFEN); + + /* wait for vmid */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + vmid_charge_msecs)); + +@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec, + 
wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1); + + /* wait */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform-> + vmid_discharge_msecs)); + +@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec, + pm1 | WM8350_OUTPUT_DRAIN_EN); + + /* wait */ +- schedule_timeout_interruptible(msecs_to_jiffies ++ schedule_msec_hrtimeout_interruptible( + (platform->drain_msecs)); + + pm1 &= ~WM8350_BIASEN; +diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c +index c77b49a..fc50456 100644 +--- a/sound/soc/codecs/wm8900.c ++++ b/sound/soc/codecs/wm8900.c +@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec, + /* Need to let things settle before stopping the clock + * to ensure that restart works, see "Stopping the + * master clock" in the datasheet. */ +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + snd_soc_write(codec, WM8900_REG_POWER2, + WM8900_REG_POWER2_SYSCLK_ENA); + break; +diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c +index 7e48221..0c85a20 100644 +--- a/sound/soc/codecs/wm9713.c ++++ b/sound/soc/codecs/wm9713.c +@@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w, + + /* Gracefully shut down the voice interface. */ + snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0200); +- schedule_timeout_interruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_interruptible((1)); + snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0f00); + snd_soc_update_bits(codec, AC97_EXTENDED_MID, 0x1000, 0x1000); + +@@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec, + wm9713->pll_in = freq_in; + + /* wait 10ms AC97 link frames for the link to stabilise */ +- schedule_timeout_interruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_interruptible((10)); + return 0; + } + +-- +2.9.3 + diff --git a/patches/0015-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch b/patches/0015-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch new file mode 100644 index 0000000..be929c6 --- /dev/null +++ b/patches/0015-Replace-all-calls-to-schedule_timeout_uninterruptibl.patch @@ -0,0 +1,160 @@ +From 1137ff2bfa5eb63b53747fe303fdb3937c5e1077 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Mon, 20 Feb 2017 13:30:32 +1100 +Subject: [PATCH 15/25] Replace all calls to schedule_timeout_uninterruptible + of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible + +--- + drivers/media/pci/cx18/cx18-gpio.c | 4 ++-- + drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++-- + drivers/rtc/rtc-wm8350.c | 6 +++--- + drivers/scsi/lpfc/lpfc_scsi.c | 2 +- + sound/pci/maestro3.c | 4 ++-- + sound/soc/codecs/rt5631.c | 4 ++-- + sound/soc/soc-dapm.c | 2 +- + 7 files changed, 13 insertions(+), 13 deletions(-) + +diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c +index 38dc6b8..3cd3098 100644 +--- a/drivers/media/pci/cx18/cx18-gpio.c ++++ b/drivers/media/pci/cx18/cx18-gpio.c +@@ -95,11 +95,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi, + + /* Assert */ + gpio_update(cx, mask, ~active_lo); +- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs)); ++ schedule_msec_hrtimeout_uninterruptible((assert_msecs)); + + /* Deassert */ + gpio_update(cx, mask, ~active_hi); +- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs)); ++ 
schedule_msec_hrtimeout_uninterruptible((recovery_msecs)); + } + + /* +diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c +index 356aba9..d2cc761 100644 +--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c ++++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c +@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv, + * doesn't seem to have as many firmware restart cycles... + * + * As a test, we're sticking in a 1/100s delay here */ +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + + return 0; + +@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv) + IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n"); + i = 5000; + do { +- schedule_timeout_uninterruptible(msecs_to_jiffies(40)); ++ schedule_msec_hrtimeout_uninterruptible((40)); + /* Todo... wait for sync command ... */ + + read_register(priv->net_dev, IPW_REG_INTA, &inta); +diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c +index fa247de..f1a28d8 100644 +--- a/drivers/rtc/rtc-wm8350.c ++++ b/drivers/rtc/rtc-wm8350.c +@@ -121,7 +121,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm) + /* Wait until confirmation of stopping */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (--retries && !(rtc_ctrl & WM8350_RTC_STS)); + + if (!retries) { +@@ -204,7 +204,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350) + /* Wait until confirmation of stopping */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS)); + + if (!(rtc_ctrl & WM8350_RTC_ALMSTS)) +@@ -227,7 +227,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350) + /* Wait until confirmation */ + do { + rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); +- schedule_timeout_uninterruptible(msecs_to_jiffies(1)); ++ schedule_msec_hrtimeout_uninterruptible((1)); + } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS); + + if (rtc_ctrl & WM8350_RTC_ALMSTS) +diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c +index ad350d9..69a58a8 100644 +--- a/drivers/scsi/lpfc/lpfc_scsi.c ++++ b/drivers/scsi/lpfc/lpfc_scsi.c +@@ -5109,7 +5109,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id, + tgt_id, lun_id, context); + later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies; + while (time_after(later, jiffies) && cnt) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(20)); ++ schedule_msec_hrtimeout_uninterruptible((20)); + cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context); + } + if (cnt) { +diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c +index cafea6d..d374514 100644 +--- a/sound/pci/maestro3.c ++++ b/sound/pci/maestro3.c +@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip) + outw(0, io + GPIO_DATA); + outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION); + +- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1)); ++ schedule_msec_hrtimeout_uninterruptible((delay1)); + + outw(GPO_PRIMARY_AC97, io + GPIO_DATA); + udelay(5); +@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip) + outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + 
RING_BUS_CTRL_A); + outw(~0, io + GPIO_MASK); + +- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2)); ++ schedule_msec_hrtimeout_uninterruptible((delay2)); + + if (! snd_m3_try_read_vendor(chip)) + break; +diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c +index 0e41808..611cb9f 100644 +--- a/sound/soc/codecs/rt5631.c ++++ b/sound/soc/codecs/rt5631.c +@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable) + hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2); + snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff); + if (enable) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + /* config one-bit depop parameter */ + rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f); + snd_soc_update_bits(codec, RT5631_HP_OUT_VOL, +@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable) + hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2); + snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff); + if (enable) { +- schedule_timeout_uninterruptible(msecs_to_jiffies(10)); ++ schedule_msec_hrtimeout_uninterruptible((10)); + + /* config depop sequence parameter */ + rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f); +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 27dd02e..7ba49f4 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -134,7 +134,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm) + static void pop_wait(u32 pop_time) + { + if (pop_time) +- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time)); ++ schedule_msec_hrtimeout_uninterruptible((pop_time)); + } + + static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...) +-- +2.9.3 + diff --git a/patches/0016-Fix-build-for-disabled-highres-timers-with-hrtimeout.patch b/patches/0016-Fix-build-for-disabled-highres-timers-with-hrtimeout.patch new file mode 100644 index 0000000..0e1e0b7 --- /dev/null +++ b/patches/0016-Fix-build-for-disabled-highres-timers-with-hrtimeout.patch @@ -0,0 +1,84 @@ +From 37496baeea800e745a77620e90660496135f7fa5 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Mon, 20 Feb 2017 13:31:42 +1100 +Subject: [PATCH 16/25] Fix build for disabled highres timers with hrtimeout + code. 
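Without this patch a CONFIG_HIGH_RES_TIMERS=n build would fail, since every converted caller now references the hrtimeout helpers. The static inline fallbacks keep those callers config-independent, and the pm_freezing stub in freezer.h likewise covers !CONFIG_FREEZER builds ahead of its use in patch 0020. A sketch of what the fallback buys; my_settle_delay() is hypothetical:

#include <linux/sched.h>

static void my_settle_delay(void)
{
	__set_current_state(TASK_UNINTERRUPTIBLE);
	/* hrtimer sleep on highres builds; degrades to plain
	 * schedule_timeout(msecs_to_jiffies(20)) otherwise */
	schedule_msec_hrtimeout(20);
}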
+ +--- + include/linux/freezer.h | 1 + + include/linux/sched.h | 22 ++++++++++++++++++++++ + kernel/time/timer.c | 2 ++ + 3 files changed, 25 insertions(+) + +diff --git a/include/linux/freezer.h b/include/linux/freezer.h +index dd03e83..2fda682 100644 +--- a/include/linux/freezer.h ++++ b/include/linux/freezer.h +@@ -296,6 +296,7 @@ static inline void set_freezable(void) {} + #define wait_event_freezekillable_unsafe(wq, condition) \ + wait_event_killable(wq, condition) + ++#define pm_freezing (false) + #endif /* !CONFIG_FREEZER */ + + #endif /* FREEZER_H_INCLUDED */ +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 46544f4..680494d 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -458,10 +458,32 @@ extern signed long schedule_timeout_killable(signed long timeout); + extern signed long schedule_timeout_uninterruptible(signed long timeout); + extern signed long schedule_timeout_idle(signed long timeout); + ++#ifdef CONFIG_HIGH_RES_TIMERS + extern signed long schedule_msec_hrtimeout(signed long timeout); + extern signed long schedule_min_hrtimeout(void); + extern signed long schedule_msec_hrtimeout_interruptible(signed long timeout); + extern signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout); ++#else ++static inline signed long schedule_msec_hrtimeout(signed long timeout) ++{ ++ return schedule_timeout(msecs_to_jiffies(timeout)); ++} ++ ++static inline signed long schedule_min_hrtimeout(void) ++{ ++ return schedule_timeout(1); ++} ++ ++static inline signed long schedule_msec_hrtimeout_interruptible(signed long timeout) ++{ ++ return schedule_timeout_interruptible(msecs_to_jiffies(timeout)); ++} ++ ++static inline signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout) ++{ ++ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout)); ++} ++#endif + + asmlinkage void schedule(void); + extern void schedule_preempt_disabled(void); +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index bc53598..0e22641 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -1750,6 +1750,7 @@ signed long __sched schedule_timeout(signed long timeout) + + expire = timeout + jiffies; + ++#ifdef CONFIG_HIGH_RES_TIMERS + if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) { + /* + * Special case 1 as being a request for the minimum timeout +@@ -1760,6 +1761,7 @@ signed long __sched schedule_timeout(signed long timeout) + return 0; + goto out_timeout; + } ++#endif + + setup_timer_on_stack(&timer, process_timeout, (unsigned long)current); + __mod_timer(&timer, expire, false); +-- +2.9.3 + diff --git a/patches/0018-Make-threaded-IRQs-optionally-the-default-which-can-.patch b/patches/0018-Make-threaded-IRQs-optionally-the-default-which-can-.patch new file mode 100644 index 0000000..fe49e0e --- /dev/null +++ b/patches/0018-Make-threaded-IRQs-optionally-the-default-which-can-.patch @@ -0,0 +1,61 @@ +From a0d29f014dbcb29649dff1a9b8df58bad7be3926 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Wed, 7 Dec 2016 21:13:16 +1100 +Subject: [PATCH 18/25] Make threaded IRQs optionally the default which can be + disabled. 
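+
+With CONFIG_FORCE_IRQ_THREADING=y the kernel behaves as if it had been booted
+with the existing threadirqs parameter, and the new early_param below lets
+that be undone from the boot loader. A hypothetical boot entry (kernel image
+name and root device are placeholders):
+
+	linux /boot/vmlinuz-linux root=/dev/sda2 rw nothreadirqs
+
+As with threadirqs, handlers registered with IRQF_NO_THREAD are not forced
+into a thread either way.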
+ +--- + kernel/irq/Kconfig | 14 ++++++++++++++ + kernel/irq/manage.c | 10 ++++++++++ + 2 files changed, 24 insertions(+) + +diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig +index 3bbfd6a..351bf16 100644 +--- a/kernel/irq/Kconfig ++++ b/kernel/irq/Kconfig +@@ -95,6 +95,20 @@ config IRQ_DOMAIN_DEBUG + config IRQ_FORCED_THREADING + bool + ++config FORCE_IRQ_THREADING ++ bool "Make IRQ threading compulsory" ++ depends on IRQ_FORCED_THREADING ++ default y ++ ---help--- ++ ++ Make IRQ threading mandatory for any IRQ handlers that support it ++ instead of being optional and requiring the threadirqs kernel ++ parameter. Instead they can be optionally disabled with the ++ nothreadirqs kernel parameter. ++ ++ Enable if you are building for a desktop or low latency system, ++ otherwise say N. ++ + config SPARSE_IRQ + bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ + ---help--- +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 6b66959..6b3fb17 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -22,7 +22,17 @@ + #include "internals.h" + + #ifdef CONFIG_IRQ_FORCED_THREADING ++#ifdef CONFIG_FORCE_IRQ_THREADING ++__read_mostly bool force_irqthreads = true; ++#else + __read_mostly bool force_irqthreads; ++#endif ++static int __init setup_noforced_irqthreads(char *arg) ++{ ++ force_irqthreads = false; ++ return 0; ++} ++early_param("nothreadirqs", setup_noforced_irqthreads); + + static int __init setup_forced_irqthreads(char *arg) + { +-- +2.9.3 + diff --git a/patches/0020-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch b/patches/0020-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch new file mode 100644 index 0000000..1bf923d --- /dev/null +++ b/patches/0020-Don-t-use-hrtimer-overlay-when-pm_freezing-since-som.patch @@ -0,0 +1,69 @@ +From a278cad439033005610ddda23882f2c681c669d1 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Mon, 20 Feb 2017 13:32:58 +1100 +Subject: [PATCH 20/25] Don't use hrtimer overlay when pm_freezing since some + drivers still don't correctly use freezable timeouts. + +--- + kernel/time/hrtimer.c | 2 +- + kernel/time/timer.c | 9 +++++---- + 2 files changed, 6 insertions(+), 5 deletions(-) + +diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c +index 26ac1f8..25e1555 100644 +--- a/kernel/time/hrtimer.c ++++ b/kernel/time/hrtimer.c +@@ -1818,7 +1818,7 @@ signed long __sched schedule_msec_hrtimeout(signed long timeout) + * (yet) better than Hz, as would occur during startup, use regular + * timers. + */ +- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ) ++ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing) + return schedule_timeout(jiffs); + + delta = (timeout % 1000) * NSEC_PER_MSEC; +diff --git a/kernel/time/timer.c b/kernel/time/timer.c +index 0e22641..45a6e1f 100644 +--- a/kernel/time/timer.c ++++ b/kernel/time/timer.c +@@ -42,6 +42,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1901,12 +1902,12 @@ void msleep(unsigned int msecs) + * Use high resolution timers where the resolution of tick based + * timers is inadequate. 
+ */ +- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) { + while (msecs) + msecs = schedule_msec_hrtimeout_uninterruptible(msecs); + return; + } +- timeout = msecs_to_jiffies(msecs) + 1; ++ timeout = jiffs + 1; + + while (timeout) + timeout = schedule_timeout_uninterruptible(timeout); +@@ -1923,12 +1924,12 @@ unsigned long msleep_interruptible(unsigned int msecs) + int jiffs = msecs_to_jiffies(msecs); + unsigned long timeout; + +- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) { ++ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) { + while (msecs && !signal_pending(current)) + msecs = schedule_msec_hrtimeout_interruptible(msecs); + return msecs; + } +- timeout = msecs_to_jiffies(msecs) + 1; ++ timeout = jiffs + 1; + + while (timeout && !signal_pending(current)) + timeout = schedule_timeout_interruptible(timeout); +-- +2.9.3 + diff --git a/patches/0021-Make-writeback-throttling-default-enabled.patch b/patches/0021-Make-writeback-throttling-default-enabled.patch new file mode 100644 index 0000000..0d587bb --- /dev/null +++ b/patches/0021-Make-writeback-throttling-default-enabled.patch @@ -0,0 +1,34 @@ +From da915e0f3abeb61f6a132bb77b7d0a9bf0573233 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Mon, 20 Feb 2017 13:38:23 +1100 +Subject: [PATCH 21/25] Make writeback throttling default enabled. + +--- + block/Kconfig | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/block/Kconfig b/block/Kconfig +index 8bf114a..83e6f9d 100644 +--- a/block/Kconfig ++++ b/block/Kconfig +@@ -123,7 +123,7 @@ config BLK_CMDLINE_PARSER + + config BLK_WBT + bool "Enable support for block device writeback throttling" +- default n ++ default y + ---help--- + Enabling this option enables the block layer to throttle buffered + background writeback from the VM, making it more smooth and having +@@ -133,7 +133,7 @@ config BLK_WBT + + config BLK_WBT_SQ + bool "Single queue writeback throttling" +- default n ++ default y + depends on BLK_WBT + ---help--- + Enable writeback throttling by default on legacy single queue devices +-- +2.9.3 + diff --git a/patches/0022-Swap-sucks.patch b/patches/0022-Swap-sucks.patch new file mode 100644 index 0000000..ddab4f8 --- /dev/null +++ b/patches/0022-Swap-sucks.patch @@ -0,0 +1,43 @@ +From 2f96168f72bbd431c0e6d28b44393e98b49ca787 Mon Sep 17 00:00:00 2001 +From: Con Kolivas +Date: Mon, 20 Feb 2017 13:48:54 +1100 +Subject: [PATCH 22/25] Swap sucks. + +--- + include/linux/swap.h | 6 +----- + mm/vmscan.c | 2 +- + 2 files changed, 2 insertions(+), 6 deletions(-) + +diff --git a/include/linux/swap.h b/include/linux/swap.h +index 7f47b70..1c2ed28 100644 +--- a/include/linux/swap.h ++++ b/include/linux/swap.h +@@ -361,11 +361,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t, + extern atomic_long_t nr_swap_pages; + extern long total_swap_pages; + +-/* Swap 50% full? Release swapcache more aggressively.. */ +-static inline bool vm_swap_full(void) +-{ +- return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages; +-} ++#define vm_swap_full() 1 + + static inline long get_nr_swap_pages(void) + { +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 532a2a7..15e4260 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -141,7 +141,7 @@ struct scan_control { + /* + * From 0 .. 100. Higher means more swappy. 
+ */
+-int vm_swappiness = 60;
++int vm_swappiness = 33;
+ /*
+ * The total number of pages which are beyond the high watermark within all
+ * zones.
+--
+2.9.3
+
diff --git a/patches/enable_additional_cpu_optimizations_for_gcc_v4.9+_kernel_v3.15+.patch b/patches/enable_additional_cpu_optimizations_for_gcc_v4.9+_kernel_v3.15+.patch
index d9729b2..76cbd9d 100644
--- a/patches/enable_additional_cpu_optimizations_for_gcc_v4.9+_kernel_v3.15+.patch
+++ b/patches/enable_additional_cpu_optimizations_for_gcc_v4.9+_kernel_v3.15+.patch
@@ -1,33 +1,51 @@
-WARNING - this version of the patch works with version 4.9+ of gcc and with
-kernel version 3.15.x+ and should NOT be applied when compiling on older
-versions due to name changes of the flags with the 4.9 release of gcc.
+WARNING
+This patch works with gcc versions 4.9+ and with kernel version 3.15+ and should
+NOT be applied when compiling on older versions of gcc due to key name changes
+of the march flags introduced with the version 4.9 release of gcc.[1]
+
 Use the older version of this patch hosted on the same github for older
-versions of gcc. For example:
+versions of gcc.
 
-corei7 --> nehalem
-corei7-avx --> sandybridge
-core-avx-i --> ivybridge
-core-avx2 --> haswell
+FEATURES
+This patch adds additional CPU options to the Linux kernel accessible under:
+ Processor type and features --->
+ Processor family --->
 
-For more, see: https://gcc.gnu.org/gcc-4.9/changes.html
+The expanded microarchitectures include:
+* AMD Improved K8-family
+* AMD K10-family
+* AMD Family 10h (Barcelona)
+* AMD Family 14h (Bobcat)
+* AMD Family 16h (Jaguar)
+* AMD Family 15h (Bulldozer)
+* AMD Family 15h (Piledriver)
+* AMD Family 15h (Steamroller)
+* AMD Family 15h (Excavator)
+* AMD Family 17h (Zen)
+* Intel Silvermont low-power processors
+* Intel 1st Gen Core i3/i5/i7 (Nehalem)
+* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
+* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
+* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
+* Intel 4th Gen Core i3/i5/i7 (Haswell)
+* Intel 5th Gen Core i3/i5/i7 (Broadwell)
+* Intel 6th Gen Core i3/i5/i7 (Skylake)
 
-It also changes 'atom' to 'bonnell' in accordance with the gcc v4.9 changes.
-Note that upstream is using the deprecated 'match=atom' flags when I believe it
-should use the newer 'march=bonnell' flag for atom processors.
+It also offers to compile passing the 'native' option which, "selects the CPU
+to generate code for at compilation time by determining the processor type of
+the compiling machine. Using -march=native enables all instruction subsets
+supported by the local machine and will produce code optimized for the local
+machine under the constraints of the selected instruction set."[3]
 
-I have made that change to this patch set as well. See the following kernel
-bug report to see if I'm right: https://bugzilla.kernel.org/show_bug.cgi?id=77461
+MINOR NOTES
+This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
+changes. 
Note that upstream is using the deprecated 'match=atom' flags when I +believe it should use the newer 'march=bonnell' flag for atom processors.[2] -This patch will expand the number of microarchitectures to include newer -processors including: AMD K10-family, AMD Family 10h (Barcelona), AMD Family -14h (Bobcat), AMD Family 15h (Bulldozer), AMD Family 15h (Piledriver), AMD -Family 15h (Steamroller), Family 16h (Jaguar), Intel 1st Gen Core i3/i5/i7 -(Nehalem), Intel 1.5 Gen Core i3/i5/i7 (Westmere), Intel 2nd Gen Core i3/i5/i7 -(Sandybridge), Intel 3rd Gen Core i3/i5/i7 (Ivybridge), Intel 4th Gen Core -i3/i5/i7 (Haswell), Intel 5th Gen Core i3/i5/i7 (Broadwell), and the low power -Silvermont series of Atom processors (Silvermont). It also offers the compiler -the 'native' flag. +It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The +recommendation is use to the 'atom' option instead. +BENEFITS Small but real speed increases are measurable using a make endpoint comparing a generic kernel to one built with one of the respective microarchs. @@ -38,8 +56,18 @@ REQUIREMENTS linux version >=3.15 gcc version >=4.9 ---- a/arch/x86/include/asm/module.h 2015-08-30 14:34:09.000000000 -0400 -+++ b/arch/x86/include/asm/module.h 2015-11-06 14:18:24.234941036 -0500 +ACKNOWLEDGMENTS +This patch builds on the seminal work by Jeroen.[5] + +REFERENCES +1. https://gcc.gnu.org/gcc-4.9/changes.html +2. https://bugzilla.kernel.org/show_bug.cgi?id=77461 +3. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html +4. https://github.com/graysky2/kernel_gcc_patch/issues/15 +5. http://www.linuxforge.net/docs/linux/linux-gcc.php + +--- a/arch/x86/include/asm/module.h 2016-12-11 14:17:54.000000000 -0500 ++++ b/arch/x86/include/asm/module.h 2017-01-06 20:44:36.602227264 -0500 @@ -15,6 +15,24 @@ #define MODULE_PROC_FAMILY "586MMX " #elif defined CONFIG_MCORE2 @@ -65,7 +93,7 @@ gcc version >=4.9 #elif defined CONFIG_MATOM #define MODULE_PROC_FAMILY "ATOM " #elif defined CONFIG_M686 -@@ -33,6 +51,22 @@ +@@ -33,6 +51,26 @@ #define MODULE_PROC_FAMILY "K7 " #elif defined CONFIG_MK8 #define MODULE_PROC_FAMILY "K8 " @@ -80,17 +108,29 @@ gcc version >=4.9 +#elif defined CONFIG_MBULLDOZER +#define MODULE_PROC_FAMILY "BULLDOZER " +#elif defined CONFIG_MPILEDRIVER -+#define MODULE_PROC_FAMILY "STEAMROLLER " -+#elif defined CONFIG_MSTEAMROLLER +#define MODULE_PROC_FAMILY "PILEDRIVER " ++#elif defined CONFIG_MSTEAMROLLER ++#define MODULE_PROC_FAMILY "STEAMROLLER " +#elif defined CONFIG_MJAGUAR +#define MODULE_PROC_FAMILY "JAGUAR " ++#elif defined CONFIG_MEXCAVATOR ++#define MODULE_PROC_FAMILY "EXCAVATOR " ++#elif defined CONFIG_MZEN ++#define MODULE_PROC_FAMILY "ZEN " #elif defined CONFIG_MELAN #define MODULE_PROC_FAMILY "ELAN " #elif defined CONFIG_MCRUSOE ---- a/arch/x86/Kconfig.cpu 2015-08-30 14:34:09.000000000 -0400 -+++ b/arch/x86/Kconfig.cpu 2015-11-06 14:20:14.948369244 -0500 -@@ -137,9 +137,8 @@ config MPENTIUM4 +--- a/arch/x86/Kconfig.cpu 2016-12-11 14:17:54.000000000 -0500 ++++ b/arch/x86/Kconfig.cpu 2017-01-06 20:46:14.004109597 -0500 +@@ -115,6 +115,7 @@ config MPENTIUMM + config MPENTIUM4 + bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon" + depends on X86_32 ++ select X86_P6_NOP + ---help--- + Select this for Intel Pentium 4 chips. This includes the + Pentium 4, Pentium D, P4-based Celeron and Xeon, and +@@ -147,9 +148,8 @@ config MPENTIUM4 -Paxville -Dempsey @@ -101,7 +141,7 @@ gcc version >=4.9 depends on X86_32 ---help--- Select this for an AMD K6-family processor. 
Enables use of -@@ -147,7 +146,7 @@ config MK6 +@@ -157,7 +157,7 @@ config MK6 flags to GCC. config MK7 @@ -110,7 +150,7 @@ gcc version >=4.9 depends on X86_32 ---help--- Select this for an AMD Athlon K7-family processor. Enables use of -@@ -155,12 +154,69 @@ config MK7 +@@ -165,12 +165,83 @@ config MK7 flags to GCC. config MK8 @@ -139,54 +179,77 @@ gcc version >=4.9 +config MBARCELONA + bool "AMD Barcelona" + ---help--- -+ Select this for AMD Barcelona and newer processors. ++ Select this for AMD Family 10h Barcelona processors. + + Enables -march=barcelona + +config MBOBCAT + bool "AMD Bobcat" + ---help--- -+ Select this for AMD Bobcat processors. ++ Select this for AMD Family 14h Bobcat processors. + + Enables -march=btver1 + ++config MJAGUAR ++ bool "AMD Jaguar" ++ ---help--- ++ Select this for AMD Family 16h Jaguar processors. ++ ++ Enables -march=btver2 ++ +config MBULLDOZER + bool "AMD Bulldozer" + ---help--- -+ Select this for AMD Bulldozer processors. ++ Select this for AMD Family 15h Bulldozer processors. + + Enables -march=bdver1 + +config MPILEDRIVER + bool "AMD Piledriver" + ---help--- -+ Select this for AMD Piledriver processors. ++ Select this for AMD Family 15h Piledriver processors. + + Enables -march=bdver2 + +config MSTEAMROLLER + bool "AMD Steamroller" + ---help--- -+ Select this for AMD Steamroller processors. ++ Select this for AMD Family 15h Steamroller processors. + + Enables -march=bdver3 + -+config MJAGUAR -+ bool "AMD Jaguar" ++config MEXCAVATOR ++ bool "AMD Excavator" + ---help--- -+ Select this for AMD Jaguar processors. ++ Select this for AMD Family 15h Excavator processors. + -+ Enables -march=btver2 ++ Enables -march=bdver4 ++ ++config MZEN ++ bool "AMD Zen" ++ ---help--- ++ Select this for AMD Family 17h Zen processors. ++ ++ Enables -march=znver1 + config MCRUSOE bool "Crusoe" depends on X86_32 -@@ -251,8 +307,17 @@ config MPSC +@@ -252,6 +323,7 @@ config MVIAC7 + + config MPSC + bool "Intel P4 / older Netburst based Xeon" ++ select X86_P6_NOP + depends on X86_64 + ---help--- + Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey +@@ -261,8 +333,19 @@ config MPSC using the cpu family field in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one. +config MATOM + bool "Intel Atom" ++ select X86_P6_NOP + ---help--- + + Select this for the Intel Atom platform. Intel Atom CPUs have an @@ -197,10 +260,11 @@ gcc version >=4.9 config MCORE2 - bool "Core 2/newer Xeon" + bool "Intel Core 2" ++ select X86_P6_NOP ---help--- Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and -@@ -260,14 +325,71 @@ config MCORE2 +@@ -270,14 +353,79 @@ config MCORE2 family in /proc/cpuinfo. Newer ones have 6 and older ones 15 (not a typo) @@ -210,6 +274,7 @@ gcc version >=4.9 + +config MNEHALEM + bool "Intel Nehalem" ++ select X86_P6_NOP ---help--- - Select this for the Intel Atom platform. Intel Atom CPUs have an @@ -222,6 +287,7 @@ gcc version >=4.9 + +config MWESTMERE + bool "Intel Westmere" ++ select X86_P6_NOP + ---help--- + + Select this for the Intel Westmere formerly Nehalem-C family. @@ -230,6 +296,7 @@ gcc version >=4.9 + +config MSILVERMONT + bool "Intel Silvermont" ++ select X86_P6_NOP + ---help--- + + Select this for the Intel Silvermont platform. @@ -238,6 +305,7 @@ gcc version >=4.9 + +config MSANDYBRIDGE + bool "Intel Sandy Bridge" ++ select X86_P6_NOP + ---help--- + + Select this for 2nd Gen Core processors in the Sandy Bridge family. 
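
For orientation: each of these new entries only sets a single CONFIG_M*
symbol; the arch/x86 Makefile hunks further down map that symbol to a
compiler flag. An illustrative pairing (the cflags line is taken from the
arch/x86/Makefile hunk below):

	CONFIG_MSANDYBRIDGE=y
	cflags-$(CONFIG_MSANDYBRIDGE) += $(call cc-option,-march=sandybridge)

Because cc-option probes the compiler first, a gcc that does not recognize a
given -march value simply drops the flag rather than breaking the build.
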
@@ -246,6 +314,7 @@ gcc version >=4.9 + +config MIVYBRIDGE + bool "Intel Ivy Bridge" ++ select X86_P6_NOP + ---help--- + + Select this for 3rd Gen Core processors in the Ivy Bridge family. @@ -254,6 +323,7 @@ gcc version >=4.9 + +config MHASWELL + bool "Intel Haswell" ++ select X86_P6_NOP + ---help--- + + Select this for 4th Gen Core processors in the Haswell family. @@ -262,6 +332,7 @@ gcc version >=4.9 + +config MBROADWELL + bool "Intel Broadwell" ++ select X86_P6_NOP + ---help--- + + Select this for 5th Gen Core processors in the Broadwell family. @@ -270,6 +341,7 @@ gcc version >=4.9 + +config MSKYLAKE + bool "Intel Skylake" ++ select X86_P6_NOP + ---help--- + + Select this for 6th Gen Core processors in the Skylake family. @@ -278,7 +350,7 @@ gcc version >=4.9 config GENERIC_CPU bool "Generic-x86-64" -@@ -276,6 +398,19 @@ config GENERIC_CPU +@@ -286,6 +434,19 @@ config GENERIC_CPU Generic x86-64 CPU. Run equally well on all x86-64 CPUs. @@ -298,16 +370,16 @@ gcc version >=4.9 endchoice config X86_GENERIC -@@ -300,7 +435,7 @@ config X86_INTERNODE_CACHE_SHIFT +@@ -310,7 +471,7 @@ config X86_INTERNODE_CACHE_SHIFT config X86_L1_CACHE_SHIFT int default "7" if MPENTIUM4 || MPSC - default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU -+ default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU ++ default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU default "4" if MELAN || M486 || MGEODEGX1 default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX -@@ -331,11 +466,11 @@ config X86_ALIGNMENT_16 +@@ -341,45 +502,46 @@ config X86_ALIGNMENT_16 config X86_INTEL_USERCOPY def_bool y @@ -321,7 +393,38 @@ gcc version >=4.9 config X86_USE_3DNOW def_bool y -@@ -359,17 +494,17 @@ config X86_P6_NOP + depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML + +-# +-# P6_NOPs are a relatively minor optimization that require a family >= +-# 6 processor, except that it is broken on certain VIA chips. +-# Furthermore, AMD chips prefer a totally different sequence of NOPs +-# (which work on all CPUs). In addition, it looks like Virtual PC +-# does not understand them. +-# +-# As a result, disallow these if we're not compiling for X86_64 (these +-# NOPs do work on all x86-64 capable chips); the list of processors in +-# the right-hand clause are the cores that benefit from this optimization. +-# + config X86_P6_NOP +- def_bool y +- depends on X86_64 +- depends on (MCORE2 || MPENTIUM4 || MPSC) ++ default n ++ bool "Support for P6_NOPs on Intel chips" ++ depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE) ++ ---help--- ++ P6_NOPs are a relatively minor optimization that require a family >= ++ 6 processor, except that it is broken on certain VIA chips. 
++ Furthermore, AMD chips prefer a totally different sequence of NOPs ++ (which work on all CPUs). In addition, it looks like Virtual PC ++ does not understand them. ++ ++ As a result, disallow these if we're not compiling for X86_64 (these ++ NOPs do work on all x86-64 capable chips); the list of processors in ++ the right-hand clause are the cores that benefit from this optimization. ++ ++ Say Y if you have Intel CPU newer than Pentium Pro, N otherwise. config X86_TSC def_bool y @@ -338,13 +441,13 @@ gcc version >=4.9 config X86_CMOV def_bool y - depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) -+ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX) ++ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX) config X86_MINIMUM_CPU_FAMILY int ---- a/arch/x86/Makefile 2015-08-30 14:34:09.000000000 -0400 -+++ b/arch/x86/Makefile 2015-11-06 14:21:05.708983344 -0500 -@@ -94,13 +94,38 @@ else +--- a/arch/x86/Makefile 2016-12-11 14:17:54.000000000 -0500 ++++ b/arch/x86/Makefile 2017-01-06 20:44:36.603227283 -0500 +@@ -104,13 +104,40 @@ else KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup) # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu) @@ -354,10 +457,12 @@ gcc version >=4.9 + cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10) + cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona) + cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1) ++ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2) + cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1) + cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2) + cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3) -+ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2) ++ cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4) ++ cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1) cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona) cflags-$(CONFIG_MCORE2) += \ @@ -386,9 +491,9 @@ gcc version >=4.9 cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic) KBUILD_CFLAGS += $(cflags-y) ---- a/arch/x86/Makefile_32.cpu 2015-08-30 14:34:09.000000000 -0400 -+++ b/arch/x86/Makefile_32.cpu 2015-11-06 14:21:43.604429077 -0500 -@@ -23,7 +23,16 @@ cflags-$(CONFIG_MK6) += -march=k6 +--- a/arch/x86/Makefile_32.cpu 2016-12-11 14:17:54.000000000 -0500 ++++ b/arch/x86/Makefile_32.cpu 2017-01-06 20:44:36.603227283 -0500 +@@ -23,7 +23,18 @@ cflags-$(CONFIG_MK6) += -march=k6 # Please note, that patches that add -march=athlon-xp and friends are pointless. # They make zero difference whatsosever to performance at this time. 
cflags-$(CONFIG_MK7) += -march=athlon @@ -398,14 +503,16 @@ gcc version >=4.9 +cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon) +cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon) +cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon) ++cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon) +cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon) +cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon) +cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3,-march=athlon) -+cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon) ++cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon) ++cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1,-march=athlon) cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0 cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0 cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586) -@@ -32,8 +41,16 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc- +@@ -32,8 +43,16 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc- cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686) cflags-$(CONFIG_MVIAC7) += -march=i686 cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2) diff --git a/patches/uksm-0.1.2.6-for-v4.10.patch b/patches/uksm-0.1.2.6-for-v4.10.patch new file mode 100644 index 0000000..3c32e5e --- /dev/null +++ b/patches/uksm-0.1.2.6-for-v4.10.patch @@ -0,0 +1,6933 @@ +diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX +index 6a5e2a1..09eaa9a1 100644 +--- a/Documentation/vm/00-INDEX ++++ b/Documentation/vm/00-INDEX +@@ -18,6 +18,8 @@ idle_page_tracking.txt + - description of the idle page tracking feature. + ksm.txt + - how to use the Kernel Samepage Merging feature. ++uksm.txt ++ - Introduction to Ultra KSM + numa + - information about NUMA specific code in the Linux vm. + numa_memory_policy.txt +diff --git a/Documentation/vm/uksm.txt b/Documentation/vm/uksm.txt +new file mode 100644 +index 0000000..b7a110f +--- /dev/null ++++ b/Documentation/vm/uksm.txt +@@ -0,0 +1,61 @@ ++The Ultra Kernel Samepage Merging feature ++---------------------------------------------- ++/* ++ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia ++ * ++ * This is an improvement upon KSM. Some basic data structures and routines ++ * are borrowed from ksm.c . ++ * ++ * Its new features: ++ * 1. Full system scan: ++ * It automatically scans all user processes' anonymous VMAs. Kernel-user ++ * interaction to submit a memory area to KSM is no longer needed. ++ * ++ * 2. Rich area detection: ++ * It automatically detects rich areas containing abundant duplicated ++ * pages based. Rich areas are given a full scan speed. Poor areas are ++ * sampled at a reasonable speed with very low CPU consumption. ++ * ++ * 3. Ultra Per-page scan speed improvement: ++ * A new hash algorithm is proposed. As a result, on a machine with ++ * Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHZ DDR2 main memory, it ++ * can scan memory areas that does not contain duplicated pages at speed of ++ * 627MB/sec ~ 2445MB/sec and can merge duplicated areas at speed of ++ * 477MB/sec ~ 923MB/sec. ++ * ++ * 4. Thrashing area avoidance: ++ * Thrashing area(an VMA that has frequent Ksm page break-out) can be ++ * filtered out. 
My benchmark shows it's more efficient than KSM's per-page ++ * hash value based volatile page detection. ++ * ++ * ++ * 5. Misc changes upon KSM: ++ * * It has a fully x86-opitmized memcmp dedicated for 4-byte-aligned page ++ * comparison. It's much faster than default C version on x86. ++ * * rmap_item now has an struct *page member to loosely cache a ++ * address-->page mapping, which reduces too much time-costly ++ * follow_page(). ++ * * The VMA creation/exit procedures are hooked to let the Ultra KSM know. ++ * * try_to_merge_two_pages() now can revert a pte if it fails. No break_ ++ * ksm is needed for this case. ++ * ++ * 6. Full Zero Page consideration(contributed by Figo Zhang) ++ * Now uksmd consider full zero pages as special pages and merge them to an ++ * special unswappable uksm zero page. ++ */ ++ ++ChangeLog: ++ ++2012-05-05 The creation of this Doc ++2012-05-08 UKSM 0.1.1.1 libc crash bug fix, api clean up, doc clean up. ++2012-05-28 UKSM 0.1.1.2 bug fix release ++2012-06-26 UKSM 0.1.2-beta1 first beta release for 0.1.2 ++2012-07-2 UKSM 0.1.2-beta2 ++2012-07-10 UKSM 0.1.2-beta3 ++2012-07-26 UKSM 0.1.2 Fine grained speed control, more scan optimization. ++2012-10-13 UKSM 0.1.2.1 Bug fixes. ++2012-12-31 UKSM 0.1.2.2 Minor bug fixes. ++2014-07-02 UKSM 0.1.2.3 Fix a " __this_cpu_read() in preemptible bug". ++2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger anonying warnings. ++2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation. ++2017-02-26 UKSM 0.1.2.6 Fix a bug in hugetlbpage handling and a race bug with page migration. +diff --git a/fs/exec.c b/fs/exec.c +index e579466..3da3dd68 100644 +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -57,6 +57,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -1336,6 +1337,7 @@ void setup_new_exec(struct linux_binprm * bprm) + /* An exec changes our domain. 
We are no longer part of the thread + group */ + current->self_exec_id++; ++ + flush_signal_handlers(current, 0); + } + EXPORT_SYMBOL(setup_new_exec); +diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c +index 8a42849..47480dd 100644 +--- a/fs/proc/meminfo.c ++++ b/fs/proc/meminfo.c +@@ -117,6 +117,10 @@ static int meminfo_proc_show(struct seq_file *m, void *v) + global_page_state(NR_KERNEL_STACK_KB)); + show_val_kb(m, "PageTables: ", + global_page_state(NR_PAGETABLE)); ++#ifdef CONFIG_UKSM ++ show_val_kb(m, "KsmZeroPages: ", ++ global_page_state(NR_UKSM_ZERO_PAGES)); ++#endif + #ifdef CONFIG_QUICKLIST + show_val_kb(m, "Quicklists: ", quicklist_total_size()); + #endif +diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h +index 18af2bc..c536344 100644 +--- a/include/asm-generic/pgtable.h ++++ b/include/asm-generic/pgtable.h +@@ -600,12 +600,25 @@ extern void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn, + extern void untrack_pfn_moved(struct vm_area_struct *vma); + #endif + ++#ifdef CONFIG_UKSM ++static inline int is_uksm_zero_pfn(unsigned long pfn) ++{ ++ extern unsigned long uksm_zero_pfn; ++ return pfn == uksm_zero_pfn; ++} ++#else ++static inline int is_uksm_zero_pfn(unsigned long pfn) ++{ ++ return 0; ++} ++#endif ++ + #ifdef __HAVE_COLOR_ZERO_PAGE + static inline int is_zero_pfn(unsigned long pfn) + { + extern unsigned long zero_pfn; + unsigned long offset_from_zero_pfn = pfn - zero_pfn; +- return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT); ++ return offset_from_zero_pfn <= (zero_page_mask >> PAGE_SHIFT) || is_uksm_zero_pfn(pfn); + } + + #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) +@@ -614,7 +627,7 @@ static inline int is_zero_pfn(unsigned long pfn) + static inline int is_zero_pfn(unsigned long pfn) + { + extern unsigned long zero_pfn; +- return pfn == zero_pfn; ++ return (pfn == zero_pfn) || (is_uksm_zero_pfn(pfn)); + } + + static inline unsigned long my_zero_pfn(unsigned long addr) +diff --git a/include/linux/ksm.h b/include/linux/ksm.h +index 481c8c4..5329b23 100644 +--- a/include/linux/ksm.h ++++ b/include/linux/ksm.h +@@ -19,21 +19,6 @@ struct mem_cgroup; + #ifdef CONFIG_KSM + int ksm_madvise(struct vm_area_struct *vma, unsigned long start, + unsigned long end, int advice, unsigned long *vm_flags); +-int __ksm_enter(struct mm_struct *mm); +-void __ksm_exit(struct mm_struct *mm); +- +-static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +-{ +- if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) +- return __ksm_enter(mm); +- return 0; +-} +- +-static inline void ksm_exit(struct mm_struct *mm) +-{ +- if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) +- __ksm_exit(mm); +-} + + static inline struct stable_node *page_stable_node(struct page *page) + { +@@ -63,6 +48,33 @@ struct page *ksm_might_need_to_copy(struct page *page, + int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); + void ksm_migrate_page(struct page *newpage, struct page *oldpage); + ++#ifdef CONFIG_KSM_LEGACY ++int __ksm_enter(struct mm_struct *mm); ++void __ksm_exit(struct mm_struct *mm); ++static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) ++{ ++ if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags)) ++ return __ksm_enter(mm); ++ return 0; ++} ++ ++static inline void ksm_exit(struct mm_struct *mm) ++{ ++ if (test_bit(MMF_VM_MERGEABLE, &mm->flags)) ++ __ksm_exit(mm); ++} ++ ++#elif defined(CONFIG_UKSM) ++static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) ++{ ++ return 0; ++} ++ ++static inline void 
ksm_exit(struct mm_struct *mm) ++{ ++} ++#endif /* !CONFIG_UKSM */ ++ + #else /* !CONFIG_KSM */ + + static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) +@@ -105,4 +117,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) + #endif /* CONFIG_MMU */ + #endif /* !CONFIG_KSM */ + ++#include ++ + #endif /* __LINUX_KSM_H */ +diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h +index 808751d..f2c9f6c 100644 +--- a/include/linux/mm_types.h ++++ b/include/linux/mm_types.h +@@ -358,6 +358,9 @@ struct vm_area_struct { + struct mempolicy *vm_policy; /* NUMA policy for the VMA */ + #endif + struct vm_userfaultfd_ctx vm_userfaultfd_ctx; ++#ifdef CONFIG_UKSM ++ struct vma_slot *uksm_vma_slot; ++#endif + }; + + struct core_thread { +diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h +index f4aac87..bff211e 100644 +--- a/include/linux/mmzone.h ++++ b/include/linux/mmzone.h +@@ -138,6 +138,9 @@ enum zone_stat_item { + NUMA_OTHER, /* allocation from other node */ + #endif + NR_FREE_CMA_PAGES, ++#ifdef CONFIG_UKSM ++ NR_UKSM_ZERO_PAGES, ++#endif + NR_VM_ZONE_STAT_ITEMS }; + + enum node_stat_item { +@@ -843,7 +846,7 @@ static inline int is_highmem_idx(enum zone_type idx) + } + + /** +- * is_highmem - helper function to quickly check if a struct zone is a ++ * is_highmem - helper function to quickly check if a struct zone is a + * highmem zone or not. This is an attempt to keep references + * to ZONE_{DMA/NORMAL/HIGHMEM/etc} in general code to a minimum. + * @zone - pointer to struct zone variable +diff --git a/include/linux/sradix-tree.h b/include/linux/sradix-tree.h +new file mode 100644 +index 0000000..6780fdb +--- /dev/null ++++ b/include/linux/sradix-tree.h +@@ -0,0 +1,77 @@ ++#ifndef _LINUX_SRADIX_TREE_H ++#define _LINUX_SRADIX_TREE_H ++ ++ ++#define INIT_SRADIX_TREE(root, mask) \ ++do { \ ++ (root)->height = 0; \ ++ (root)->gfp_mask = (mask); \ ++ (root)->rnode = NULL; \ ++} while (0) ++ ++#define ULONG_BITS (sizeof(unsigned long) * 8) ++#define SRADIX_TREE_INDEX_BITS (8 /* CHAR_BIT */ * sizeof(unsigned long)) ++//#define SRADIX_TREE_MAP_SHIFT 6 ++//#define SRADIX_TREE_MAP_SIZE (1UL << SRADIX_TREE_MAP_SHIFT) ++//#define SRADIX_TREE_MAP_MASK (SRADIX_TREE_MAP_SIZE-1) ++ ++struct sradix_tree_node { ++ unsigned int height; /* Height from the bottom */ ++ unsigned int count; ++ unsigned int fulls; /* Number of full sublevel trees */ ++ struct sradix_tree_node *parent; ++ void *stores[0]; ++}; ++ ++/* A simple radix tree implementation */ ++struct sradix_tree_root { ++ unsigned int height; ++ struct sradix_tree_node *rnode; ++ ++ /* Where found to have available empty stores in its sublevels */ ++ struct sradix_tree_node *enter_node; ++ unsigned int shift; ++ unsigned int stores_size; ++ unsigned int mask; ++ unsigned long min; /* The first hole index */ ++ unsigned long num; ++ //unsigned long *height_to_maxindex; ++ ++ /* How the node is allocated and freed. 
*/ ++ struct sradix_tree_node *(*alloc)(void); ++ void (*free)(struct sradix_tree_node *node); ++ ++ /* When a new node is added and removed */ ++ void (*extend)(struct sradix_tree_node *parent, struct sradix_tree_node *child); ++ void (*assign)(struct sradix_tree_node *node, unsigned index, void *item); ++ void (*rm)(struct sradix_tree_node *node, unsigned offset); ++}; ++ ++struct sradix_tree_path { ++ struct sradix_tree_node *node; ++ int offset; ++}; ++ ++static inline ++void init_sradix_tree_root(struct sradix_tree_root *root, unsigned long shift) ++{ ++ root->height = 0; ++ root->rnode = NULL; ++ root->shift = shift; ++ root->stores_size = 1UL << shift; ++ root->mask = root->stores_size - 1; ++} ++ ++ ++extern void *sradix_tree_next(struct sradix_tree_root *root, ++ struct sradix_tree_node *node, unsigned long index, ++ int (*iter)(void *, unsigned long)); ++ ++extern int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num); ++ ++extern void sradix_tree_delete_from_leaf(struct sradix_tree_root *root, ++ struct sradix_tree_node *node, unsigned long index); ++ ++extern void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index); ++ ++#endif /* _LINUX_SRADIX_TREE_H */ +diff --git a/include/linux/uksm.h b/include/linux/uksm.h +new file mode 100644 +index 0000000..825f05e +--- /dev/null ++++ b/include/linux/uksm.h +@@ -0,0 +1,149 @@ ++#ifndef __LINUX_UKSM_H ++#define __LINUX_UKSM_H ++/* ++ * Memory merging support. ++ * ++ * This code enables dynamic sharing of identical pages found in different ++ * memory areas, even if they are not shared by fork(). ++ */ ++ ++/* if !CONFIG_UKSM this file should not be compiled at all. */ ++#ifdef CONFIG_UKSM ++ ++#include ++#include ++#include ++#include ++#include ++ ++extern unsigned long zero_pfn __read_mostly; ++extern unsigned long uksm_zero_pfn __read_mostly; ++extern struct page *empty_uksm_zero_page; ++ ++/* must be done before linked to mm */ ++extern void uksm_vma_add_new(struct vm_area_struct *vma); ++extern void uksm_remove_vma(struct vm_area_struct *vma); ++ ++#define UKSM_SLOT_NEED_SORT (1 << 0) ++#define UKSM_SLOT_NEED_RERAND (1 << 1) ++#define UKSM_SLOT_SCANNED (1 << 2) /* It's scanned in this round */ ++#define UKSM_SLOT_FUL_SCANNED (1 << 3) ++#define UKSM_SLOT_IN_UKSM (1 << 4) ++ ++struct vma_slot { ++ struct sradix_tree_node *snode; ++ unsigned long sindex; ++ ++ struct list_head slot_list; ++ unsigned long fully_scanned_round; ++ unsigned long dedup_num; ++ unsigned long pages_scanned; ++ unsigned long this_sampled; ++ unsigned long last_scanned; ++ unsigned long pages_to_scan; ++ struct scan_rung *rung; ++ struct page **rmap_list_pool; ++ unsigned int *pool_counts; ++ unsigned long pool_size; ++ struct vm_area_struct *vma; ++ struct mm_struct *mm; ++ unsigned long ctime_j; ++ unsigned long pages; ++ unsigned long flags; ++ unsigned long pages_cowed; /* pages cowed this round */ ++ unsigned long pages_merged; /* pages merged this round */ ++ unsigned long pages_bemerged; ++ ++ /* when it has page merged in this eval round */ ++ struct list_head dedup_list; ++}; ++ ++static inline void uksm_unmap_zero_page(pte_t pte) ++{ ++ if (pte_pfn(pte) == uksm_zero_pfn) ++ __dec_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES); ++} ++ ++static inline void uksm_map_zero_page(pte_t pte) ++{ ++ if (pte_pfn(pte) == uksm_zero_pfn) ++ __inc_zone_page_state(empty_uksm_zero_page, NR_UKSM_ZERO_PAGES); ++} ++ ++static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page) ++{ ++ if 
(vma->uksm_vma_slot && PageKsm(page)) ++ vma->uksm_vma_slot->pages_cowed++; ++} ++ ++static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte) ++{ ++ if (vma->uksm_vma_slot && pte_pfn(pte) == uksm_zero_pfn) ++ vma->uksm_vma_slot->pages_cowed++; ++} ++ ++static inline int uksm_flags_can_scan(unsigned long vm_flags) ++{ ++#ifdef VM_SAO ++ if (vm_flags & VM_SAO) ++ return 0; ++#endif ++ ++ return !(vm_flags & (VM_PFNMAP | VM_IO | VM_DONTEXPAND | ++ VM_HUGETLB | VM_MIXEDMAP | VM_SHARED ++ | VM_MAYSHARE | VM_GROWSUP | VM_GROWSDOWN)); ++} ++ ++static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p) ++{ ++ if (uksm_flags_can_scan(*vm_flags_p)) ++ *vm_flags_p |= VM_MERGEABLE; ++} ++ ++/* ++ * Just a wrapper for BUG_ON for where ksm_zeropage must not be. TODO: it will ++ * be removed when uksm zero page patch is stable enough. ++ */ ++static inline void uksm_bugon_zeropage(pte_t pte) ++{ ++ BUG_ON(pte_pfn(pte) == uksm_zero_pfn); ++} ++#else ++static inline void uksm_vma_add_new(struct vm_area_struct *vma) ++{ ++} ++ ++static inline void uksm_remove_vma(struct vm_area_struct *vma) ++{ ++} ++ ++static inline void uksm_unmap_zero_page(pte_t pte) ++{ ++} ++ ++static inline void uksm_map_zero_page(pte_t pte) ++{ ++} ++ ++static inline void uksm_cow_page(struct vm_area_struct *vma, struct page *page) ++{ ++} ++ ++static inline void uksm_cow_pte(struct vm_area_struct *vma, pte_t pte) ++{ ++} ++ ++static inline int uksm_flags_can_scan(unsigned long vm_flags) ++{ ++ return 0; ++} ++ ++static inline void uksm_vm_flags_mod(unsigned long *vm_flags_p) ++{ ++} ++ ++static inline void uksm_bugon_zeropage(pte_t pte) ++{ ++} ++#endif /* !CONFIG_UKSM */ ++#endif /* __LINUX_UKSM_H */ +diff --git a/kernel/fork.c b/kernel/fork.c +index 11c5c8a..78cd12d 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -606,7 +606,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, + goto fail_nomem; + charge = len; + } +- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ tmp = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!tmp) + goto fail_nomem; + *tmp = *mpnt; +@@ -659,7 +659,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, + __vma_link_rb(mm, tmp, rb_link, rb_parent); + rb_link = &tmp->vm_rb.rb_right; + rb_parent = &tmp->vm_rb; +- ++ uksm_vma_add_new(tmp); + mm->map_count++; + retval = copy_page_range(mm, oldmm, mpnt); + +diff --git a/lib/Makefile b/lib/Makefile +index bc4073a..dbdb5ee 100644 +--- a/lib/Makefile ++++ b/lib/Makefile +@@ -17,7 +17,7 @@ KCOV_INSTRUMENT_debugobjects.o := n + KCOV_INSTRUMENT_dynamic_debug.o := n + + lib-y := ctype.o string.o vsprintf.o cmdline.o \ +- rbtree.o radix-tree.o dump_stack.o timerqueue.o\ ++ rbtree.o radix-tree.o sradix-tree.o dump_stack.o timerqueue.o\ + idr.o int_sqrt.o extable.o \ + sha1.o chacha20.o md5.o irq_regs.o argv_split.o \ + flex_proportions.o ratelimit.o show_mem.o \ +diff --git a/lib/sradix-tree.c b/lib/sradix-tree.c +new file mode 100644 +index 0000000..8d06329 +--- /dev/null ++++ b/lib/sradix-tree.c +@@ -0,0 +1,476 @@ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++static inline int sradix_node_full(struct sradix_tree_root *root, struct sradix_tree_node *node) ++{ ++ return node->fulls == root->stores_size || ++ (node->height == 1 && node->count == root->stores_size); ++} ++ ++/* ++ * Extend a sradix tree so it can store key @index. 
++ */ ++static int sradix_tree_extend(struct sradix_tree_root *root, unsigned long index) ++{ ++ struct sradix_tree_node *node; ++ unsigned int height; ++ ++ if (unlikely(root->rnode == NULL)) { ++ if (!(node = root->alloc())) ++ return -ENOMEM; ++ ++ node->height = 1; ++ root->rnode = node; ++ root->height = 1; ++ } ++ ++ /* Figure out what the height should be. */ ++ height = root->height; ++ index >>= root->shift * height; ++ ++ while (index) { ++ index >>= root->shift; ++ height++; ++ } ++ ++ while (height > root->height) { ++ unsigned int newheight; ++ if (!(node = root->alloc())) ++ return -ENOMEM; ++ ++ /* Increase the height. */ ++ node->stores[0] = root->rnode; ++ root->rnode->parent = node; ++ if (root->extend) ++ root->extend(node, root->rnode); ++ ++ newheight = root->height + 1; ++ node->height = newheight; ++ node->count = 1; ++ if (sradix_node_full(root, root->rnode)) ++ node->fulls = 1; ++ ++ root->rnode = node; ++ root->height = newheight; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Search the next item from the current node, that is not NULL ++ * and can satify root->iter(). ++ */ ++void *sradix_tree_next(struct sradix_tree_root *root, ++ struct sradix_tree_node *node, unsigned long index, ++ int (*iter)(void *item, unsigned long height)) ++{ ++ unsigned long offset; ++ void *item; ++ ++ if (unlikely(node == NULL)) { ++ node = root->rnode; ++ for (offset = 0; offset < root->stores_size; offset++) { ++ item = node->stores[offset]; ++ if (item && (!iter || iter(item, node->height))) ++ break; ++ } ++ ++ if (unlikely(offset >= root->stores_size)) ++ return NULL; ++ ++ if (node->height == 1) ++ return item; ++ else ++ goto go_down; ++ } ++ ++ while (node) { ++ offset = (index & root->mask) + 1; ++ for (;offset < root->stores_size; offset++) { ++ item = node->stores[offset]; ++ if (item && (!iter || iter(item, node->height))) ++ break; ++ } ++ ++ if (offset < root->stores_size) ++ break; ++ ++ node = node->parent; ++ index >>= root->shift; ++ } ++ ++ if (!node) ++ return NULL; ++ ++ while (node->height > 1) { ++go_down: ++ node = item; ++ for (offset = 0; offset < root->stores_size; offset++) { ++ item = node->stores[offset]; ++ if (item && (!iter || iter(item, node->height))) ++ break; ++ } ++ ++ if (unlikely(offset >= root->stores_size)) ++ return NULL; ++ } ++ ++ BUG_ON(offset > root->stores_size); ++ ++ return item; ++} ++ ++/* ++ * Blindly insert the item to the tree. Typically, we reuse the ++ * first empty store item. 
++ */ ++int sradix_tree_enter(struct sradix_tree_root *root, void **item, int num) ++{ ++ unsigned long index; ++ unsigned int height; ++ struct sradix_tree_node *node, *tmp = NULL; ++ int offset, offset_saved; ++ void **store = NULL; ++ int error, i, j, shift; ++ ++go_on: ++ index = root->min; ++ ++ if (root->enter_node && !sradix_node_full(root, root->enter_node)) { ++ node = root->enter_node; ++ BUG_ON((index >> (root->shift * root->height))); ++ } else { ++ node = root->rnode; ++ if (node == NULL || (index >> (root->shift * root->height)) ++ || sradix_node_full(root, node)) { ++ error = sradix_tree_extend(root, index); ++ if (error) ++ return error; ++ ++ node = root->rnode; ++ } ++ } ++ ++ ++ height = node->height; ++ shift = (height - 1) * root->shift; ++ offset = (index >> shift) & root->mask; ++ while (shift > 0) { ++ offset_saved = offset; ++ for (; offset < root->stores_size; offset++) { ++ store = &node->stores[offset]; ++ tmp = *store; ++ ++ if (!tmp || !sradix_node_full(root, tmp)) ++ break; ++ } ++ BUG_ON(offset >= root->stores_size); ++ ++ if (offset != offset_saved) { ++ index += (offset - offset_saved) << shift; ++ index &= ~((1UL << shift) - 1); ++ } ++ ++ if (!tmp) { ++ if (!(tmp = root->alloc())) ++ return -ENOMEM; ++ ++ tmp->height = shift / root->shift; ++ *store = tmp; ++ tmp->parent = node; ++ node->count++; ++// if (root->extend) ++// root->extend(node, tmp); ++ } ++ ++ node = tmp; ++ shift -= root->shift; ++ offset = (index >> shift) & root->mask; ++ } ++ ++ BUG_ON(node->height != 1); ++ ++ ++ store = &node->stores[offset]; ++ for (i = 0, j = 0; ++ j < root->stores_size - node->count && ++ i < root->stores_size - offset && j < num; i++) { ++ if (!store[i]) { ++ store[i] = item[j]; ++ if (root->assign) ++ root->assign(node, index + i, item[j]); ++ j++; ++ } ++ } ++ ++ node->count += j; ++ root->num += j; ++ num -= j; ++ ++ while (sradix_node_full(root, node)) { ++ node = node->parent; ++ if (!node) ++ break; ++ ++ node->fulls++; ++ } ++ ++ if (unlikely(!node)) { ++ /* All nodes are full */ ++ root->min = 1 << (root->height * root->shift); ++ root->enter_node = NULL; ++ } else { ++ root->min = index + i - 1; ++ root->min |= (1UL << (node->height - 1)) - 1; ++ root->min++; ++ root->enter_node = node; ++ } ++ ++ if (num) { ++ item += j; ++ goto go_on; ++ } ++ ++ return 0; ++} ++ ++ ++/** ++ * sradix_tree_shrink - shrink height of a sradix tree to minimal ++ * @root sradix tree root ++ * ++ */ ++static inline void sradix_tree_shrink(struct sradix_tree_root *root) ++{ ++ /* try to shrink tree height */ ++ while (root->height > 1) { ++ struct sradix_tree_node *to_free = root->rnode; ++ ++ /* ++ * The candidate node has more than one child, or its child ++ * is not at the leftmost store, we cannot shrink. 
++ */ ++ if (to_free->count != 1 || !to_free->stores[0]) ++ break; ++ ++ root->rnode = to_free->stores[0]; ++ root->rnode->parent = NULL; ++ root->height--; ++ if (unlikely(root->enter_node == to_free)) { ++ root->enter_node = NULL; ++ } ++ root->free(to_free); ++ } ++} ++ ++/* ++ * Del the item on the known leaf node and index ++ */ ++void sradix_tree_delete_from_leaf(struct sradix_tree_root *root, ++ struct sradix_tree_node *node, unsigned long index) ++{ ++ unsigned int offset; ++ struct sradix_tree_node *start, *end; ++ ++ BUG_ON(node->height != 1); ++ ++ start = node; ++ while (node && !(--node->count)) ++ node = node->parent; ++ ++ end = node; ++ if (!node) { ++ root->rnode = NULL; ++ root->height = 0; ++ root->min = 0; ++ root->num = 0; ++ root->enter_node = NULL; ++ } else { ++ offset = (index >> (root->shift * (node->height - 1))) & root->mask; ++ if (root->rm) ++ root->rm(node, offset); ++ node->stores[offset] = NULL; ++ root->num--; ++ if (root->min > index) { ++ root->min = index; ++ root->enter_node = node; ++ } ++ } ++ ++ if (start != end) { ++ do { ++ node = start; ++ start = start->parent; ++ if (unlikely(root->enter_node == node)) ++ root->enter_node = end; ++ root->free(node); ++ } while (start != end); ++ ++ /* ++ * Note that shrink may free "end", so enter_node still need to ++ * be checked inside. ++ */ ++ sradix_tree_shrink(root); ++ } else if (node->count == root->stores_size - 1) { ++ /* It WAS a full leaf node. Update the ancestors */ ++ node = node->parent; ++ while (node) { ++ node->fulls--; ++ if (node->fulls != root->stores_size - 1) ++ break; ++ ++ node = node->parent; ++ } ++ } ++} ++ ++void *sradix_tree_lookup(struct sradix_tree_root *root, unsigned long index) ++{ ++ unsigned int height, offset; ++ struct sradix_tree_node *node; ++ int shift; ++ ++ node = root->rnode; ++ if (node == NULL || (index >> (root->shift * root->height))) ++ return NULL; ++ ++ height = root->height; ++ shift = (height - 1) * root->shift; ++ ++ do { ++ offset = (index >> shift) & root->mask; ++ node = node->stores[offset]; ++ if (!node) ++ return NULL; ++ ++ shift -= root->shift; ++ } while (shift >= 0); ++ ++ return node; ++} ++ ++/* ++ * Return the item if it exists, otherwise create it in place ++ * and return the created item. ++ */ ++void *sradix_tree_lookup_create(struct sradix_tree_root *root, ++ unsigned long index, void *(*item_alloc)(void)) ++{ ++ unsigned int height, offset; ++ struct sradix_tree_node *node, *tmp; ++ void *item; ++ int shift, error; ++ ++ if (root->rnode == NULL || (index >> (root->shift * root->height))) { ++ if (item_alloc) { ++ error = sradix_tree_extend(root, index); ++ if (error) ++ return NULL; ++ } else { ++ return NULL; ++ } ++ } ++ ++ node = root->rnode; ++ height = root->height; ++ shift = (height - 1) * root->shift; ++ ++ do { ++ offset = (index >> shift) & root->mask; ++ if (!node->stores[offset]) { ++ if (!(tmp = root->alloc())) ++ return NULL; ++ ++ tmp->height = shift / root->shift; ++ node->stores[offset] = tmp; ++ tmp->parent = node; ++ node->count++; ++ node = tmp; ++ } else { ++ node = node->stores[offset]; ++ } ++ ++ shift -= root->shift; ++ } while (shift > 0); ++ ++ BUG_ON(node->height != 1); ++ offset = index & root->mask; ++ if (node->stores[offset]) { ++ return node->stores[offset]; ++ } else if (item_alloc) { ++ if (!(item = item_alloc())) ++ return NULL; ++ ++ node->stores[offset] = item; ++ ++ /* ++ * NOTE: we do NOT call root->assign here, since this item is ++ * newly created by us having no meaning. 
Caller can call this
++ * if it's necessary to do so.
++ */
++
++ node->count++;
++ root->num++;
++
++ while (sradix_node_full(root, node)) {
++ node = node->parent;
++ if (!node)
++ break;
++
++ node->fulls++;
++ }
++
++ if (unlikely(!node)) {
++ /* All nodes are full */
++ root->min = 1 << (root->height * root->shift);
++ } else {
++ if (root->min == index) {
++ root->min |= (1UL << (node->height - 1)) - 1;
++ root->min++;
++ root->enter_node = node;
++ }
++ }
++
++ return item;
++ } else {
++ return NULL;
++ }
++
++}
++
++int sradix_tree_delete(struct sradix_tree_root *root, unsigned long index)
++{
++ unsigned int height, offset;
++ struct sradix_tree_node *node;
++ int shift;
++
++ node = root->rnode;
++ if (node == NULL || (index >> (root->shift * root->height)))
++ return -ENOENT;
++
++ height = root->height;
++ shift = (height - 1) * root->shift;
++
++ do {
++ offset = (index >> shift) & root->mask;
++ node = node->stores[offset];
++ if (!node)
++ return -ENOENT;
++
++ shift -= root->shift;
++ } while (shift > 0);
++
++ offset = index & root->mask;
++ if (!node->stores[offset])
++ return -ENOENT;
++
++ sradix_tree_delete_from_leaf(root, node, index);
++
++ return 0;
++}
+diff --git a/mm/Kconfig b/mm/Kconfig
+index 9b8fccb..d8a2f50 100644
+--- a/mm/Kconfig
++++ b/mm/Kconfig
+@@ -340,6 +340,32 @@ config KSM
+ See Documentation/vm/ksm.txt for more information: KSM is inactive
+ until a program has madvised that an area is MADV_MERGEABLE, and
+ root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
++choice
++ prompt "Choose UKSM/KSM strategy"
++ default UKSM
++ depends on KSM
++ help
++ This option allows you to select a UKSM/KSM strategy.
++
++config UKSM
++ bool "Ultra-KSM for page merging"
++ depends on KSM
++ help
++ UKSM is inspired by the Linux kernel's KSM (Kernel Samepage
++ Merging), but with a fundamentally rewritten core algorithm. With
++ an advanced algorithm, UKSM can now transparently scan all anonymously
++ mapped user-space applications with significantly improved scan speed
++ and CPU efficiency. Since KVM is friendly to KSM, KVM can also benefit from
++ UKSM. It now has its first stable release and its first real-world enterprise user.
++ For more information, please go to its project page
++ (www.kerneldedup.org).
++
++config KSM_LEGACY
++ bool "Legacy KSM implementation"
++ depends on KSM
++ help
++ The legacy KSM implementation from Red Hat.
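
A configuration illustration (not part of the patch): accepting the default
in this new choice block leaves the kernel .config with

	CONFIG_KSM=y
	CONFIG_UKSM=y
	# CONFIG_KSM_LEGACY is not set

and the mm/Makefile hunk below then builds mm/uksm.o in place of mm/ksm.o.
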
++endchoice + + config DEFAULT_MMAP_MIN_ADDR + int "Low address space to protect from user allocation" +diff --git a/mm/Makefile b/mm/Makefile +index 295bd7a..a47ed9b 100644 +--- a/mm/Makefile ++++ b/mm/Makefile +@@ -63,7 +63,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o + obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o + obj-$(CONFIG_SLOB) += slob.o + obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o +-obj-$(CONFIG_KSM) += ksm.o ++obj-$(CONFIG_KSM_LEGACY) += ksm.o ++obj-$(CONFIG_UKSM) += uksm.o + obj-$(CONFIG_PAGE_POISONING) += page_poison.o + obj-$(CONFIG_SLAB) += slab.o + obj-$(CONFIG_SLUB) += slub.o +diff --git a/mm/memory.c b/mm/memory.c +index 6bf2b47..ee62eea 100644 +--- a/mm/memory.c ++++ b/mm/memory.c +@@ -124,6 +124,25 @@ unsigned long highest_memmap_pfn __read_mostly; + + EXPORT_SYMBOL(zero_pfn); + ++#ifdef CONFIG_UKSM ++unsigned long uksm_zero_pfn __read_mostly; ++EXPORT_SYMBOL_GPL(uksm_zero_pfn); ++struct page *empty_uksm_zero_page; ++ ++static int __init setup_uksm_zero_page(void) ++{ ++ empty_uksm_zero_page = alloc_pages(__GFP_ZERO & ~__GFP_MOVABLE, 0); ++ if (!empty_uksm_zero_page) ++ panic("Oh boy, that early out of memory?"); ++ ++ SetPageReserved(empty_uksm_zero_page); ++ uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page); ++ ++ return 0; ++} ++core_initcall(setup_uksm_zero_page); ++#endif ++ + /* + * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() + */ +@@ -135,6 +154,7 @@ static int __init init_zero_pfn(void) + core_initcall(init_zero_pfn); + + ++ + #if defined(SPLIT_RSS_COUNTING) + + void sync_mm_rss(struct mm_struct *mm) +@@ -916,6 +936,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, + get_page(page); + page_dup_rmap(page, false); + rss[mm_counter(page)]++; ++ ++ /* Should return NULL in vm_normal_page() */ ++ uksm_bugon_zeropage(pte); ++ } else { ++ uksm_map_zero_page(pte); + } + + out_set_pte: +@@ -1150,8 +1175,10 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb, + ptent = ptep_get_and_clear_full(mm, addr, pte, + tlb->fullmm); + tlb_remove_tlb_entry(tlb, pte, addr); +- if (unlikely(!page)) ++ if (unlikely(!page)) { ++ uksm_unmap_zero_page(ptent); + continue; ++ } + + if (!PageAnon(page)) { + if (pte_dirty(ptent)) { +@@ -2010,8 +2037,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo + clear_page(kaddr); + kunmap_atomic(kaddr); + flush_dcache_page(dst); +- } else ++ } else { + copy_user_highpage(dst, src, va, vma); ++ uksm_cow_page(vma, src); ++ } + } + + static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) +@@ -2160,6 +2189,7 @@ static int wp_page_copy(struct vm_fault *vmf) + vmf->address); + if (!new_page) + goto oom; ++ uksm_cow_pte(vma, vmf->orig_pte); + } else { + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, + vmf->address); +@@ -2186,7 +2216,9 @@ static int wp_page_copy(struct vm_fault *vmf) + mm_counter_file(old_page)); + inc_mm_counter_fast(mm, MM_ANONPAGES); + } ++ uksm_bugon_zeropage(vmf->orig_pte); + } else { ++ uksm_unmap_zero_page(vmf->orig_pte); + inc_mm_counter_fast(mm, MM_ANONPAGES); + } + flush_cache_page(vma, vmf->address, pte_pfn(vmf->orig_pte)); +diff --git a/mm/mmap.c b/mm/mmap.c +index dc4291d..a96fac9 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -44,6 +44,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -172,6 +173,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) + if (vma->vm_file) + fput(vma->vm_file); + mpol_put(vma_policy(vma)); ++ uksm_remove_vma(vma); + kmem_cache_free(vm_area_cachep, 
vma); + return next; + } +@@ -675,9 +677,16 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, + long adjust_next = 0; + int remove_next = 0; + ++/* ++ * to avoid deadlock, ksm_remove_vma must be done before any spin_lock is ++ * acquired ++ */ ++ uksm_remove_vma(vma); ++ + if (next && !insert) { + struct vm_area_struct *exporter = NULL, *importer = NULL; + ++ uksm_remove_vma(next); + if (end >= next->vm_end) { + /* + * vma expands, overlapping all the next, and +@@ -810,6 +819,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, + end_changed = true; + } + vma->vm_pgoff = pgoff; ++ + if (adjust_next) { + next->vm_start += adjust_next << PAGE_SHIFT; + next->vm_pgoff += adjust_next; +@@ -915,6 +925,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, + if (remove_next == 2) { + remove_next = 1; + end = next->vm_end; ++ uksm_remove_vma(next); + goto again; + } + else if (next) +@@ -941,10 +952,14 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start, + */ + VM_WARN_ON(mm->highest_vm_end != end); + } ++ } else { ++ if (next && !insert) ++ uksm_vma_add_new(next); + } + if (insert && file) + uprobe_mmap(insert); + ++ uksm_vma_add_new(vma); + validate_mm(mm); + + return 0; +@@ -1360,6 +1375,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, + vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | + mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + ++ /* If uksm is enabled, we add VM_MERGABLE to new VMAs. */ ++ uksm_vm_flags_mod(&vm_flags); ++ + if (flags & MAP_LOCKED) + if (!can_do_mlock()) + return -EPERM; +@@ -1698,6 +1716,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, + allow_write_access(file); + } + file = vma->vm_file; ++ uksm_vma_add_new(vma); + out: + perf_event_mmap(vma); + +@@ -1739,6 +1758,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, + if (vm_flags & VM_DENYWRITE) + allow_write_access(file); + free_vma: ++ uksm_remove_vma(vma); + kmem_cache_free(vm_area_cachep, vma); + unacct_error: + if (charged) +@@ -2544,6 +2564,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + else + err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + ++ uksm_vma_add_new(new); ++ + /* Success. */ + if (!err) + return 0; +@@ -2822,6 +2844,7 @@ static int do_brk(unsigned long addr, unsigned long request) + return 0; + + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; ++ uksm_vm_flags_mod(&flags); + + error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); + if (offset_in_page(error)) +@@ -2879,6 +2902,7 @@ static int do_brk(unsigned long addr, unsigned long request) + vma->vm_flags = flags; + vma->vm_page_prot = vm_get_page_prot(flags); + vma_link(mm, vma, prev, rb_link, rb_parent); ++ uksm_vma_add_new(vma); + out: + perf_event_mmap(vma); + mm->total_vm += len >> PAGE_SHIFT; +@@ -2917,6 +2941,12 @@ void exit_mmap(struct mm_struct *mm) + /* mm's last user has gone, and its about to be pulled down */ + mmu_notifier_release(mm); + ++ /* ++ * Taking write lock on mmap_sem does not harm others, ++ * but it's crucial for uksm to avoid races. 
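++ * uksmd may still be referencing this mm's VMAs through their
++ * vma_slots while we tear the mapping down; the lock is only dropped
++ * after mm->mmap has been cleared below.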
++ */
++	down_write(&mm->mmap_sem);
++
+	if (mm->locked_vm) {
+		vma = mm->mmap;
+		while (vma) {
+@@ -2952,6 +2982,11 @@ void exit_mmap(struct mm_struct *mm)
+		vma = remove_vma(vma);
+	}
+	vm_unacct_memory(nr_accounted);
++
++	mm->mmap = NULL;
++	mm->mm_rb = RB_ROOT;
++	vmacache_invalidate(mm);
++	up_write(&mm->mmap_sem);
+ }
+
+ /* Insert vm structure into process list sorted by address
+@@ -3061,6 +3096,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
+		new_vma->vm_ops->open(new_vma);
+	vma_link(mm, new_vma, prev, rb_link, rb_parent);
+	*need_rmap_locks = false;
++	uksm_vma_add_new(new_vma);
+	}
+	return new_vma;
+
+@@ -3208,6 +3244,7 @@ static struct vm_area_struct *__install_special_mapping(
+	vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
+
+	perf_event_mmap(vma);
++	uksm_vma_add_new(vma);
+
+	return vma;
+
+diff --git a/mm/rmap.c b/mm/rmap.c
+index 91619fd..3619420 100644
+--- a/mm/rmap.c
++++ b/mm/rmap.c
+@@ -1109,9 +1109,9 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
+
+ /**
+  * __page_set_anon_rmap - set up new anonymous rmap
+- * @page:	Page to add to rmap
++ * @page:	Page to add to rmap
+  * @vma:	VM area to add page to.
+- * @address:	User virtual address of the mapping
++ * @address:	User virtual address of the mapping
+  * @exclusive:	the page is exclusively owned by the current process
+  */
+ static void __page_set_anon_rmap(struct page *page,
+diff --git a/mm/uksm.c b/mm/uksm.c
+new file mode 100644
+index 0000000..d4596e1
+--- /dev/null
++++ b/mm/uksm.c
+@@ -0,0 +1,5580 @@
++/*
++ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
++ *
++ * This is an improvement upon KSM. Some basic data structures and routines
++ * are borrowed from ksm.c.
++ *
++ * Its new features:
++ * 1. Full system scan:
++ *      It automatically scans all user processes' anonymous VMAs. Kernel-user
++ *      interaction to submit a memory area to KSM is no longer needed.
++ *
++ * 2. Rich area detection:
++ *      It automatically detects rich areas containing abundant duplicated
++ *      pages. Rich areas are given full scan speed. Poor areas are
++ *      sampled at a reasonable speed with very low CPU consumption.
++ *
++ * 3. Ultra per-page scan speed improvement:
++ *      A new hash algorithm is proposed. As a result, on a machine with a
++ *      Core(TM)2 Quad Q9300 CPU in 32-bit mode and 800MHZ DDR2 main memory, it
++ *      can scan memory areas that do not contain duplicated pages at speeds of
++ *      627MB/sec ~ 2445MB/sec and can merge duplicated areas at speeds of
++ *      477MB/sec ~ 923MB/sec.
++ *
++ * 4. Thrashing area avoidance:
++ *      A thrashing area (a VMA that has frequent KSM page break-outs) can be
++ *      filtered out. My benchmark shows it's more efficient than KSM's per-page
++ *      hash value based volatile page detection.
++ *
++ *
++ * 5. Misc changes upon KSM:
++ *      * It has a fully x86-optimized memcmp dedicated for 4-byte-aligned page
++ *        comparison. It's much faster than the default C version on x86.
++ *      * rmap_item now has a struct page * member to loosely cache an
++ *        address-->page mapping, which avoids many costly follow_page() calls.
++ *      * The VMA creation/exit procedures are hooked to let the Ultra KSM know.
++ *      * try_to_merge_two_pages() can now revert a pte if it fails. No break_
++ *        ksm is needed for this case.
++ *
++ * 6. Full zero page consideration (contributed by Figo Zhang)
++ *      Now uksmd considers full zero pages as special pages and merges them
++ *      into a special unswappable uksm zero page.
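++ *      (These are the pages handled by the uksm_map_zero_page()/
++ *      uksm_unmap_zero_page() hooks added to mm/memory.c earlier in this
++ *      patch.)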
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include "internal.h" ++ ++#ifdef CONFIG_X86 ++#undef memcmp ++ ++#ifdef CONFIG_X86_32 ++#define memcmp memcmpx86_32 ++/* ++ * Compare 4-byte-aligned address s1 and s2, with length n ++ */ ++int memcmpx86_32(void *s1, void *s2, size_t n) ++{ ++ size_t num = n / 4; ++ register int res; ++ ++ __asm__ __volatile__ ++ ( ++ "testl %3,%3\n\t" ++ "repe; cmpsd\n\t" ++ "je 1f\n\t" ++ "sbbl %0,%0\n\t" ++ "orl $1,%0\n" ++ "1:" ++ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num) ++ : "0" (0) ++ : "cc"); ++ ++ return res; ++} ++ ++/* ++ * Check the page is all zero ? ++ */ ++static int is_full_zero(const void *s1, size_t len) ++{ ++ unsigned char same; ++ ++ len /= 4; ++ ++ __asm__ __volatile__ ++ ("repe; scasl;" ++ "sete %0" ++ : "=qm" (same), "+D" (s1), "+c" (len) ++ : "a" (0) ++ : "cc"); ++ ++ return same; ++} ++ ++ ++#elif defined(CONFIG_X86_64) ++#define memcmp memcmpx86_64 ++/* ++ * Compare 8-byte-aligned address s1 and s2, with length n ++ */ ++int memcmpx86_64(void *s1, void *s2, size_t n) ++{ ++ size_t num = n / 8; ++ register int res; ++ ++ __asm__ __volatile__ ++ ( ++ "testq %q3,%q3\n\t" ++ "repe; cmpsq\n\t" ++ "je 1f\n\t" ++ "sbbq %q0,%q0\n\t" ++ "orq $1,%q0\n" ++ "1:" ++ : "=&a" (res), "+&S" (s1), "+&D" (s2), "+&c" (num) ++ : "0" (0) ++ : "cc"); ++ ++ return res; ++} ++ ++static int is_full_zero(const void *s1, size_t len) ++{ ++ unsigned char same; ++ ++ len /= 8; ++ ++ __asm__ __volatile__ ++ ("repe; scasq;" ++ "sete %0" ++ : "=qm" (same), "+D" (s1), "+c" (len) ++ : "a" (0) ++ : "cc"); ++ ++ return same; ++} ++ ++#endif ++#else ++static int is_full_zero(const void *s1, size_t len) ++{ ++ unsigned long *src = s1; ++ int i; ++ ++ len /= sizeof(*src); ++ ++ for (i = 0; i < len; i++) { ++ if (src[i]) ++ return 0; ++ } ++ ++ return 1; ++} ++#endif ++ ++#define UKSM_RUNG_ROUND_FINISHED (1 << 0) ++#define TIME_RATIO_SCALE 10000 ++ ++#define SLOT_TREE_NODE_SHIFT 8 ++#define SLOT_TREE_NODE_STORE_SIZE (1UL << SLOT_TREE_NODE_SHIFT) ++struct slot_tree_node { ++ unsigned long size; ++ struct sradix_tree_node snode; ++ void *stores[SLOT_TREE_NODE_STORE_SIZE]; ++}; ++ ++static struct kmem_cache *slot_tree_node_cachep; ++ ++static struct sradix_tree_node *slot_tree_node_alloc(void) ++{ ++ struct slot_tree_node *p; ++ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); ++ if (!p) ++ return NULL; ++ ++ return &p->snode; ++} ++ ++static void slot_tree_node_free(struct sradix_tree_node *node) ++{ ++ struct slot_tree_node *p; ++ ++ p = container_of(node, struct slot_tree_node, snode); ++ kmem_cache_free(slot_tree_node_cachep, p); ++} ++ ++static void slot_tree_node_extend(struct sradix_tree_node *parent, ++ struct sradix_tree_node *child) ++{ ++ struct slot_tree_node *p, *c; ++ ++ p = container_of(parent, struct slot_tree_node, snode); ++ c = container_of(child, struct slot_tree_node, snode); ++ ++ p->size += c->size; ++} ++ ++void slot_tree_node_assign(struct sradix_tree_node *node, ++ unsigned index, void *item) ++{ ++ struct vma_slot *slot = item; ++ struct slot_tree_node *cur; ++ ++ slot->snode = node; ++ slot->sindex = index; ++ ++ while (node) { ++ cur = container_of(node, struct slot_tree_node, snode); ++ cur->size += slot->pages; ++ node = 
node->parent; ++ } ++} ++ ++void slot_tree_node_rm(struct sradix_tree_node *node, unsigned offset) ++{ ++ struct vma_slot *slot; ++ struct slot_tree_node *cur; ++ unsigned long pages; ++ ++ if (node->height == 1) { ++ slot = node->stores[offset]; ++ pages = slot->pages; ++ } else { ++ cur = container_of(node->stores[offset], ++ struct slot_tree_node, snode); ++ pages = cur->size; ++ } ++ ++ while (node) { ++ cur = container_of(node, struct slot_tree_node, snode); ++ cur->size -= pages; ++ node = node->parent; ++ } ++} ++ ++unsigned long slot_iter_index; ++int slot_iter(void *item, unsigned long height) ++{ ++ struct slot_tree_node *node; ++ struct vma_slot *slot; ++ ++ if (height == 1) { ++ slot = item; ++ if (slot_iter_index < slot->pages) { ++ /*in this one*/ ++ return 1; ++ } else { ++ slot_iter_index -= slot->pages; ++ return 0; ++ } ++ ++ } else { ++ node = container_of(item, struct slot_tree_node, snode); ++ if (slot_iter_index < node->size) { ++ /*in this one*/ ++ return 1; ++ } else { ++ slot_iter_index -= node->size; ++ return 0; ++ } ++ } ++} ++ ++ ++static inline void slot_tree_init_root(struct sradix_tree_root *root) ++{ ++ init_sradix_tree_root(root, SLOT_TREE_NODE_SHIFT); ++ root->alloc = slot_tree_node_alloc; ++ root->free = slot_tree_node_free; ++ root->extend = slot_tree_node_extend; ++ root->assign = slot_tree_node_assign; ++ root->rm = slot_tree_node_rm; ++} ++ ++void slot_tree_init(void) ++{ ++ slot_tree_node_cachep = kmem_cache_create("slot_tree_node", ++ sizeof(struct slot_tree_node), 0, ++ SLAB_PANIC | SLAB_RECLAIM_ACCOUNT, ++ NULL); ++} ++ ++ ++/* Each rung of this ladder is a list of VMAs having a same scan ratio */ ++struct scan_rung { ++ //struct list_head scanned_list; ++ struct sradix_tree_root vma_root; ++ struct sradix_tree_root vma_root2; ++ ++ struct vma_slot *current_scan; ++ unsigned long current_offset; ++ ++ /* ++ * The initial value for current_offset, it should loop over ++ * [0~ step - 1] to let all slot have its chance to be scanned. ++ */ ++ unsigned long offset_init; ++ unsigned long step; /* dynamic step for current_offset */ ++ unsigned int flags; ++ unsigned long pages_to_scan; ++ //unsigned long fully_scanned_slots; ++ /* ++ * a little bit tricky - if cpu_time_ratio > 0, then the value is the ++ * the cpu time ratio it can spend in rung_i for every scan ++ * period. if < 0, then it is the cpu time ratio relative to the ++ * max cpu percentage user specified. Both in unit of ++ * 1/TIME_RATIO_SCALE ++ */ ++ int cpu_ratio; ++ ++ /* ++ * How long it will take for all slots in this rung to be fully ++ * scanned? If it's zero, we don't care about the cover time: ++ * it's fully scanned. 
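++ * A smaller value asks for the rung to be covered sooner, i.e. more
++ * pages scanned per scan period.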
++ */
++	unsigned int cover_msecs;
++	//unsigned long vma_num;
++	//unsigned long pages; /* Sum of all slot's pages in rung */
++};
++
++/**
++ * node of either the stable or unstable rbtree
++ *
++ */
++struct tree_node {
++	struct rb_node node; /* link in the main (un)stable rbtree */
++	struct rb_root sub_root; /* rb_root for sublevel collision rbtree */
++	u32 hash;
++	unsigned long count; /* TODO: merge with sub_root */
++	struct list_head all_list; /* all tree nodes in stable/unstable tree */
++};
++
++/**
++ * struct stable_node - node of the stable rbtree
++ * @node: rb node of this ksm page in the stable tree
++ * @hlist: hlist head of rmap_items using this ksm page
++ * @kpfn: page frame number of this ksm page
++ */
++struct stable_node {
++	struct rb_node node; /* link in sub-rbtree */
++	struct tree_node *tree_node; /* its tree_node root in the stable tree, NULL if it's in the hell list */
++	struct hlist_head hlist;
++	unsigned long kpfn;
++	u32 hash_max; /* if ==0 then it's not been calculated yet */
++	struct list_head all_list; /* in a list for all stable nodes */
++};
++
++/**
++ * struct node_vma - groups rmap_items linked into the same stable
++ * node together.
++ */
++struct node_vma {
++	union {
++		struct vma_slot *slot;
++		unsigned long key;  /* slot is used as key sorted on hlist */
++	};
++	struct hlist_node hlist;
++	struct hlist_head rmap_hlist;
++	struct stable_node *head;
++};
++
++/**
++ * struct rmap_item - reverse mapping item for virtual addresses
++ * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
++ * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
++ * @mm: the memory structure this rmap_item is pointing into
++ * @address: the virtual address this rmap_item tracks (+ flags in low bits)
++ * @node: rb node of this rmap_item in the unstable tree
++ * @head: pointer to stable_node heading this list in the stable tree
++ * @hlist: link into hlist of rmap_items hanging off that stable_node
++ */
++struct rmap_item {
++	struct vma_slot *slot;
++	struct page *page;
++	unsigned long address;	/* + low bits used for flags below */
++	unsigned long hash_round;
++	unsigned long entry_index;
++	union {
++		struct {/* when in unstable tree */
++			struct rb_node node;
++			struct tree_node *tree_node;
++			u32 hash_max;
++		};
++		struct { /* when in stable tree */
++			struct node_vma *head;
++			struct hlist_node hlist;
++			struct anon_vma *anon_vma;
++		};
++	};
++} __attribute__((aligned(4)));
++
++struct rmap_list_entry {
++	union {
++		struct rmap_item *item;
++		unsigned long addr;
++	};
++	/* lowest bit is used for is_addr tag */
++} __attribute__((aligned(4))); /* 4-aligned to fit into pages */
++
++
++/* Basic data structure definition ends */
++
++
++/*
++ * Flags for rmap_item to judge if it's listed in the stable/unstable tree.
++ * The flags use the low bits of rmap_item.address
++ */
++#define UNSTABLE_FLAG	0x1
++#define STABLE_FLAG	0x2
++#define get_rmap_addr(x)	((x)->address & PAGE_MASK)
++
++/*
++ * rmap_list_entry helpers
++ */
++#define IS_ADDR_FLAG	1
++#define is_addr(ptr)		((unsigned long)(ptr) & IS_ADDR_FLAG)
++#define set_is_addr(ptr)	((ptr) |= IS_ADDR_FLAG)
++#define get_clean_addr(ptr)	(((ptr) & ~(__typeof__(ptr))IS_ADDR_FLAG))
++
++
++/*
++ * High speed caches for frequently allocated and freed structs
++ */
++static struct kmem_cache *rmap_item_cache;
++static struct kmem_cache *stable_node_cache;
++static struct kmem_cache *node_vma_cache;
++static struct kmem_cache *vma_slot_cache;
++static struct kmem_cache *tree_node_cache;
++#define UKSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("uksm_"#__struct,\
++		sizeof(struct __struct), __alignof__(struct __struct),\
++		(__flags), NULL)
++
++/* Array of all scan_rung, uksm_scan_ladder[0] having the minimum scan ratio */
++#define SCAN_LADDER_SIZE 4
++static struct scan_rung uksm_scan_ladder[SCAN_LADDER_SIZE];
++
++/* The evaluation rounds uksmd has finished */
++static unsigned long long uksm_eval_round = 1;
++
++/*
++ * we add 1 to this var when we consider we should rebuild the whole
++ * unstable tree.
++ */
++static unsigned long uksm_hash_round = 1;
++
++/*
++ * How many times the whole memory is scanned.
++ */
++static unsigned long long fully_scanned_round = 1;
++
++/* The total number of virtual pages of all vma slots */
++static u64 uksm_pages_total;
++
++/* The number of pages that have been scanned since startup */
++static u64 uksm_pages_scanned;
++
++static u64 scanned_virtual_pages;
++
++/* The number of pages that have been scanned since the last encode_benefit call */
++static u64 uksm_pages_scanned_last;
++
++/* If the scanned number is too large, we encode it here */
++static u64 pages_scanned_stored;
++
++static unsigned long pages_scanned_base;
++
++/* The number of nodes in the stable tree */
++static unsigned long uksm_pages_shared;
++
++/* The number of page slots additionally sharing those nodes */
++static unsigned long uksm_pages_sharing;
++
++/* The number of nodes in the unstable tree */
++static unsigned long uksm_pages_unshared;
++
++/*
++ * Milliseconds ksmd should sleep between scans,
++ * >= 100ms to be consistent with
++ * scan_time_to_sleep_msec()
++ */
++static unsigned int uksm_sleep_jiffies;
++
++/* The real value for the uksmd next sleep */
++static unsigned int uksm_sleep_real;
++
++/* Saved value for user input uksm_sleep_jiffies when it's enlarged */
++static unsigned int uksm_sleep_saved;
++
++/* Max percentage of cpu utilization ksmd can take to scan in one batch */
++static unsigned int uksm_max_cpu_percentage;
++
++static int uksm_cpu_governor;
++
++static char *uksm_cpu_governor_str[4] = { "full", "medium", "low", "quiet" };
++
++struct uksm_cpu_preset_s {
++	int cpu_ratio[SCAN_LADDER_SIZE];
++	unsigned int cover_msecs[SCAN_LADDER_SIZE];
++	unsigned int max_cpu; /* percentage */
++};
++
++struct uksm_cpu_preset_s uksm_cpu_preset[4] = {
++	{ {20, 40, -2500, -10000}, {1000, 500, 200, 50}, 95},
++	{ {20, 30, -2500, -10000}, {1000, 500, 400, 100}, 50},
++	{ {10, 20, -5000, -10000}, {1500, 1000, 1000, 250}, 20},
++	{ {10, 20, 40, 75}, {2000, 1000, 1000, 1000}, 1},
++};
++
++/* The default value for uksm_ema_page_time if it's not initialized */
++#define UKSM_PAGE_TIME_DEFAULT	500
++
++/* cost to scan one page, as an exponential moving average in nsecs */
++static unsigned long uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
++
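++/*
++ * Illustrative sketch of how a percentage-weighted exponential moving
++ * average is updated (the exact form used by this file may differ; see
++ * the uses of EMA_ALPHA below):
++ *
++ *	ema_page_time = (EMA_ALPHA * sample_ns +
++ *			 (100 - EMA_ALPHA) * ema_page_time) / 100;
++ *
++ * With EMA_ALPHA == 20, a new sample contributes 20% and history 80%.
++ */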
++/* The exponential moving average alpha weight, in percentage. */
++#define EMA_ALPHA	20
++
++/*
++ * The threshold used to filter out thrashing areas.
++ * If it == 0, filtering is disabled, otherwise it's the percentage upper
++ * bound of the thrashing ratio of all areas. Any area with a bigger thrashing
++ * ratio will be considered as having a zero duplication ratio.
++ */
++static unsigned int uksm_thrash_threshold = 50;
++
++/* How much dedup ratio is considered abundant */
++static unsigned int uksm_abundant_threshold = 10;
++
++/* All slots having merged pages in this eval round. */
++struct list_head vma_slot_dedup = LIST_HEAD_INIT(vma_slot_dedup);
++
++/* How many times the ksmd has slept since startup */
++static unsigned long long uksm_sleep_times;
++
++#define UKSM_RUN_STOP	0
++#define UKSM_RUN_MERGE	1
++static unsigned int uksm_run = 1;
++
++static DECLARE_WAIT_QUEUE_HEAD(uksm_thread_wait);
++static DEFINE_MUTEX(uksm_thread_mutex);
++
++/*
++ * List vma_slot_new is for newly created vma_slot waiting to be added by
++ * ksmd. If one cannot be added (e.g. because it's too small), it's moved to
++ * vma_slot_noadd. vma_slot_del is the list for vma_slot whose corresponding
++ * VMA has been removed/freed.
++ */
++struct list_head vma_slot_new = LIST_HEAD_INIT(vma_slot_new);
++struct list_head vma_slot_noadd = LIST_HEAD_INIT(vma_slot_noadd);
++struct list_head vma_slot_del = LIST_HEAD_INIT(vma_slot_del);
++static DEFINE_SPINLOCK(vma_slot_list_lock);
++
++/* The unstable tree heads */
++static struct rb_root root_unstable_tree = RB_ROOT;
++
++/*
++ * All tree_nodes are in a list to be freed at once when unstable tree is
++ * freed after each scan round.
++ */
++static struct list_head unstable_tree_node_list =
++				LIST_HEAD_INIT(unstable_tree_node_list);
++
++/* List contains all stable nodes */
++static struct list_head stable_node_list = LIST_HEAD_INIT(stable_node_list);
++
++/*
++ * When the hash strength is changed, the stable tree must be delta_hashed and
++ * re-structured. We use two sets of the structs below to speed up the
++ * re-structuring of the stable tree.
++ */ ++static struct list_head ++stable_tree_node_list[2] = {LIST_HEAD_INIT(stable_tree_node_list[0]), ++ LIST_HEAD_INIT(stable_tree_node_list[1])}; ++ ++static struct list_head *stable_tree_node_listp = &stable_tree_node_list[0]; ++static struct rb_root root_stable_tree[2] = {RB_ROOT, RB_ROOT}; ++static struct rb_root *root_stable_treep = &root_stable_tree[0]; ++static unsigned long stable_tree_index; ++ ++/* The hash strength needed to hash a full page */ ++#define HASH_STRENGTH_FULL (PAGE_SIZE / sizeof(u32)) ++ ++/* The hash strength needed for loop-back hashing */ ++#define HASH_STRENGTH_MAX (HASH_STRENGTH_FULL + 10) ++ ++/* The random offsets in a page */ ++static u32 *random_nums; ++ ++/* The hash strength */ ++static unsigned long hash_strength = HASH_STRENGTH_FULL >> 4; ++ ++/* The delta value each time the hash strength increases or decreases */ ++static unsigned long hash_strength_delta; ++#define HASH_STRENGTH_DELTA_MAX 5 ++ ++/* The time we have saved due to random_sample_hash */ ++static u64 rshash_pos; ++ ++/* The time we have wasted due to hash collision */ ++static u64 rshash_neg; ++ ++struct uksm_benefit { ++ u64 pos; ++ u64 neg; ++ u64 scanned; ++ unsigned long base; ++} benefit; ++ ++/* ++ * The relative cost of memcmp, compared to 1 time unit of random sample ++ * hash, this value is tested when ksm module is initialized ++ */ ++static unsigned long memcmp_cost; ++ ++static unsigned long rshash_neg_cont_zero; ++static unsigned long rshash_cont_obscure; ++ ++/* The possible states of hash strength adjustment heuristic */ ++enum rshash_states { ++ RSHASH_STILL, ++ RSHASH_TRYUP, ++ RSHASH_TRYDOWN, ++ RSHASH_NEW, ++ RSHASH_PRE_STILL, ++}; ++ ++/* The possible direction we are about to adjust hash strength */ ++enum rshash_direct { ++ GO_UP, ++ GO_DOWN, ++ OBSCURE, ++ STILL, ++}; ++ ++/* random sampling hash state machine */ ++static struct { ++ enum rshash_states state; ++ enum rshash_direct pre_direct; ++ u8 below_count; ++ /* Keep a lookup window of size 5, iff above_count/below_count > 3 ++ * in this window we stop trying. ++ */ ++ u8 lookup_window_index; ++ u64 stable_benefit; ++ unsigned long turn_point_down; ++ unsigned long turn_benefit_down; ++ unsigned long turn_point_up; ++ unsigned long turn_benefit_up; ++ unsigned long stable_point; ++} rshash_state; ++ ++/*zero page hash table, hash_strength [0 ~ HASH_STRENGTH_MAX]*/ ++static u32 *zero_hash_table; ++ ++static inline struct node_vma *alloc_node_vma(void) ++{ ++ struct node_vma *node_vma; ++ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); ++ if (node_vma) { ++ INIT_HLIST_HEAD(&node_vma->rmap_hlist); ++ INIT_HLIST_NODE(&node_vma->hlist); ++ } ++ return node_vma; ++} ++ ++static inline void free_node_vma(struct node_vma *node_vma) ++{ ++ kmem_cache_free(node_vma_cache, node_vma); ++} ++ ++ ++static inline struct vma_slot *alloc_vma_slot(void) ++{ ++ struct vma_slot *slot; ++ ++ /* ++ * In case ksm is not initialized by now. ++ * Oops, we need to consider the call site of uksm_init() in the future. 
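++ * (vma_slot_cache is only set up once uksm_init() has run; a VMA
++ * created before that simply gets no slot, see the check below.)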
++ */ ++ if (!vma_slot_cache) ++ return NULL; ++ ++ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); ++ if (slot) { ++ INIT_LIST_HEAD(&slot->slot_list); ++ INIT_LIST_HEAD(&slot->dedup_list); ++ slot->flags |= UKSM_SLOT_NEED_RERAND; ++ } ++ return slot; ++} ++ ++static inline void free_vma_slot(struct vma_slot *vma_slot) ++{ ++ kmem_cache_free(vma_slot_cache, vma_slot); ++} ++ ++ ++ ++static inline struct rmap_item *alloc_rmap_item(void) ++{ ++ struct rmap_item *rmap_item; ++ ++ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); ++ if (rmap_item) { ++ /* bug on lowest bit is not clear for flag use */ ++ BUG_ON(is_addr(rmap_item)); ++ } ++ return rmap_item; ++} ++ ++static inline void free_rmap_item(struct rmap_item *rmap_item) ++{ ++ rmap_item->slot = NULL; /* debug safety */ ++ kmem_cache_free(rmap_item_cache, rmap_item); ++} ++ ++static inline struct stable_node *alloc_stable_node(void) ++{ ++ struct stable_node *node; ++ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); ++ if (!node) ++ return NULL; ++ ++ INIT_HLIST_HEAD(&node->hlist); ++ list_add(&node->all_list, &stable_node_list); ++ return node; ++} ++ ++static inline void free_stable_node(struct stable_node *stable_node) ++{ ++ list_del(&stable_node->all_list); ++ kmem_cache_free(stable_node_cache, stable_node); ++} ++ ++static inline struct tree_node *alloc_tree_node(struct list_head *list) ++{ ++ struct tree_node *node; ++ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); ++ if (!node) ++ return NULL; ++ ++ list_add(&node->all_list, list); ++ return node; ++} ++ ++static inline void free_tree_node(struct tree_node *node) ++{ ++ list_del(&node->all_list); ++ kmem_cache_free(tree_node_cache, node); ++} ++ ++static void uksm_drop_anon_vma(struct rmap_item *rmap_item) ++{ ++ struct anon_vma *anon_vma = rmap_item->anon_vma; ++ ++ put_anon_vma(anon_vma); ++} ++ ++ ++/** ++ * Remove a stable node from stable_tree, may unlink from its tree_node and ++ * may remove its parent tree_node if no other stable node is pending. ++ * ++ * @stable_node The node need to be removed ++ * @unlink_rb Will this node be unlinked from the rbtree? ++ * @remove_tree_ node Will its tree_node be removed if empty? 
++ */ ++static void remove_node_from_stable_tree(struct stable_node *stable_node, ++ int unlink_rb, int remove_tree_node) ++{ ++ struct node_vma *node_vma; ++ struct rmap_item *rmap_item; ++ struct hlist_node *n; ++ ++ if (!hlist_empty(&stable_node->hlist)) { ++ hlist_for_each_entry_safe(node_vma, n, ++ &stable_node->hlist, hlist) { ++ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) { ++ uksm_pages_sharing--; ++ ++ uksm_drop_anon_vma(rmap_item); ++ rmap_item->address &= PAGE_MASK; ++ } ++ free_node_vma(node_vma); ++ cond_resched(); ++ } ++ ++ /* the last one is counted as shared */ ++ uksm_pages_shared--; ++ uksm_pages_sharing++; ++ } ++ ++ if (stable_node->tree_node && unlink_rb) { ++ rb_erase(&stable_node->node, ++ &stable_node->tree_node->sub_root); ++ ++ if (RB_EMPTY_ROOT(&stable_node->tree_node->sub_root) && ++ remove_tree_node) { ++ rb_erase(&stable_node->tree_node->node, ++ root_stable_treep); ++ free_tree_node(stable_node->tree_node); ++ } else { ++ stable_node->tree_node->count--; ++ } ++ } ++ ++ free_stable_node(stable_node); ++} ++ ++ ++/* ++ * get_uksm_page: checks if the page indicated by the stable node ++ * is still its ksm page, despite having held no reference to it. ++ * In which case we can trust the content of the page, and it ++ * returns the gotten page; but if the page has now been zapped, ++ * remove the stale node from the stable tree and return NULL. ++ * ++ * You would expect the stable_node to hold a reference to the ksm page. ++ * But if it increments the page's count, swapping out has to wait for ++ * ksmd to come around again before it can free the page, which may take ++ * seconds or even minutes: much too unresponsive. So instead we use a ++ * "keyhole reference": access to the ksm page from the stable node peeps ++ * out through its keyhole to see if that page still holds the right key, ++ * pointing back to this stable node. This relies on freeing a PageAnon ++ * page to reset its page->mapping to NULL, and relies on no other use of ++ * a page to put something that might look like our key in page->mapping. ++ * ++ * include/linux/pagemap.h page_cache_get_speculative() is a good reference, ++ * but this is different - made simpler by uksm_thread_mutex being held, but ++ * interesting for assuming that no other use of the struct page could ever ++ * put our expected_mapping into page->mapping (or a field of the union which ++ * coincides with page->mapping). The RCU calls are not for KSM at all, but ++ * to keep the page_count protocol described with page_cache_get_speculative. ++ * ++ * Note: it is possible that get_uksm_page() will return NULL one moment, ++ * then page the next, if the page is in between page_freeze_refs() and ++ * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page ++ * is on its way to being freed; but it is an anomaly to bear in mind. ++ * ++ * @unlink_rb: if the removal of this node will firstly unlink from ++ * its rbtree. stable_node_reinsert will prevent this when restructuring the ++ * node from its old tree. ++ * ++ * @remove_tree_node: if this is the last one of its tree_node, will the ++ * tree_node be freed ? If we are inserting stable node, this tree_node may ++ * be reused, so don't free it. 
++ */ ++static struct page *get_uksm_page(struct stable_node *stable_node, ++ int unlink_rb, int remove_tree_node) ++{ ++ struct page *page; ++ void *expected_mapping; ++ unsigned long kpfn; ++ ++ expected_mapping = (void *)((unsigned long)stable_node | ++ PAGE_MAPPING_KSM); ++again: ++ kpfn = READ_ONCE(stable_node->kpfn); ++ page = pfn_to_page(kpfn); ++ ++ /* ++ * page is computed from kpfn, so on most architectures reading ++ * page->mapping is naturally ordered after reading node->kpfn, ++ * but on Alpha we need to be more careful. ++ */ ++ smp_read_barrier_depends(); ++ ++ if (READ_ONCE(page->mapping) != expected_mapping) ++ goto stale; ++ ++ /* ++ * We cannot do anything with the page while its refcount is 0. ++ * Usually 0 means free, or tail of a higher-order page: in which ++ * case this node is no longer referenced, and should be freed; ++ * however, it might mean that the page is under page_freeze_refs(). ++ * The __remove_mapping() case is easy, again the node is now stale; ++ * but if page is swapcache in migrate_page_move_mapping(), it might ++ * still be our page, in which case it's essential to keep the node. ++ */ ++ while (!get_page_unless_zero(page)) { ++ /* ++ * Another check for page->mapping != expected_mapping would ++ * work here too. We have chosen the !PageSwapCache test to ++ * optimize the common case, when the page is or is about to ++ * be freed: PageSwapCache is cleared (under spin_lock_irq) ++ * in the freeze_refs section of __remove_mapping(); but Anon ++ * page->mapping reset to NULL later, in free_pages_prepare(). ++ */ ++ if (!PageSwapCache(page)) ++ goto stale; ++ cpu_relax(); ++ } ++ ++ if (READ_ONCE(page->mapping) != expected_mapping) { ++ put_page(page); ++ goto stale; ++ } ++ ++ lock_page(page); ++ if (READ_ONCE(page->mapping) != expected_mapping) { ++ unlock_page(page); ++ put_page(page); ++ goto stale; ++ } ++ unlock_page(page); ++ return page; ++stale: ++ /* ++ * We come here from above when page->mapping or !PageSwapCache ++ * suggests that the node is stale; but it might be under migration. ++ * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(), ++ * before checking whether node->kpfn has been changed. ++ */ ++ smp_rmb(); ++ if (stable_node->kpfn != kpfn) ++ goto again; ++ ++ remove_node_from_stable_tree(stable_node, unlink_rb, remove_tree_node); ++ ++ return NULL; ++} ++ ++/* ++ * Removing rmap_item from stable or unstable tree. ++ * This function will clean the information from the stable/unstable tree. ++ */ ++static inline void remove_rmap_item_from_tree(struct rmap_item *rmap_item) ++{ ++ if (rmap_item->address & STABLE_FLAG) { ++ struct stable_node *stable_node; ++ struct node_vma *node_vma; ++ struct page *page; ++ ++ node_vma = rmap_item->head; ++ stable_node = node_vma->head; ++ page = get_uksm_page(stable_node, 1, 1); ++ if (!page) ++ goto out; ++ ++ /* ++ * page lock is needed because it's racing with ++ * try_to_unmap_ksm(), etc. ++ */ ++ lock_page(page); ++ hlist_del(&rmap_item->hlist); ++ ++ if (hlist_empty(&node_vma->rmap_hlist)) { ++ hlist_del(&node_vma->hlist); ++ free_node_vma(node_vma); ++ } ++ unlock_page(page); ++ ++ put_page(page); ++ if (hlist_empty(&stable_node->hlist)) { ++ /* do NOT call remove_node_from_stable_tree() here, ++ * it's possible for a forked rmap_item not in ++ * stable tree while the in-tree rmap_items were ++ * deleted. 
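++		 * The node itself is reaped later, when get_uksm_page()
++		 * finds its page mapping stale.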
++ */ ++ uksm_pages_shared--; ++ } else ++ uksm_pages_sharing--; ++ ++ ++ uksm_drop_anon_vma(rmap_item); ++ } else if (rmap_item->address & UNSTABLE_FLAG) { ++ if (rmap_item->hash_round == uksm_hash_round) { ++ ++ rb_erase(&rmap_item->node, ++ &rmap_item->tree_node->sub_root); ++ if (RB_EMPTY_ROOT(&rmap_item->tree_node->sub_root)) { ++ rb_erase(&rmap_item->tree_node->node, ++ &root_unstable_tree); ++ ++ free_tree_node(rmap_item->tree_node); ++ } else ++ rmap_item->tree_node->count--; ++ } ++ uksm_pages_unshared--; ++ } ++ ++ rmap_item->address &= PAGE_MASK; ++ rmap_item->hash_max = 0; ++ ++out: ++ cond_resched(); /* we're called from many long loops */ ++} ++ ++static inline int slot_in_uksm(struct vma_slot *slot) ++{ ++ return list_empty(&slot->slot_list); ++} ++ ++/* ++ * Test if the mm is exiting ++ */ ++static inline bool uksm_test_exit(struct mm_struct *mm) ++{ ++ return atomic_read(&mm->mm_users) == 0; ++} ++ ++static inline unsigned long vma_pool_size(struct vma_slot *slot) ++{ ++ return round_up(sizeof(struct rmap_list_entry) * slot->pages, ++ PAGE_SIZE) >> PAGE_SHIFT; ++} ++ ++#define CAN_OVERFLOW_U64(x, delta) (U64_MAX - (x) < (delta)) ++ ++/* must be done with sem locked */ ++static int slot_pool_alloc(struct vma_slot *slot) ++{ ++ unsigned long pool_size; ++ ++ if (slot->rmap_list_pool) ++ return 0; ++ ++ pool_size = vma_pool_size(slot); ++ slot->rmap_list_pool = kzalloc(sizeof(struct page *) * ++ pool_size, GFP_KERNEL); ++ if (!slot->rmap_list_pool) ++ return -ENOMEM; ++ ++ slot->pool_counts = kzalloc(sizeof(unsigned int) * pool_size, ++ GFP_KERNEL); ++ if (!slot->pool_counts) { ++ kfree(slot->rmap_list_pool); ++ return -ENOMEM; ++ } ++ ++ slot->pool_size = pool_size; ++ BUG_ON(CAN_OVERFLOW_U64(uksm_pages_total, slot->pages)); ++ slot->flags |= UKSM_SLOT_IN_UKSM; ++ uksm_pages_total += slot->pages; ++ ++ return 0; ++} ++ ++/* ++ * Called after vma is unlinked from its mm ++ */ ++void uksm_remove_vma(struct vm_area_struct *vma) ++{ ++ struct vma_slot *slot; ++ ++ if (!vma->uksm_vma_slot) ++ return; ++ ++ spin_lock(&vma_slot_list_lock); ++ slot = vma->uksm_vma_slot; ++ if (!slot) ++ goto out; ++ ++ if (slot_in_uksm(slot)) { ++ /** ++ * This slot has been added by ksmd, so move to the del list ++ * waiting ksmd to free it. ++ */ ++ list_add_tail(&slot->slot_list, &vma_slot_del); ++ } else { ++ /** ++ * It's still on new list. It's ok to free slot directly. ++ */ ++ list_del(&slot->slot_list); ++ free_vma_slot(slot); ++ } ++out: ++ vma->uksm_vma_slot = NULL; ++ spin_unlock(&vma_slot_list_lock); ++} ++ ++/** ++ * Need to do two things: ++ * 1. check if slot was moved to del list ++ * 2. make sure the mmap_sem is manipulated under valid vma. ++ * ++ * My concern here is that in some cases, this may make ++ * vma_slot_list_lock() waiters to serialized further by some ++ * sem->wait_lock, can this really be expensive? ++ * ++ * ++ * @return ++ * 0: if successfully locked mmap_sem ++ * -ENOENT: this slot was moved to del list ++ * -EBUSY: vma lock failed ++ */ ++static int try_down_read_slot_mmap_sem(struct vma_slot *slot) ++{ ++ struct vm_area_struct *vma; ++ struct mm_struct *mm; ++ struct rw_semaphore *sem; ++ ++ spin_lock(&vma_slot_list_lock); ++ ++ /* the slot_list was removed and inited from new list, when it enters ++ * uksm_list. 
If it's not empty now, then it must have been moved to the del list.
++	 */
++	if (!slot_in_uksm(slot)) {
++		spin_unlock(&vma_slot_list_lock);
++		return -ENOENT;
++	}
++
++	BUG_ON(slot->pages != vma_pages(slot->vma));
++	/* Ok, vma still valid */
++	vma = slot->vma;
++	mm = vma->vm_mm;
++	sem = &mm->mmap_sem;
++
++	if (uksm_test_exit(mm)) {
++		spin_unlock(&vma_slot_list_lock);
++		return -ENOENT;
++	}
++
++	if (down_read_trylock(sem)) {
++		spin_unlock(&vma_slot_list_lock);
++		if (slot_pool_alloc(slot)) {
++			uksm_remove_vma(vma);
++			up_read(sem);
++			return -ENOENT;
++		}
++		return 0;
++	}
++
++	spin_unlock(&vma_slot_list_lock);
++	return -EBUSY;
++}
++
++static inline unsigned long
++vma_page_address(struct page *page, struct vm_area_struct *vma)
++{
++	pgoff_t pgoff = page->index;
++	unsigned long address;
++
++	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
++	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
++		/* page should be within @vma mapping range */
++		return -EFAULT;
++	}
++	return address;
++}
++
++
++/* return 0 on success with the item's mmap_sem locked */
++static inline int get_mergeable_page_lock_mmap(struct rmap_item *item)
++{
++	struct mm_struct *mm;
++	struct vma_slot *slot = item->slot;
++	int err = -EINVAL;
++
++	struct page *page;
++
++	/*
++	 * try_down_read_slot_mmap_sem() returns non-zero if the slot
++	 * has been removed by uksm_remove_vma().
++	 */
++	if (try_down_read_slot_mmap_sem(slot))
++		return -EBUSY;
++
++	mm = slot->vma->vm_mm;
++
++	if (uksm_test_exit(mm))
++		goto failout_up;
++
++	page = item->page;
++	rcu_read_lock();
++	if (!get_page_unless_zero(page)) {
++		rcu_read_unlock();
++		goto failout_up;
++	}
++
++	/* No need to consider huge page here. */
++	if (item->slot->vma->anon_vma != page_anon_vma(page) ||
++	    vma_page_address(page, item->slot->vma) != get_rmap_addr(item)) {
++		/*
++		 * TODO:
++		 * should we release this item because of its stale page
++		 * mapping?
++		 */
++		put_page(page);
++		rcu_read_unlock();
++		goto failout_up;
++	}
++	rcu_read_unlock();
++	return 0;
++
++failout_up:
++	up_read(&mm->mmap_sem);
++	return err;
++}
++
++/*
++ * What kind of VMA is considered?
++ */
++static inline int vma_can_enter(struct vm_area_struct *vma)
++{
++	return uksm_flags_can_scan(vma->vm_flags);
++}
++
++/*
++ * Called whenever a fresh new vma is created. A new vma_slot
++ * is created and inserted into a global list. Must be called
++ * after the vma is inserted into its mm.
++ */ ++void uksm_vma_add_new(struct vm_area_struct *vma) ++{ ++ struct vma_slot *slot; ++ ++ if (!vma_can_enter(vma)) { ++ vma->uksm_vma_slot = NULL; ++ return; ++ } ++ ++ slot = alloc_vma_slot(); ++ if (!slot) { ++ vma->uksm_vma_slot = NULL; ++ return; ++ } ++ ++ vma->uksm_vma_slot = slot; ++ vma->vm_flags |= VM_MERGEABLE; ++ slot->vma = vma; ++ slot->mm = vma->vm_mm; ++ slot->ctime_j = jiffies; ++ slot->pages = vma_pages(vma); ++ spin_lock(&vma_slot_list_lock); ++ list_add_tail(&slot->slot_list, &vma_slot_new); ++ spin_unlock(&vma_slot_list_lock); ++} ++ ++/* 32/3 < they < 32/2 */ ++#define shiftl 8 ++#define shiftr 12 ++ ++#define HASH_FROM_TO(from, to) \ ++for (index = from; index < to; index++) { \ ++ pos = random_nums[index]; \ ++ hash += key[pos]; \ ++ hash += (hash << shiftl); \ ++ hash ^= (hash >> shiftr); \ ++} ++ ++ ++#define HASH_FROM_DOWN_TO(from, to) \ ++for (index = from - 1; index >= to; index--) { \ ++ hash ^= (hash >> shiftr); \ ++ hash ^= (hash >> (shiftr*2)); \ ++ hash -= (hash << shiftl); \ ++ hash += (hash << (shiftl*2)); \ ++ pos = random_nums[index]; \ ++ hash -= key[pos]; \ ++} ++ ++/* ++ * The main random sample hash function. ++ */ ++static u32 random_sample_hash(void *addr, u32 hash_strength) ++{ ++ u32 hash = 0xdeadbeef; ++ int index, pos, loop = hash_strength; ++ u32 *key = (u32 *)addr; ++ ++ if (loop > HASH_STRENGTH_FULL) ++ loop = HASH_STRENGTH_FULL; ++ ++ HASH_FROM_TO(0, loop); ++ ++ if (hash_strength > HASH_STRENGTH_FULL) { ++ loop = hash_strength - HASH_STRENGTH_FULL; ++ HASH_FROM_TO(0, loop); ++ } ++ ++ return hash; ++} ++ ++ ++/** ++ * It's used when hash strength is adjusted ++ * ++ * @addr The page's virtual address ++ * @from The original hash strength ++ * @to The hash strength changed to ++ * @hash The hash value generated with "from" hash value ++ * ++ * return the hash value ++ */ ++static u32 delta_hash(void *addr, int from, int to, u32 hash) ++{ ++ u32 *key = (u32 *)addr; ++ int index, pos; /* make sure they are int type */ ++ ++ if (to > from) { ++ if (from >= HASH_STRENGTH_FULL) { ++ from -= HASH_STRENGTH_FULL; ++ to -= HASH_STRENGTH_FULL; ++ HASH_FROM_TO(from, to); ++ } else if (to <= HASH_STRENGTH_FULL) { ++ HASH_FROM_TO(from, to); ++ } else { ++ HASH_FROM_TO(from, HASH_STRENGTH_FULL); ++ HASH_FROM_TO(0, to - HASH_STRENGTH_FULL); ++ } ++ } else { ++ if (from <= HASH_STRENGTH_FULL) { ++ HASH_FROM_DOWN_TO(from, to); ++ } else if (to >= HASH_STRENGTH_FULL) { ++ from -= HASH_STRENGTH_FULL; ++ to -= HASH_STRENGTH_FULL; ++ HASH_FROM_DOWN_TO(from, to); ++ } else { ++ HASH_FROM_DOWN_TO(from - HASH_STRENGTH_FULL, 0); ++ HASH_FROM_DOWN_TO(HASH_STRENGTH_FULL, to); ++ } ++ } ++ ++ return hash; ++} ++ ++/** ++ * ++ * Called when: rshash_pos or rshash_neg is about to overflow or a scan round ++ * has finished. ++ * ++ * return 0 if no page has been scanned since last call, 1 otherwise. 
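++ *
++ * benefit.base records how many times the accumulators have been
++ * halved to avoid overflow, so a true total is roughly (value << base).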
++ */
++static inline int encode_benefit(void)
++{
++	u64 scanned_delta, pos_delta, neg_delta;
++	unsigned long base = benefit.base;
++
++	scanned_delta = uksm_pages_scanned - uksm_pages_scanned_last;
++
++	if (!scanned_delta)
++		return 0;
++
++	scanned_delta >>= base;
++	pos_delta = rshash_pos >> base;
++	neg_delta = rshash_neg >> base;
++
++	if (CAN_OVERFLOW_U64(benefit.pos, pos_delta) ||
++	    CAN_OVERFLOW_U64(benefit.neg, neg_delta) ||
++	    CAN_OVERFLOW_U64(benefit.scanned, scanned_delta)) {
++		benefit.scanned >>= 1;
++		benefit.neg >>= 1;
++		benefit.pos >>= 1;
++		benefit.base++;
++		scanned_delta >>= 1;
++		pos_delta >>= 1;
++		neg_delta >>= 1;
++	}
++
++	benefit.pos += pos_delta;
++	benefit.neg += neg_delta;
++	benefit.scanned += scanned_delta;
++
++	BUG_ON(!benefit.scanned);
++
++	rshash_pos = rshash_neg = 0;
++	uksm_pages_scanned_last = uksm_pages_scanned;
++
++	return 1;
++}
++
++static inline void reset_benefit(void)
++{
++	benefit.pos = 0;
++	benefit.neg = 0;
++	benefit.base = 0;
++	benefit.scanned = 0;
++}
++
++static inline void inc_rshash_pos(unsigned long delta)
++{
++	if (CAN_OVERFLOW_U64(rshash_pos, delta))
++		encode_benefit();
++
++	rshash_pos += delta;
++}
++
++static inline void inc_rshash_neg(unsigned long delta)
++{
++	if (CAN_OVERFLOW_U64(rshash_neg, delta))
++		encode_benefit();
++
++	rshash_neg += delta;
++}
++
++
++static inline u32 page_hash(struct page *page, unsigned long hash_strength,
++			    int cost_accounting)
++{
++	u32 val;
++	unsigned long delta;
++
++	void *addr = kmap_atomic(page);
++
++	val = random_sample_hash(addr, hash_strength);
++	kunmap_atomic(addr);
++
++	if (cost_accounting) {
++		if (HASH_STRENGTH_FULL > hash_strength)
++			delta = HASH_STRENGTH_FULL - hash_strength;
++		else
++			delta = 0;
++
++		inc_rshash_pos(delta);
++	}
++
++	return val;
++}
++
++static int memcmp_pages(struct page *page1, struct page *page2,
++			int cost_accounting)
++{
++	char *addr1, *addr2;
++	int ret;
++
++	addr1 = kmap_atomic(page1);
++	addr2 = kmap_atomic(page2);
++	ret = memcmp(addr1, addr2, PAGE_SIZE);
++	kunmap_atomic(addr2);
++	kunmap_atomic(addr1);
++
++	if (cost_accounting)
++		inc_rshash_neg(memcmp_cost);
++
++	return ret;
++}
++
++static inline int pages_identical(struct page *page1, struct page *page2)
++{
++	return !memcmp_pages(page1, page2, 0);
++}
++
++static inline int is_page_full_zero(struct page *page)
++{
++	char *addr;
++	int ret;
++
++	addr = kmap_atomic(page);
++	ret = is_full_zero(addr, PAGE_SIZE);
++	kunmap_atomic(addr);
++
++	return ret;
++}
++
++static int write_protect_page(struct vm_area_struct *vma, struct page *page,
++			      pte_t *orig_pte, pte_t *old_pte)
++{
++	struct mm_struct *mm = vma->vm_mm;
++	unsigned long addr;
++	pte_t *ptep;
++	spinlock_t *ptl;
++	int swapped;
++	int err = -EFAULT;
++	unsigned long mmun_start;	/* For mmu_notifiers */
++	unsigned long mmun_end;		/* For mmu_notifiers */
++
++	addr = page_address_in_vma(page, vma);
++	if (addr == -EFAULT)
++		goto out;
++
++	BUG_ON(PageTransCompound(page));
++
++	mmun_start = addr;
++	mmun_end = addr + PAGE_SIZE;
++	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
++
++	ptep = page_check_address(page, mm, addr, &ptl, 0);
++	if (!ptep)
++		goto out_mn;
++
++	if (old_pte)
++		*old_pte = *ptep;
++
++	if (pte_write(*ptep) || pte_dirty(*ptep)) {
++		pte_t entry;
++
++		swapped = PageSwapCache(page);
++		flush_cache_page(vma, addr, page_to_pfn(page));
++		/*
++		 * Ok this is tricky, when get_user_pages_fast() runs it doesn't
++		 * take any lock, therefore the check that we are going to make
++		 * with the page count against the map count is racy and
++		 * O_DIRECT can happen right after the check.
++		 * So we clear the pte and flush the TLB before the check;
++		 * this assures us that no O_DIRECT can happen after the check
++		 * or in the middle of the check.
++		 */
++		entry = ptep_clear_flush_notify(vma, addr, ptep);
++		/*
++		 * Check that no O_DIRECT or similar I/O is in progress on the
++		 * page
++		 */
++		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
++			set_pte_at(mm, addr, ptep, entry);
++			goto out_unlock;
++		}
++		if (pte_dirty(entry))
++			set_page_dirty(page);
++		entry = pte_mkclean(pte_wrprotect(entry));
++		set_pte_at_notify(mm, addr, ptep, entry);
++	}
++	*orig_pte = *ptep;
++	err = 0;
++
++out_unlock:
++	pte_unmap_unlock(ptep, ptl);
++out_mn:
++	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
++out:
++	return err;
++}
++
++#define MERGE_ERR_PGERR		1 /* the page is invalid, cannot continue */
++#define MERGE_ERR_COLLI		2 /* there is a collision */
++#define MERGE_ERR_COLLI_MAX	3 /* collision at the max hash strength */
++#define MERGE_ERR_CHANGED	4 /* the page has changed since last hash */
++
++
++/**
++ * replace_page - replace page in vma by new ksm page
++ * @vma:      vma that holds the pte pointing to page
++ * @page:     the page we are replacing by kpage
++ * @kpage:    the ksm page we replace page by
++ * @orig_pte: the original value of the pte
++ *
++ * Returns 0 on success, MERGE_ERR_PGERR on failure.
++ */
++static int replace_page(struct vm_area_struct *vma, struct page *page,
++			struct page *kpage, pte_t orig_pte)
++{
++	struct mm_struct *mm = vma->vm_mm;
++	pgd_t *pgd;
++	pud_t *pud;
++	pmd_t *pmd;
++	pte_t *ptep;
++	spinlock_t *ptl;
++	pte_t entry;
++
++	unsigned long addr;
++	int err = MERGE_ERR_PGERR;
++	unsigned long mmun_start;	/* For mmu_notifiers */
++	unsigned long mmun_end;		/* For mmu_notifiers */
++
++	addr = page_address_in_vma(page, vma);
++	if (addr == -EFAULT)
++		goto out;
++
++	pgd = pgd_offset(mm, addr);
++	if (!pgd_present(*pgd))
++		goto out;
++
++	pud = pud_offset(pgd, addr);
++	if (!pud_present(*pud))
++		goto out;
++
++	pmd = pmd_offset(pud, addr);
++	BUG_ON(pmd_trans_huge(*pmd));
++	if (!pmd_present(*pmd))
++		goto out;
++
++	mmun_start = addr;
++	mmun_end = addr + PAGE_SIZE;
++	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
++
++	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
++	if (!pte_same(*ptep, orig_pte)) {
++		pte_unmap_unlock(ptep, ptl);
++		goto out_mn;
++	}
++
++	flush_cache_page(vma, addr, pte_pfn(*ptep));
++	ptep_clear_flush_notify(vma, addr, ptep);
++	entry = mk_pte(kpage, vma->vm_page_prot);
++
++	/* special treatment is needed for zero_page */
++	if ((page_to_pfn(kpage) == uksm_zero_pfn) ||
++	    (page_to_pfn(kpage) == zero_pfn)) {
++		entry = pte_mkspecial(entry);
++		dec_mm_counter(mm, MM_ANONPAGES);
++		inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
++	} else {
++		get_page(kpage);
++		page_add_anon_rmap(kpage, vma, addr, false);
++	}
++
++	set_pte_at_notify(mm, addr, ptep, entry);
++
++	page_remove_rmap(page, false);
++	if (!page_mapped(page))
++		try_to_free_swap(page);
++	put_page(page);
++
++	pte_unmap_unlock(ptep, ptl);
++	err = 0;
++out_mn:
++	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
++out:
++	return err;
++}
++
++
++/**
++ * Fully hash a page with HASH_STRENGTH_MAX and return a non-zero hash value.
++ * The zero hash value at HASH_STRENGTH_MAX is used to indicate that the
++ * hash_max member has not been calculated.
++ * ++ * @page The page needs to be hashed ++ * @hash_old The hash value calculated with current hash strength ++ * ++ * return the new hash value calculated at HASH_STRENGTH_MAX ++ */ ++static inline u32 page_hash_max(struct page *page, u32 hash_old) ++{ ++ u32 hash_max = 0; ++ void *addr; ++ ++ addr = kmap_atomic(page); ++ hash_max = delta_hash(addr, hash_strength, ++ HASH_STRENGTH_MAX, hash_old); ++ ++ kunmap_atomic(addr); ++ ++ if (!hash_max) ++ hash_max = 1; ++ ++ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength); ++ return hash_max; ++} ++ ++/* ++ * We compare the hash again, to ensure that it is really a hash collision ++ * instead of being caused by page write. ++ */ ++static inline int check_collision(struct rmap_item *rmap_item, ++ u32 hash) ++{ ++ int err; ++ struct page *page = rmap_item->page; ++ ++ /* if this rmap_item has already been hash_maxed, then the collision ++ * must appears in the second-level rbtree search. In this case we check ++ * if its hash_max value has been changed. Otherwise, the collision ++ * happens in the first-level rbtree search, so we check against it's ++ * current hash value. ++ */ ++ if (rmap_item->hash_max) { ++ inc_rshash_neg(memcmp_cost); ++ inc_rshash_neg(HASH_STRENGTH_MAX - hash_strength); ++ ++ if (rmap_item->hash_max == page_hash_max(page, hash)) ++ err = MERGE_ERR_COLLI; ++ else ++ err = MERGE_ERR_CHANGED; ++ } else { ++ inc_rshash_neg(memcmp_cost + hash_strength); ++ ++ if (page_hash(page, hash_strength, 0) == hash) ++ err = MERGE_ERR_COLLI; ++ else ++ err = MERGE_ERR_CHANGED; ++ } ++ ++ return err; ++} ++ ++/** ++ * Try to merge a rmap_item.page with a kpage in stable node. kpage must ++ * already be a ksm page. ++ * ++ * @return 0 if the pages were merged, -EFAULT otherwise. ++ */ ++static int try_to_merge_with_uksm_page(struct rmap_item *rmap_item, ++ struct page *kpage, u32 hash) ++{ ++ struct vm_area_struct *vma = rmap_item->slot->vma; ++ struct mm_struct *mm = vma->vm_mm; ++ pte_t orig_pte = __pte(0); ++ int err = MERGE_ERR_PGERR; ++ struct page *page; ++ ++ if (uksm_test_exit(mm)) ++ goto out; ++ ++ page = rmap_item->page; ++ ++ if (page == kpage) { /* ksm page forked */ ++ err = 0; ++ goto out; ++ } ++ ++ /* ++ * We need the page lock to read a stable PageSwapCache in ++ * write_protect_page(). We use trylock_page() instead of ++ * lock_page() because we don't want to wait here - we ++ * prefer to continue scanning and merging different pages, ++ * then come back to this page when it is unlocked. ++ */ ++ if (!trylock_page(page)) ++ goto out; ++ ++ if (!PageAnon(page) || !PageKsm(kpage)) ++ goto out_unlock; ++ ++ if (PageTransCompound(page)) { ++ err = split_huge_page(page); ++ if (err) ++ goto out_unlock; ++ } ++ ++ /* ++ * If this anonymous page is mapped only here, its pte may need ++ * to be write-protected. If it's mapped elsewhere, all of its ++ * ptes are necessarily already write-protected. But in either ++ * case, we need to lock and check page_count is not raised. 
++ */ ++ if (write_protect_page(vma, page, &orig_pte, NULL) == 0) { ++ if (pages_identical(page, kpage)) ++ err = replace_page(vma, page, kpage, orig_pte); ++ else ++ err = check_collision(rmap_item, hash); ++ } ++ ++ if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { ++ munlock_vma_page(page); ++ if (!PageMlocked(kpage)) { ++ unlock_page(page); ++ lock_page(kpage); ++ mlock_vma_page(kpage); ++ page = kpage; /* for final unlock */ ++ } ++ } ++ ++out_unlock: ++ unlock_page(page); ++out: ++ return err; ++} ++ ++ ++ ++/** ++ * If two pages fail to merge in try_to_merge_two_pages, then we have a chance ++ * to restore a page mapping that has been changed in try_to_merge_two_pages. ++ * ++ * @return 0 on success. ++ */ ++static int restore_uksm_page_pte(struct vm_area_struct *vma, unsigned long addr, ++ pte_t orig_pte, pte_t wprt_pte) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ pte_t *ptep; ++ spinlock_t *ptl; ++ ++ int err = -EFAULT; ++ ++ pgd = pgd_offset(mm, addr); ++ if (!pgd_present(*pgd)) ++ goto out; ++ ++ pud = pud_offset(pgd, addr); ++ if (!pud_present(*pud)) ++ goto out; ++ ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ goto out; ++ ++ ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); ++ if (!pte_same(*ptep, wprt_pte)) { ++ /* already copied, let it be */ ++ pte_unmap_unlock(ptep, ptl); ++ goto out; ++ } ++ ++ /* ++ * Good boy, still here. When we still get the ksm page, it does not ++ * return to the free page pool, there is no way that a pte was changed ++ * to other page and gets back to this page. And remind that ksm page ++ * do not reuse in do_wp_page(). So it's safe to restore the original ++ * pte. ++ */ ++ flush_cache_page(vma, addr, pte_pfn(*ptep)); ++ ptep_clear_flush_notify(vma, addr, ptep); ++ set_pte_at_notify(mm, addr, ptep, orig_pte); ++ ++ pte_unmap_unlock(ptep, ptl); ++ err = 0; ++out: ++ return err; ++} ++ ++/** ++ * try_to_merge_two_pages() - take two identical pages and prepare ++ * them to be merged into one page(rmap_item->page) ++ * ++ * @return 0 if we successfully merged two identical pages into ++ * one ksm page. MERGE_ERR_COLLI if it's only a hash collision ++ * search in rbtree. MERGE_ERR_CHANGED if rmap_item has been ++ * changed since it's hashed. MERGE_ERR_PGERR otherwise. ++ * ++ */ ++static int try_to_merge_two_pages(struct rmap_item *rmap_item, ++ struct rmap_item *tree_rmap_item, ++ u32 hash) ++{ ++ pte_t orig_pte1 = __pte(0), orig_pte2 = __pte(0); ++ pte_t wprt_pte1 = __pte(0), wprt_pte2 = __pte(0); ++ struct vm_area_struct *vma1 = rmap_item->slot->vma; ++ struct vm_area_struct *vma2 = tree_rmap_item->slot->vma; ++ struct page *page = rmap_item->page; ++ struct page *tree_page = tree_rmap_item->page; ++ int err = MERGE_ERR_PGERR; ++ struct address_space *saved_mapping; ++ ++ ++ if (rmap_item->page == tree_rmap_item->page) ++ goto out; ++ ++ if (!trylock_page(page)) ++ goto out; ++ ++ if (!PageAnon(page)) ++ goto out_unlock; ++ ++ if (PageTransCompound(page)) { ++ err = split_huge_page(page); ++ if (err) ++ goto out_unlock; ++ } ++ ++ if (write_protect_page(vma1, page, &wprt_pte1, &orig_pte1) != 0) { ++ unlock_page(page); ++ goto out; ++ } ++ ++ /* ++ * While we hold page lock, upgrade page from ++ * PageAnon+anon_vma to PageKsm+NULL stable_node: ++ * stable_tree_insert() will update stable_node. 
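++	 * If the merge fails, restore_out below puts saved_mapping back
++	 * in place.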
++ */ ++ saved_mapping = page->mapping; ++ set_page_stable_node(page, NULL); ++ mark_page_accessed(page); ++ if (!PageDirty(page)) ++ SetPageDirty(page); ++ ++ unlock_page(page); ++ ++ if (!trylock_page(tree_page)) ++ goto restore_out; ++ ++ if (!PageAnon(tree_page)) { ++ unlock_page(tree_page); ++ goto restore_out; ++ } ++ ++ if (PageTransCompound(tree_page)) { ++ err = split_huge_page(tree_page); ++ if (err) { ++ unlock_page(tree_page); ++ goto restore_out; ++ } ++ } ++ ++ if (write_protect_page(vma2, tree_page, &wprt_pte2, &orig_pte2) != 0) { ++ unlock_page(tree_page); ++ goto restore_out; ++ } ++ ++ if (pages_identical(page, tree_page)) { ++ err = replace_page(vma2, tree_page, page, wprt_pte2); ++ if (err) { ++ unlock_page(tree_page); ++ goto restore_out; ++ } ++ ++ if ((vma2->vm_flags & VM_LOCKED)) { ++ munlock_vma_page(tree_page); ++ if (!PageMlocked(page)) { ++ unlock_page(tree_page); ++ lock_page(page); ++ mlock_vma_page(page); ++ tree_page = page; /* for final unlock */ ++ } ++ } ++ ++ unlock_page(tree_page); ++ ++ goto out; /* success */ ++ ++ } else { ++ if (tree_rmap_item->hash_max && ++ tree_rmap_item->hash_max == rmap_item->hash_max) { ++ err = MERGE_ERR_COLLI_MAX; ++ } else if (page_hash(page, hash_strength, 0) == ++ page_hash(tree_page, hash_strength, 0)) { ++ inc_rshash_neg(memcmp_cost + hash_strength * 2); ++ err = MERGE_ERR_COLLI; ++ } else { ++ err = MERGE_ERR_CHANGED; ++ } ++ ++ unlock_page(tree_page); ++ } ++ ++restore_out: ++ lock_page(page); ++ if (!restore_uksm_page_pte(vma1, get_rmap_addr(rmap_item), ++ orig_pte1, wprt_pte1)) ++ page->mapping = saved_mapping; ++ ++out_unlock: ++ unlock_page(page); ++out: ++ return err; ++} ++ ++static inline int hash_cmp(u32 new_val, u32 node_val) ++{ ++ if (new_val > node_val) ++ return 1; ++ else if (new_val < node_val) ++ return -1; ++ else ++ return 0; ++} ++ ++static inline u32 rmap_item_hash_max(struct rmap_item *item, u32 hash) ++{ ++ u32 hash_max = item->hash_max; ++ ++ if (!hash_max) { ++ hash_max = page_hash_max(item->page, hash); ++ ++ item->hash_max = hash_max; ++ } ++ ++ return hash_max; ++} ++ ++ ++ ++/** ++ * stable_tree_search() - search the stable tree for a page ++ * ++ * @item: the rmap_item we are comparing with ++ * @hash: the hash value of this item->page already calculated ++ * ++ * @return the page we have found, NULL otherwise. The page returned has ++ * been gotten. ++ */ ++static struct page *stable_tree_search(struct rmap_item *item, u32 hash) ++{ ++ struct rb_node *node = root_stable_treep->rb_node; ++ struct tree_node *tree_node; ++ unsigned long hash_max; ++ struct page *page = item->page; ++ struct stable_node *stable_node; ++ ++ stable_node = page_stable_node(page); ++ if (stable_node) { ++ /* ksm page forked, that is ++ * if (PageKsm(page) && !in_stable_tree(rmap_item)) ++ * it's actually gotten once outside. ++ */ ++ get_page(page); ++ return page; ++ } ++ ++ while (node) { ++ int cmp; ++ ++ tree_node = rb_entry(node, struct tree_node, node); ++ ++ cmp = hash_cmp(hash, tree_node->hash); ++ ++ if (cmp < 0) ++ node = node->rb_left; ++ else if (cmp > 0) ++ node = node->rb_right; ++ else ++ break; ++ } ++ ++ if (!node) ++ return NULL; ++ ++ if (tree_node->count == 1) { ++ stable_node = rb_entry(tree_node->sub_root.rb_node, ++ struct stable_node, node); ++ BUG_ON(!stable_node); ++ ++ goto get_page_out; ++ } ++ ++ /* ++ * ok, we have to search the second ++ * level subtree, hash the page to a ++ * full strength. 
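++	 * Entries that collided on the first-level hash are told apart
++	 * here by hash_max, the hash computed at HASH_STRENGTH_MAX.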
++ */ ++ node = tree_node->sub_root.rb_node; ++ BUG_ON(!node); ++ hash_max = rmap_item_hash_max(item, hash); ++ ++ while (node) { ++ int cmp; ++ ++ stable_node = rb_entry(node, struct stable_node, node); ++ ++ cmp = hash_cmp(hash_max, stable_node->hash_max); ++ ++ if (cmp < 0) ++ node = node->rb_left; ++ else if (cmp > 0) ++ node = node->rb_right; ++ else ++ goto get_page_out; ++ } ++ ++ return NULL; ++ ++get_page_out: ++ page = get_uksm_page(stable_node, 1, 1); ++ return page; ++} ++ ++static int try_merge_rmap_item(struct rmap_item *item, ++ struct page *kpage, ++ struct page *tree_page) ++{ ++ spinlock_t *ptl; ++ pte_t *ptep; ++ unsigned long addr; ++ struct vm_area_struct *vma = item->slot->vma; ++ ++ addr = get_rmap_addr(item); ++ ptep = page_check_address(kpage, vma->vm_mm, addr, &ptl, 0); ++ if (!ptep) ++ return 0; ++ ++ if (pte_write(*ptep)) { ++ /* has changed, abort! */ ++ pte_unmap_unlock(ptep, ptl); ++ return 0; ++ } ++ ++ get_page(tree_page); ++ page_add_anon_rmap(tree_page, vma, addr, false); ++ ++ flush_cache_page(vma, addr, pte_pfn(*ptep)); ++ ptep_clear_flush_notify(vma, addr, ptep); ++ set_pte_at_notify(vma->vm_mm, addr, ptep, ++ mk_pte(tree_page, vma->vm_page_prot)); ++ ++ page_remove_rmap(kpage, false); ++ put_page(kpage); ++ ++ pte_unmap_unlock(ptep, ptl); ++ ++ return 1; ++} ++ ++/** ++ * try_to_merge_with_stable_page() - when two rmap_items need to be inserted ++ * into stable tree, the page was found to be identical to a stable ksm page, ++ * this is the last chance we can merge them into one. ++ * ++ * @item1: the rmap_item holding the page which we wanted to insert ++ * into stable tree. ++ * @item2: the other rmap_item we found when unstable tree search ++ * @oldpage: the page currently mapped by the two rmap_items ++ * @tree_page: the page we found identical in stable tree node ++ * @success1: return if item1 is successfully merged ++ * @success2: return if item2 is successfully merged ++ */ ++static void try_merge_with_stable(struct rmap_item *item1, ++ struct rmap_item *item2, ++ struct page **kpage, ++ struct page *tree_page, ++ int *success1, int *success2) ++{ ++ struct vm_area_struct *vma1 = item1->slot->vma; ++ struct vm_area_struct *vma2 = item2->slot->vma; ++ *success1 = 0; ++ *success2 = 0; ++ ++ if (unlikely(*kpage == tree_page)) { ++ /* I don't think this can really happen */ ++ printk(KERN_WARNING "UKSM: unexpected condition detected in " ++ "try_merge_with_stable() -- *kpage == tree_page !\n"); ++ *success1 = 1; ++ *success2 = 1; ++ return; ++ } ++ ++ if (!PageAnon(*kpage) || !PageKsm(*kpage)) ++ goto failed; ++ ++ if (!trylock_page(tree_page)) ++ goto failed; ++ ++ /* If the oldpage is still ksm and still pointed ++ * to in the right place, and still write protected, ++ * we are confident it's not changed, no need to ++ * memcmp anymore. ++ * be ware, we cannot take nested pte locks, ++ * deadlock risk. ++ */ ++ if (!try_merge_rmap_item(item1, *kpage, tree_page)) ++ goto unlock_failed; ++ ++ /* ok, then vma2, remind that pte1 already set */ ++ if (!try_merge_rmap_item(item2, *kpage, tree_page)) ++ goto success_1; ++ ++ *success2 = 1; ++success_1: ++ *success1 = 1; ++ ++ ++ if ((*success1 && vma1->vm_flags & VM_LOCKED) || ++ (*success2 && vma2->vm_flags & VM_LOCKED)) { ++ munlock_vma_page(*kpage); ++ if (!PageMlocked(tree_page)) ++ mlock_vma_page(tree_page); ++ } ++ ++ /* ++ * We do not need oldpage any more in the caller, so can break the lock ++ * now. ++ */ ++ unlock_page(*kpage); ++ *kpage = tree_page; /* Get unlocked outside. 
*/ ++ return; ++ ++unlock_failed: ++ unlock_page(tree_page); ++failed: ++ return; ++} ++ ++static inline void stable_node_hash_max(struct stable_node *node, ++ struct page *page, u32 hash) ++{ ++ u32 hash_max = node->hash_max; ++ ++ if (!hash_max) { ++ hash_max = page_hash_max(page, hash); ++ node->hash_max = hash_max; ++ } ++} ++ ++static inline ++struct stable_node *new_stable_node(struct tree_node *tree_node, ++ struct page *kpage, u32 hash_max) ++{ ++ struct stable_node *new_stable_node; ++ ++ new_stable_node = alloc_stable_node(); ++ if (!new_stable_node) ++ return NULL; ++ ++ new_stable_node->kpfn = page_to_pfn(kpage); ++ new_stable_node->hash_max = hash_max; ++ new_stable_node->tree_node = tree_node; ++ set_page_stable_node(kpage, new_stable_node); ++ ++ return new_stable_node; ++} ++ ++static inline ++struct stable_node *first_level_insert(struct tree_node *tree_node, ++ struct rmap_item *rmap_item, ++ struct rmap_item *tree_rmap_item, ++ struct page **kpage, u32 hash, ++ int *success1, int *success2) ++{ ++ int cmp; ++ struct page *tree_page; ++ u32 hash_max = 0; ++ struct stable_node *stable_node, *new_snode; ++ struct rb_node *parent = NULL, **new; ++ ++ /* this tree node contains no sub-tree yet */ ++ stable_node = rb_entry(tree_node->sub_root.rb_node, ++ struct stable_node, node); ++ ++ tree_page = get_uksm_page(stable_node, 1, 0); ++ if (tree_page) { ++ cmp = memcmp_pages(*kpage, tree_page, 1); ++ if (!cmp) { ++ try_merge_with_stable(rmap_item, tree_rmap_item, kpage, ++ tree_page, success1, success2); ++ put_page(tree_page); ++ if (!*success1 && !*success2) ++ goto failed; ++ ++ return stable_node; ++ ++ } else { ++ /* ++ * collision in first level try to create a subtree. ++ * A new node need to be created. ++ */ ++ put_page(tree_page); ++ ++ stable_node_hash_max(stable_node, tree_page, ++ tree_node->hash); ++ hash_max = rmap_item_hash_max(rmap_item, hash); ++ cmp = hash_cmp(hash_max, stable_node->hash_max); ++ ++ parent = &stable_node->node; ++ if (cmp < 0) { ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ new = &parent->rb_right; ++ } else { ++ goto failed; ++ } ++ } ++ ++ } else { ++ /* the only stable_node deleted, we reuse its tree_node. 
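++		 * Its sub-root is empty, so the new stable node can be
++		 * linked directly at the root of the sub-tree.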
++ */ ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ } ++ ++ new_snode = new_stable_node(tree_node, *kpage, hash_max); ++ if (!new_snode) ++ goto failed; ++ ++ rb_link_node(&new_snode->node, parent, new); ++ rb_insert_color(&new_snode->node, &tree_node->sub_root); ++ tree_node->count++; ++ *success1 = *success2 = 1; ++ ++ return new_snode; ++ ++failed: ++ return NULL; ++} ++ ++static inline ++struct stable_node *stable_subtree_insert(struct tree_node *tree_node, ++ struct rmap_item *rmap_item, ++ struct rmap_item *tree_rmap_item, ++ struct page **kpage, u32 hash, ++ int *success1, int *success2) ++{ ++ struct page *tree_page; ++ u32 hash_max; ++ struct stable_node *stable_node, *new_snode; ++ struct rb_node *parent, **new; ++ ++research: ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ BUG_ON(!*new); ++ hash_max = rmap_item_hash_max(rmap_item, hash); ++ while (*new) { ++ int cmp; ++ ++ stable_node = rb_entry(*new, struct stable_node, node); ++ ++ cmp = hash_cmp(hash_max, stable_node->hash_max); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else { ++ tree_page = get_uksm_page(stable_node, 1, 0); ++ if (tree_page) { ++ cmp = memcmp_pages(*kpage, tree_page, 1); ++ if (!cmp) { ++ try_merge_with_stable(rmap_item, ++ tree_rmap_item, kpage, ++ tree_page, success1, success2); ++ ++ put_page(tree_page); ++ if (!*success1 && !*success2) ++ goto failed; ++ /* ++ * successfully merged with a stable ++ * node ++ */ ++ return stable_node; ++ } else { ++ put_page(tree_page); ++ goto failed; ++ } ++ } else { ++ /* ++ * stable node may be deleted, ++ * and subtree maybe ++ * restructed, cannot ++ * continue, research it. ++ */ ++ if (tree_node->count) { ++ goto research; ++ } else { ++ /* reuse the tree node*/ ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ } ++ } ++ } ++ } ++ ++ new_snode = new_stable_node(tree_node, *kpage, hash_max); ++ if (!new_snode) ++ goto failed; ++ ++ rb_link_node(&new_snode->node, parent, new); ++ rb_insert_color(&new_snode->node, &tree_node->sub_root); ++ tree_node->count++; ++ *success1 = *success2 = 1; ++ ++ return new_snode; ++ ++failed: ++ return NULL; ++} ++ ++ ++/** ++ * stable_tree_insert() - try to insert a merged page in unstable tree to ++ * the stable tree ++ * ++ * @kpage: the page need to be inserted ++ * @hash: the current hash of this page ++ * @rmap_item: the rmap_item being scanned ++ * @tree_rmap_item: the rmap_item found on unstable tree ++ * @success1: return if rmap_item is merged ++ * @success2: return if tree_rmap_item is merged ++ * ++ * @return the stable_node on stable tree if at least one ++ * rmap_item is inserted into stable tree, NULL ++ * otherwise. 
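++ * Note: on a successful merge with an already existing stable page,
++ * @kpage is updated to point at that page (see try_merge_with_stable()).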
++ */ ++static struct stable_node * ++stable_tree_insert(struct page **kpage, u32 hash, ++ struct rmap_item *rmap_item, ++ struct rmap_item *tree_rmap_item, ++ int *success1, int *success2) ++{ ++ struct rb_node **new = &root_stable_treep->rb_node; ++ struct rb_node *parent = NULL; ++ struct stable_node *stable_node; ++ struct tree_node *tree_node; ++ u32 hash_max = 0; ++ ++ *success1 = *success2 = 0; ++ ++ while (*new) { ++ int cmp; ++ ++ tree_node = rb_entry(*new, struct tree_node, node); ++ ++ cmp = hash_cmp(hash, tree_node->hash); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else ++ break; ++ } ++ ++ if (*new) { ++ if (tree_node->count == 1) { ++ stable_node = first_level_insert(tree_node, rmap_item, ++ tree_rmap_item, kpage, ++ hash, success1, success2); ++ } else { ++ stable_node = stable_subtree_insert(tree_node, ++ rmap_item, tree_rmap_item, kpage, ++ hash, success1, success2); ++ } ++ } else { ++ ++ /* no tree node found */ ++ tree_node = alloc_tree_node(stable_tree_node_listp); ++ if (!tree_node) { ++ stable_node = NULL; ++ goto out; ++ } ++ ++ stable_node = new_stable_node(tree_node, *kpage, hash_max); ++ if (!stable_node) { ++ free_tree_node(tree_node); ++ goto out; ++ } ++ ++ tree_node->hash = hash; ++ rb_link_node(&tree_node->node, parent, new); ++ rb_insert_color(&tree_node->node, root_stable_treep); ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ ++ rb_link_node(&stable_node->node, parent, new); ++ rb_insert_color(&stable_node->node, &tree_node->sub_root); ++ tree_node->count++; ++ *success1 = *success2 = 1; ++ } ++ ++out: ++ return stable_node; ++} ++ ++ ++/** ++ * get_tree_rmap_item_page() - try to get the page and lock the mmap_sem ++ * ++ * @return 0 on success, -EBUSY if unable to lock the mmap_sem, ++ * -EINVAL if the page mapping has been changed. ++ */ ++static inline int get_tree_rmap_item_page(struct rmap_item *tree_rmap_item) ++{ ++ int err; ++ ++ err = get_mergeable_page_lock_mmap(tree_rmap_item); ++ ++ if (err == -EINVAL) { ++ /* its page map has been changed, remove it */ ++ remove_rmap_item_from_tree(tree_rmap_item); ++ } ++ ++ /* The page is gotten and mmap_sem is locked now. */ ++ return err; ++} ++ ++ ++/** ++ * unstable_tree_search_insert() - search an unstable tree rmap_item with the ++ * same hash value. 
Get its page and trylock the mmap_sem ++ */ ++static inline ++struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, ++ u32 hash) ++ ++{ ++ struct rb_node **new = &root_unstable_tree.rb_node; ++ struct rb_node *parent = NULL; ++ struct tree_node *tree_node; ++ u32 hash_max; ++ struct rmap_item *tree_rmap_item; ++ ++ while (*new) { ++ int cmp; ++ ++ tree_node = rb_entry(*new, struct tree_node, node); ++ ++ cmp = hash_cmp(hash, tree_node->hash); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else ++ break; ++ } ++ ++ if (*new) { ++ /* got the tree_node */ ++ if (tree_node->count == 1) { ++ tree_rmap_item = rb_entry(tree_node->sub_root.rb_node, ++ struct rmap_item, node); ++ BUG_ON(!tree_rmap_item); ++ ++ goto get_page_out; ++ } ++ ++ /* well, search the collision subtree */ ++ new = &tree_node->sub_root.rb_node; ++ BUG_ON(!*new); ++ hash_max = rmap_item_hash_max(rmap_item, hash); ++ ++ while (*new) { ++ int cmp; ++ ++ tree_rmap_item = rb_entry(*new, struct rmap_item, ++ node); ++ ++ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max); ++ parent = *new; ++ if (cmp < 0) ++ new = &parent->rb_left; ++ else if (cmp > 0) ++ new = &parent->rb_right; ++ else ++ goto get_page_out; ++ } ++ } else { ++ /* alloc a new tree_node */ ++ tree_node = alloc_tree_node(&unstable_tree_node_list); ++ if (!tree_node) ++ return NULL; ++ ++ tree_node->hash = hash; ++ rb_link_node(&tree_node->node, parent, new); ++ rb_insert_color(&tree_node->node, &root_unstable_tree); ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ } ++ ++ /* did not found even in sub-tree */ ++ rmap_item->tree_node = tree_node; ++ rmap_item->address |= UNSTABLE_FLAG; ++ rmap_item->hash_round = uksm_hash_round; ++ rb_link_node(&rmap_item->node, parent, new); ++ rb_insert_color(&rmap_item->node, &tree_node->sub_root); ++ ++ uksm_pages_unshared++; ++ return NULL; ++ ++get_page_out: ++ if (tree_rmap_item->page == rmap_item->page) ++ return NULL; ++ ++ if (get_tree_rmap_item_page(tree_rmap_item)) ++ return NULL; ++ ++ return tree_rmap_item; ++} ++ ++static void hold_anon_vma(struct rmap_item *rmap_item, ++ struct anon_vma *anon_vma) ++{ ++ rmap_item->anon_vma = anon_vma; ++ get_anon_vma(anon_vma); ++} ++ ++ ++/** ++ * stable_tree_append() - append a rmap_item to a stable node. Deduplication ++ * ratio statistics is done in this function. 
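++ * When @logdedup is set, the pages_bemerged and pages_merged counters
++ * of the slots sharing this stable node are updated for the later
++ * dedup-ratio sampling in cal_dedup_ratio_old().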
++ * ++ */ ++static void stable_tree_append(struct rmap_item *rmap_item, ++ struct stable_node *stable_node, int logdedup) ++{ ++ struct node_vma *node_vma = NULL, *new_node_vma, *node_vma_cont = NULL; ++ unsigned long key = (unsigned long)rmap_item->slot; ++ unsigned long factor = rmap_item->slot->rung->step; ++ ++ BUG_ON(!stable_node); ++ rmap_item->address |= STABLE_FLAG; ++ ++ if (hlist_empty(&stable_node->hlist)) { ++ uksm_pages_shared++; ++ goto node_vma_new; ++ } else { ++ uksm_pages_sharing++; ++ } ++ ++ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) { ++ if (node_vma->key >= key) ++ break; ++ ++ if (logdedup) { ++ node_vma->slot->pages_bemerged += factor; ++ if (list_empty(&node_vma->slot->dedup_list)) ++ list_add(&node_vma->slot->dedup_list, ++ &vma_slot_dedup); ++ } ++ } ++ ++ if (node_vma) { ++ if (node_vma->key == key) { ++ node_vma_cont = hlist_entry_safe(node_vma->hlist.next, struct node_vma, hlist); ++ goto node_vma_ok; ++ } else if (node_vma->key > key) { ++ node_vma_cont = node_vma; ++ } ++ } ++ ++node_vma_new: ++ /* no same vma already in node, alloc a new node_vma */ ++ new_node_vma = alloc_node_vma(); ++ BUG_ON(!new_node_vma); ++ new_node_vma->head = stable_node; ++ new_node_vma->slot = rmap_item->slot; ++ ++ if (!node_vma) { ++ hlist_add_head(&new_node_vma->hlist, &stable_node->hlist); ++ } else if (node_vma->key != key) { ++ if (node_vma->key < key) ++ hlist_add_behind(&new_node_vma->hlist, &node_vma->hlist); ++ else { ++ hlist_add_before(&new_node_vma->hlist, ++ &node_vma->hlist); ++ } ++ ++ } ++ node_vma = new_node_vma; ++ ++node_vma_ok: /* ok, ready to add to the list */ ++ rmap_item->head = node_vma; ++ hlist_add_head(&rmap_item->hlist, &node_vma->rmap_hlist); ++ hold_anon_vma(rmap_item, rmap_item->slot->vma->anon_vma); ++ if (logdedup) { ++ rmap_item->slot->pages_merged++; ++ if (node_vma_cont) { ++ node_vma = node_vma_cont; ++ hlist_for_each_entry_continue(node_vma, hlist) { ++ node_vma->slot->pages_bemerged += factor; ++ if (list_empty(&node_vma->slot->dedup_list)) ++ list_add(&node_vma->slot->dedup_list, ++ &vma_slot_dedup); ++ } ++ } ++ } ++} ++ ++/* ++ * We use break_ksm to break COW on a ksm page: it's a stripped down ++ * ++ * if (get_user_pages(addr, 1, 1, 1, &page, NULL) == 1) ++ * put_page(page); ++ * ++ * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, ++ * in case the application has unmapped and remapped mm,addr meanwhile. ++ * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP ++ * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. ++ */ ++static int break_ksm(struct vm_area_struct *vma, unsigned long addr) ++{ ++ struct page *page; ++ int ret = 0; ++ ++ do { ++ cond_resched(); ++ page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION | FOLL_REMOTE); ++ if (IS_ERR_OR_NULL(page)) ++ break; ++ if (PageKsm(page)) { ++ ret = handle_mm_fault(vma, addr, ++ FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE); ++ } else ++ ret = VM_FAULT_WRITE; ++ put_page(page); ++ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM))); ++ /* ++ * We must loop because handle_mm_fault() may back out if there's ++ * any difficulty e.g. if pte accessed bit gets updated concurrently. ++ * ++ * VM_FAULT_WRITE is what we have been hoping for: it indicates that ++ * COW has been broken, even if the vma does not permit VM_WRITE; ++ * but note that a concurrent fault might break PageKsm for us. 
++ * ++ * VM_FAULT_SIGBUS could occur if we race with truncation of the ++ * backing file, which also invalidates anonymous pages: that's ++ * okay, that truncation will have unmapped the PageKsm for us. ++ * ++ * VM_FAULT_OOM: at the time of writing (late July 2009), setting ++ * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the ++ * current task has TIF_MEMDIE set, and will be OOM killed on return ++ * to user; and ksmd, having no mm, would never be chosen for that. ++ * ++ * But if the mm is in a limited mem_cgroup, then the fault may fail ++ * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and ++ * even ksmd can fail in this way - though it's usually breaking ksm ++ * just to undo a merge it made a moment before, so unlikely to oom. ++ * ++ * That's a pity: we might therefore have more kernel pages allocated ++ * than we're counting as nodes in the stable tree; but uksm_do_scan ++ * will retry to break_cow on each pass, so should recover the page ++ * in due course. The important thing is to not let VM_MERGEABLE ++ * be cleared while any such pages might remain in the area. ++ */ ++ return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; ++} ++ ++static void break_cow(struct rmap_item *rmap_item) ++{ ++ struct vm_area_struct *vma = rmap_item->slot->vma; ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long addr = get_rmap_addr(rmap_item); ++ ++ if (uksm_test_exit(mm)) ++ goto out; ++ ++ break_ksm(vma, addr); ++out: ++ return; ++} ++ ++/* ++ * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather ++ * than check every pte of a given vma, the locking doesn't quite work for ++ * that - an rmap_item is assigned to the stable tree after inserting ksm ++ * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing ++ * rmap_items from parent to child at fork time (so as not to waste time ++ * if exit comes before the next scan reaches it). ++ * ++ * Similarly, although we'd like to remove rmap_items (so updating counts ++ * and freeing memory) when unmerging an area, it's easier to leave that ++ * to the next pass of ksmd - consider, for example, how ksmd might be ++ * in cmp_and_merge_page on one of the rmap_items we would be removing. 
++ */ ++inline int unmerge_uksm_pages(struct vm_area_struct *vma, ++ unsigned long start, unsigned long end) ++{ ++ unsigned long addr; ++ int err = 0; ++ ++ for (addr = start; addr < end && !err; addr += PAGE_SIZE) { ++ if (uksm_test_exit(vma->vm_mm)) ++ break; ++ if (signal_pending(current)) ++ err = -ERESTARTSYS; ++ else ++ err = break_ksm(vma, addr); ++ } ++ return err; ++} ++ ++static inline void inc_uksm_pages_scanned(void) ++{ ++ u64 delta; ++ ++ ++ if (uksm_pages_scanned == U64_MAX) { ++ encode_benefit(); ++ ++ delta = uksm_pages_scanned >> pages_scanned_base; ++ ++ if (CAN_OVERFLOW_U64(pages_scanned_stored, delta)) { ++ pages_scanned_stored >>= 1; ++ delta >>= 1; ++ pages_scanned_base++; ++ } ++ ++ pages_scanned_stored += delta; ++ ++ uksm_pages_scanned = uksm_pages_scanned_last = 0; ++ } ++ ++ uksm_pages_scanned++; ++} ++ ++static inline int find_zero_page_hash(int strength, u32 hash) ++{ ++ return (zero_hash_table[strength] == hash); ++} ++ ++static ++int cmp_and_merge_zero_page(struct vm_area_struct *vma, struct page *page) ++{ ++ struct page *zero_page = empty_uksm_zero_page; ++ struct mm_struct *mm = vma->vm_mm; ++ pte_t orig_pte = __pte(0); ++ int err = -EFAULT; ++ ++ if (uksm_test_exit(mm)) ++ goto out; ++ ++ if (!trylock_page(page)) ++ goto out; ++ ++ if (!PageAnon(page)) ++ goto out_unlock; ++ ++ if (PageTransCompound(page)) { ++ err = split_huge_page(page); ++ if (err) ++ goto out_unlock; ++ } ++ ++ if (write_protect_page(vma, page, &orig_pte, 0) == 0) { ++ if (is_page_full_zero(page)) ++ err = replace_page(vma, page, zero_page, orig_pte); ++ } ++ ++out_unlock: ++ unlock_page(page); ++out: ++ return err; ++} ++ ++/* ++ * cmp_and_merge_page() - first see if page can be merged into the stable ++ * tree; if not, compare hash to previous and if it's the same, see if page ++ * can be inserted into the unstable tree, or merged with a page already there ++ * and both transferred to the stable tree. ++ * ++ * @page: the page that we are searching identical page to. ++ * @rmap_item: the reverse mapping into the virtual address of this page ++ */ ++static void cmp_and_merge_page(struct rmap_item *rmap_item, u32 hash) ++{ ++ struct rmap_item *tree_rmap_item; ++ struct page *page; ++ struct page *kpage = NULL; ++ u32 hash_max; ++ int err; ++ unsigned int success1, success2; ++ struct stable_node *snode; ++ int cmp; ++ struct rb_node *parent = NULL, **new; ++ ++ remove_rmap_item_from_tree(rmap_item); ++ page = rmap_item->page; ++ ++ /* We first start with searching the page inside the stable tree */ ++ kpage = stable_tree_search(rmap_item, hash); ++ if (kpage) { ++ err = try_to_merge_with_uksm_page(rmap_item, kpage, ++ hash); ++ if (!err) { ++ /* ++ * The page was successfully merged, add ++ * its rmap_item to the stable tree. ++ * page lock is needed because it's ++ * racing with try_to_unmap_ksm(), etc. ++ */ ++ lock_page(kpage); ++ snode = page_stable_node(kpage); ++ stable_tree_append(rmap_item, snode, 1); ++ unlock_page(kpage); ++ put_page(kpage); ++ return; /* success */ ++ } ++ put_page(kpage); ++ ++ /* ++ * if it's a collision and it has been search in sub-rbtree ++ * (hash_max != 0), we want to abort, because if it is ++ * successfully merged in unstable tree, the collision trends to ++ * happen again. 
++ */ ++ if (err == MERGE_ERR_COLLI && rmap_item->hash_max) ++ return; ++ } ++ ++ tree_rmap_item = ++ unstable_tree_search_insert(rmap_item, hash); ++ if (tree_rmap_item) { ++ err = try_to_merge_two_pages(rmap_item, tree_rmap_item, hash); ++ /* ++ * As soon as we merge this page, we want to remove the ++ * rmap_item of the page we have merged with from the unstable ++ * tree, and insert it instead as new node in the stable tree. ++ */ ++ if (!err) { ++ kpage = page; ++ remove_rmap_item_from_tree(tree_rmap_item); ++ lock_page(kpage); ++ snode = stable_tree_insert(&kpage, hash, ++ rmap_item, tree_rmap_item, ++ &success1, &success2); ++ ++ /* ++ * Do not log dedup for tree item, it's not counted as ++ * scanned in this round. ++ */ ++ if (success2) ++ stable_tree_append(tree_rmap_item, snode, 0); ++ ++ /* ++ * The order of these two stable append is important: ++ * we are scanning rmap_item. ++ */ ++ if (success1) ++ stable_tree_append(rmap_item, snode, 1); ++ ++ /* ++ * The original kpage may be unlocked inside ++ * stable_tree_insert() already. This page ++ * should be unlocked before doing ++ * break_cow(). ++ */ ++ unlock_page(kpage); ++ ++ if (!success1) ++ break_cow(rmap_item); ++ ++ if (!success2) ++ break_cow(tree_rmap_item); ++ ++ } else if (err == MERGE_ERR_COLLI) { ++ BUG_ON(tree_rmap_item->tree_node->count > 1); ++ ++ rmap_item_hash_max(tree_rmap_item, ++ tree_rmap_item->tree_node->hash); ++ ++ hash_max = rmap_item_hash_max(rmap_item, hash); ++ cmp = hash_cmp(hash_max, tree_rmap_item->hash_max); ++ parent = &tree_rmap_item->node; ++ if (cmp < 0) ++ new = &parent->rb_left; ++ else if (cmp > 0) ++ new = &parent->rb_right; ++ else ++ goto put_up_out; ++ ++ rmap_item->tree_node = tree_rmap_item->tree_node; ++ rmap_item->address |= UNSTABLE_FLAG; ++ rmap_item->hash_round = uksm_hash_round; ++ rb_link_node(&rmap_item->node, parent, new); ++ rb_insert_color(&rmap_item->node, ++ &tree_rmap_item->tree_node->sub_root); ++ rmap_item->tree_node->count++; ++ } else { ++ /* ++ * either one of the page has changed or they collide ++ * at the max hash, we consider them as ill items. 
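++			 * Drop tree_rmap_item as well, so the stale entry
++			 * does not linger in the unstable tree.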
++ */ ++ remove_rmap_item_from_tree(tree_rmap_item); ++ } ++put_up_out: ++ put_page(tree_rmap_item->page); ++ up_read(&tree_rmap_item->slot->vma->vm_mm->mmap_sem); ++ } ++} ++ ++ ++ ++ ++static inline unsigned long get_pool_index(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = (sizeof(struct rmap_list_entry *) * index) >> PAGE_SHIFT; ++ if (pool_index >= slot->pool_size) ++ BUG(); ++ return pool_index; ++} ++ ++static inline unsigned long index_page_offset(unsigned long index) ++{ ++ return offset_in_page(sizeof(struct rmap_list_entry *) * index); ++} ++ ++static inline ++struct rmap_list_entry *get_rmap_list_entry(struct vma_slot *slot, ++ unsigned long index, int need_alloc) ++{ ++ unsigned long pool_index; ++ struct page *page; ++ void *addr; ++ ++ ++ pool_index = get_pool_index(slot, index); ++ if (!slot->rmap_list_pool[pool_index]) { ++ if (!need_alloc) ++ return NULL; ++ ++ page = alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN); ++ if (!page) ++ return NULL; ++ ++ slot->rmap_list_pool[pool_index] = page; ++ } ++ ++ addr = kmap(slot->rmap_list_pool[pool_index]); ++ addr += index_page_offset(index); ++ ++ return addr; ++} ++ ++static inline void put_rmap_list_entry(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = get_pool_index(slot, index); ++ BUG_ON(!slot->rmap_list_pool[pool_index]); ++ kunmap(slot->rmap_list_pool[pool_index]); ++} ++ ++static inline int entry_is_new(struct rmap_list_entry *entry) ++{ ++ return !entry->item; ++} ++ ++static inline unsigned long get_index_orig_addr(struct vma_slot *slot, ++ unsigned long index) ++{ ++ return slot->vma->vm_start + (index << PAGE_SHIFT); ++} ++ ++static inline unsigned long get_entry_address(struct rmap_list_entry *entry) ++{ ++ unsigned long addr; ++ ++ if (is_addr(entry->addr)) ++ addr = get_clean_addr(entry->addr); ++ else if (entry->item) ++ addr = get_rmap_addr(entry->item); ++ else ++ BUG(); ++ ++ return addr; ++} ++ ++static inline struct rmap_item *get_entry_item(struct rmap_list_entry *entry) ++{ ++ if (is_addr(entry->addr)) ++ return NULL; ++ ++ return entry->item; ++} ++ ++static inline void inc_rmap_list_pool_count(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = get_pool_index(slot, index); ++ BUG_ON(!slot->rmap_list_pool[pool_index]); ++ slot->pool_counts[pool_index]++; ++} ++ ++static inline void dec_rmap_list_pool_count(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = get_pool_index(slot, index); ++ BUG_ON(!slot->rmap_list_pool[pool_index]); ++ BUG_ON(!slot->pool_counts[pool_index]); ++ slot->pool_counts[pool_index]--; ++} ++ ++static inline int entry_has_rmap(struct rmap_list_entry *entry) ++{ ++ return !is_addr(entry->addr) && entry->item; ++} ++ ++static inline void swap_entries(struct rmap_list_entry *entry1, ++ unsigned long index1, ++ struct rmap_list_entry *entry2, ++ unsigned long index2) ++{ ++ struct rmap_list_entry tmp; ++ ++ /* swapping two new entries is meaningless */ ++ BUG_ON(entry_is_new(entry1) && entry_is_new(entry2)); ++ ++ tmp = *entry1; ++ *entry1 = *entry2; ++ *entry2 = tmp; ++ ++ if (entry_has_rmap(entry1)) ++ entry1->item->entry_index = index1; ++ ++ if (entry_has_rmap(entry2)) ++ entry2->item->entry_index = index2; ++ ++ if (entry_has_rmap(entry1) && !entry_has_rmap(entry2)) { ++ inc_rmap_list_pool_count(entry1->item->slot, index1); ++ dec_rmap_list_pool_count(entry1->item->slot, index2); ++ } 
else if (!entry_has_rmap(entry1) && entry_has_rmap(entry2)) { ++ inc_rmap_list_pool_count(entry2->item->slot, index2); ++ dec_rmap_list_pool_count(entry2->item->slot, index1); ++ } ++} ++ ++static inline void free_entry_item(struct rmap_list_entry *entry) ++{ ++ unsigned long index; ++ struct rmap_item *item; ++ ++ if (!is_addr(entry->addr)) { ++ BUG_ON(!entry->item); ++ item = entry->item; ++ entry->addr = get_rmap_addr(item); ++ set_is_addr(entry->addr); ++ index = item->entry_index; ++ remove_rmap_item_from_tree(item); ++ dec_rmap_list_pool_count(item->slot, index); ++ free_rmap_item(item); ++ } ++} ++ ++static inline int pool_entry_boundary(unsigned long index) ++{ ++ unsigned long linear_addr; ++ ++ linear_addr = sizeof(struct rmap_list_entry *) * index; ++ return index && !offset_in_page(linear_addr); ++} ++ ++static inline void try_free_last_pool(struct vma_slot *slot, ++ unsigned long index) ++{ ++ unsigned long pool_index; ++ ++ pool_index = get_pool_index(slot, index); ++ if (slot->rmap_list_pool[pool_index] && ++ !slot->pool_counts[pool_index]) { ++ __free_page(slot->rmap_list_pool[pool_index]); ++ slot->rmap_list_pool[pool_index] = NULL; ++ slot->flags |= UKSM_SLOT_NEED_SORT; ++ } ++ ++} ++ ++static inline unsigned long vma_item_index(struct vm_area_struct *vma, ++ struct rmap_item *item) ++{ ++ return (get_rmap_addr(item) - vma->vm_start) >> PAGE_SHIFT; ++} ++ ++static int within_same_pool(struct vma_slot *slot, ++ unsigned long i, unsigned long j) ++{ ++ unsigned long pool_i, pool_j; ++ ++ pool_i = get_pool_index(slot, i); ++ pool_j = get_pool_index(slot, j); ++ ++ return (pool_i == pool_j); ++} ++ ++static void sort_rmap_entry_list(struct vma_slot *slot) ++{ ++ unsigned long i, j; ++ struct rmap_list_entry *entry, *swap_entry; ++ ++ entry = get_rmap_list_entry(slot, 0, 0); ++ for (i = 0; i < slot->pages; ) { ++ ++ if (!entry) ++ goto skip_whole_pool; ++ ++ if (entry_is_new(entry)) ++ goto next_entry; ++ ++ if (is_addr(entry->addr)) { ++ entry->addr = 0; ++ goto next_entry; ++ } ++ ++ j = vma_item_index(slot->vma, entry->item); ++ if (j == i) ++ goto next_entry; ++ ++ if (within_same_pool(slot, i, j)) ++ swap_entry = entry + j - i; ++ else ++ swap_entry = get_rmap_list_entry(slot, j, 1); ++ ++ swap_entries(entry, i, swap_entry, j); ++ if (!within_same_pool(slot, i, j)) ++ put_rmap_list_entry(slot, j); ++ continue; ++ ++skip_whole_pool: ++ i += PAGE_SIZE / sizeof(*entry); ++ if (i < slot->pages) ++ entry = get_rmap_list_entry(slot, i, 0); ++ continue; ++ ++next_entry: ++ if (i >= slot->pages - 1 || ++ !within_same_pool(slot, i, i + 1)) { ++ put_rmap_list_entry(slot, i); ++ if (i + 1 < slot->pages) ++ entry = get_rmap_list_entry(slot, i + 1, 0); ++ } else ++ entry++; ++ i++; ++ continue; ++ } ++ ++ /* free empty pool entries which contain no rmap_item */ ++ /* CAN be simplied to based on only pool_counts when bug freed !!!!! 
*/ ++ for (i = 0; i < slot->pool_size; i++) { ++ unsigned char has_rmap; ++ void *addr; ++ ++ if (!slot->rmap_list_pool[i]) ++ continue; ++ ++ has_rmap = 0; ++ addr = kmap(slot->rmap_list_pool[i]); ++ BUG_ON(!addr); ++ for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) { ++ entry = (struct rmap_list_entry *)addr + j; ++ if (is_addr(entry->addr)) ++ continue; ++ if (!entry->item) ++ continue; ++ has_rmap = 1; ++ } ++ kunmap(slot->rmap_list_pool[i]); ++ if (!has_rmap) { ++ BUG_ON(slot->pool_counts[i]); ++ __free_page(slot->rmap_list_pool[i]); ++ slot->rmap_list_pool[i] = NULL; ++ } ++ } ++ ++ slot->flags &= ~UKSM_SLOT_NEED_SORT; ++} ++ ++/* ++ * vma_fully_scanned() - if all the pages in this slot have been scanned. ++ */ ++static inline int vma_fully_scanned(struct vma_slot *slot) ++{ ++ return slot->pages_scanned == slot->pages; ++} ++ ++/** ++ * get_next_rmap_item() - Get the next rmap_item in a vma_slot according to ++ * its random permutation. This function is embedded with the random ++ * permutation index management code. ++ */ ++static struct rmap_item *get_next_rmap_item(struct vma_slot *slot, u32 *hash) ++{ ++ unsigned long rand_range, addr, swap_index, scan_index; ++ struct rmap_item *item = NULL; ++ struct rmap_list_entry *scan_entry, *swap_entry = NULL; ++ struct page *page; ++ ++ scan_index = swap_index = slot->pages_scanned % slot->pages; ++ ++ if (pool_entry_boundary(scan_index)) ++ try_free_last_pool(slot, scan_index - 1); ++ ++ if (vma_fully_scanned(slot)) { ++ if (slot->flags & UKSM_SLOT_NEED_SORT) ++ slot->flags |= UKSM_SLOT_NEED_RERAND; ++ else ++ slot->flags &= ~UKSM_SLOT_NEED_RERAND; ++ if (slot->flags & UKSM_SLOT_NEED_SORT) ++ sort_rmap_entry_list(slot); ++ } ++ ++ scan_entry = get_rmap_list_entry(slot, scan_index, 1); ++ if (!scan_entry) ++ return NULL; ++ ++ if (entry_is_new(scan_entry)) { ++ scan_entry->addr = get_index_orig_addr(slot, scan_index); ++ set_is_addr(scan_entry->addr); ++ } ++ ++ if (slot->flags & UKSM_SLOT_NEED_RERAND) { ++ rand_range = slot->pages - scan_index; ++ BUG_ON(!rand_range); ++ swap_index = scan_index + (prandom_u32() % rand_range); ++ } ++ ++ if (swap_index != scan_index) { ++ swap_entry = get_rmap_list_entry(slot, swap_index, 1); ++ if (entry_is_new(swap_entry)) { ++ swap_entry->addr = get_index_orig_addr(slot, ++ swap_index); ++ set_is_addr(swap_entry->addr); ++ } ++ swap_entries(scan_entry, scan_index, swap_entry, swap_index); ++ } ++ ++ addr = get_entry_address(scan_entry); ++ item = get_entry_item(scan_entry); ++ BUG_ON(addr > slot->vma->vm_end || addr < slot->vma->vm_start); ++ ++ page = follow_page(slot->vma, addr, FOLL_GET); ++ if (IS_ERR_OR_NULL(page)) ++ goto nopage; ++ ++ if (!PageAnon(page)) ++ goto putpage; ++ ++ /*check is zero_page pfn or uksm_zero_page*/ ++ if ((page_to_pfn(page) == zero_pfn) ++ || (page_to_pfn(page) == uksm_zero_pfn)) ++ goto putpage; ++ ++ flush_anon_page(slot->vma, page, addr); ++ flush_dcache_page(page); ++ ++ ++ *hash = page_hash(page, hash_strength, 1); ++ inc_uksm_pages_scanned(); ++ /*if the page content all zero, re-map to zero-page*/ ++ if (find_zero_page_hash(hash_strength, *hash)) { ++ if (!cmp_and_merge_zero_page(slot->vma, page)) { ++ slot->pages_merged++; ++ ++ /* For full-zero pages, no need to create rmap item */ ++ goto putpage; ++ } else { ++ inc_rshash_neg(memcmp_cost / 2); ++ } ++ } ++ ++ if (!item) { ++ item = alloc_rmap_item(); ++ if (item) { ++ /* It has already been zeroed */ ++ item->slot = slot; ++ item->address = addr; ++ item->entry_index = scan_index; ++ scan_entry->item = item; 
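++			/* the entry now owns an rmap_item; count it in its pool page */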
++			inc_rmap_list_pool_count(slot, scan_index);
++		} else
++			goto putpage;
++	}
++
++	BUG_ON(item->slot != slot);
++	/* the page may have changed */
++	item->page = page;
++	put_rmap_list_entry(slot, scan_index);
++	if (swap_entry)
++		put_rmap_list_entry(slot, swap_index);
++	return item;
++
++putpage:
++	put_page(page);
++	page = NULL;
++nopage:
++	/* no page, store addr back and free rmap_item if possible */
++	free_entry_item(scan_entry);
++	put_rmap_list_entry(slot, scan_index);
++	if (swap_entry)
++		put_rmap_list_entry(slot, swap_index);
++	return NULL;
++}
++
++static inline int in_stable_tree(struct rmap_item *rmap_item)
++{
++	return rmap_item->address & STABLE_FLAG;
++}
++
++/**
++ * scan_vma_one_page() - scan the next page in a vma_slot. Called with
++ * mmap_sem locked.
++ */
++static noinline void scan_vma_one_page(struct vma_slot *slot)
++{
++	u32 hash;
++	struct mm_struct *mm;
++	struct rmap_item *rmap_item = NULL;
++	struct vm_area_struct *vma = slot->vma;
++
++	mm = vma->vm_mm;
++	BUG_ON(!mm);
++	BUG_ON(!slot);
++
++	rmap_item = get_next_rmap_item(slot, &hash);
++	if (!rmap_item)
++		goto out1;
++
++	if (PageKsm(rmap_item->page) && in_stable_tree(rmap_item))
++		goto out2;
++
++	cmp_and_merge_page(rmap_item, hash);
++out2:
++	put_page(rmap_item->page);
++out1:
++	slot->pages_scanned++;
++	slot->this_sampled++;
++	if (slot->fully_scanned_round != fully_scanned_round)
++		scanned_virtual_pages++;
++
++	if (vma_fully_scanned(slot))
++		slot->fully_scanned_round = fully_scanned_round;
++}
++
++static inline unsigned long rung_get_pages(struct scan_rung *rung)
++{
++	struct slot_tree_node *node;
++
++	if (!rung->vma_root.rnode)
++		return 0;
++
++	node = container_of(rung->vma_root.rnode, struct slot_tree_node, snode);
++
++	return node->size;
++}
++
++#define RUNG_SAMPLED_MIN 3
++
++static inline
++void uksm_calc_rung_step(struct scan_rung *rung,
++			 unsigned long page_time, unsigned long ratio)
++{
++	unsigned long sampled, pages;
++
++	/* will it be fully scanned? */
++	if (!rung->cover_msecs) {
++		rung->step = 1;
++		return;
++	}
++
++	sampled = rung->cover_msecs * (NSEC_PER_MSEC / TIME_RATIO_SCALE)
++		* ratio / page_time;
++
++	/*
++	 * Before we finish a scan round and its expensive per-round jobs,
++	 * we need a chance to estimate the per-page time. So
++	 * the sampled number cannot be too small.
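++	 * E.g. with cover_msecs == 1000 and page_time == 500ns, a rung
++	 * allowed the full CPU would sample roughly 2,000,000 pages per
++	 * cover period (an illustrative figure, not a tuned value).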
++ */ ++ if (sampled < RUNG_SAMPLED_MIN) ++ sampled = RUNG_SAMPLED_MIN; ++ ++ pages = rung_get_pages(rung); ++ if (likely(pages > sampled)) ++ rung->step = pages / sampled; ++ else ++ rung->step = 1; ++} ++ ++static inline int step_need_recalc(struct scan_rung *rung) ++{ ++ unsigned long pages, stepmax; ++ ++ pages = rung_get_pages(rung); ++ stepmax = pages / RUNG_SAMPLED_MIN; ++ ++ return pages && (rung->step > pages || ++ (stepmax && rung->step > stepmax)); ++} ++ ++static inline ++void reset_current_scan(struct scan_rung *rung, int finished, int step_recalc) ++{ ++ struct vma_slot *slot; ++ ++ if (finished) ++ rung->flags |= UKSM_RUNG_ROUND_FINISHED; ++ ++ if (step_recalc || step_need_recalc(rung)) { ++ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio); ++ BUG_ON(step_need_recalc(rung)); ++ } ++ ++ slot_iter_index = prandom_u32() % rung->step; ++ BUG_ON(!rung->vma_root.rnode); ++ slot = sradix_tree_next(&rung->vma_root, NULL, 0, slot_iter); ++ BUG_ON(!slot); ++ ++ rung->current_scan = slot; ++ rung->current_offset = slot_iter_index; ++} ++ ++static inline struct sradix_tree_root *slot_get_root(struct vma_slot *slot) ++{ ++ return &slot->rung->vma_root; ++} ++ ++/* ++ * return if resetted. ++ */ ++static int advance_current_scan(struct scan_rung *rung) ++{ ++ unsigned short n; ++ struct vma_slot *slot, *next = NULL; ++ ++ BUG_ON(!rung->vma_root.num); ++ ++ slot = rung->current_scan; ++ n = (slot->pages - rung->current_offset) % rung->step; ++ slot_iter_index = rung->step - n; ++ next = sradix_tree_next(&rung->vma_root, slot->snode, ++ slot->sindex, slot_iter); ++ ++ if (next) { ++ rung->current_offset = slot_iter_index; ++ rung->current_scan = next; ++ return 0; ++ } else { ++ reset_current_scan(rung, 1, 0); ++ return 1; ++ } ++} ++ ++static inline void rung_rm_slot(struct vma_slot *slot) ++{ ++ struct scan_rung *rung = slot->rung; ++ struct sradix_tree_root *root; ++ ++ if (rung->current_scan == slot) ++ advance_current_scan(rung); ++ ++ root = slot_get_root(slot); ++ sradix_tree_delete_from_leaf(root, slot->snode, slot->sindex); ++ slot->snode = NULL; ++ if (step_need_recalc(rung)) { ++ uksm_calc_rung_step(rung, uksm_ema_page_time, rung->cpu_ratio); ++ BUG_ON(step_need_recalc(rung)); ++ } ++ ++ /* In case advance_current_scan loop back to this slot again */ ++ if (rung->vma_root.num && rung->current_scan == slot) ++ reset_current_scan(slot->rung, 1, 0); ++} ++ ++static inline void rung_add_new_slots(struct scan_rung *rung, ++ struct vma_slot **slots, unsigned long num) ++{ ++ int err; ++ struct vma_slot *slot; ++ unsigned long i; ++ struct sradix_tree_root *root = &rung->vma_root; ++ ++ err = sradix_tree_enter(root, (void **)slots, num); ++ BUG_ON(err); ++ ++ for (i = 0; i < num; i++) { ++ slot = slots[i]; ++ slot->rung = rung; ++ BUG_ON(vma_fully_scanned(slot)); ++ } ++ ++ if (rung->vma_root.num == num) ++ reset_current_scan(rung, 0, 1); ++} ++ ++static inline int rung_add_one_slot(struct scan_rung *rung, ++ struct vma_slot *slot) ++{ ++ int err; ++ ++ err = sradix_tree_enter(&rung->vma_root, (void **)&slot, 1); ++ if (err) ++ return err; ++ ++ slot->rung = rung; ++ if (rung->vma_root.num == 1) ++ reset_current_scan(rung, 0, 1); ++ ++ return 0; ++} ++ ++/* ++ * Return true if the slot is deleted from its rung. 
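++ * If re-adding to the new rung fails (OOM), a best-effort re-add to
++ * the old rung is attempted.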
++ */
++static inline int vma_rung_enter(struct vma_slot *slot, struct scan_rung *rung)
++{
++	struct scan_rung *old_rung = slot->rung;
++	int err;
++
++	if (old_rung == rung)
++		return 0;
++
++	rung_rm_slot(slot);
++	err = rung_add_one_slot(rung, slot);
++	if (err) {
++		err = rung_add_one_slot(old_rung, slot);
++		WARN_ON(err); /* OOPS, badly OOM, we lost this slot */
++	}
++
++	return 1;
++}
++
++static inline int vma_rung_up(struct vma_slot *slot)
++{
++	struct scan_rung *rung;
++
++	rung = slot->rung;
++	if (slot->rung != &uksm_scan_ladder[SCAN_LADDER_SIZE-1])
++		rung++;
++
++	return vma_rung_enter(slot, rung);
++}
++
++static inline int vma_rung_down(struct vma_slot *slot)
++{
++	struct scan_rung *rung;
++
++	rung = slot->rung;
++	if (slot->rung != &uksm_scan_ladder[0])
++		rung--;
++
++	return vma_rung_enter(slot, rung);
++}
++
++/**
++ * cal_dedup_ratio() - Calculate the deduplication ratio for this slot.
++ */
++static unsigned long cal_dedup_ratio(struct vma_slot *slot)
++{
++	unsigned long ret;
++	unsigned long pages;
++
++	pages = slot->this_sampled;
++	if (!pages)
++		return 0;
++
++	BUG_ON(slot->pages_scanned == slot->last_scanned);
++
++	ret = slot->pages_merged;
++
++	/* Thrashing area filtering */
++	if (ret && uksm_thrash_threshold) {
++		if (slot->pages_cowed * 100 / slot->pages_merged
++		    > uksm_thrash_threshold) {
++			ret = 0;
++		} else {
++			ret = slot->pages_merged - slot->pages_cowed;
++		}
++	}
++
++	return ret * 100 / pages;
++}
++
++/**
++ * cal_dedup_ratio_old() - Calculate the deduplication ratio for this slot.
++ */
++static unsigned long cal_dedup_ratio_old(struct vma_slot *slot)
++{
++	unsigned long ret;
++	unsigned long pages;
++
++	pages = slot->pages;
++	if (!pages)
++		return 0;
++
++	ret = slot->pages_bemerged;
++
++	/* Thrashing area filtering */
++	if (ret && uksm_thrash_threshold) {
++		if (slot->pages_cowed * 100 / slot->pages_bemerged
++		    > uksm_thrash_threshold) {
++			ret = 0;
++		} else {
++			ret = slot->pages_bemerged - slot->pages_cowed;
++		}
++	}
++
++	return ret * 100 / pages;
++}
++
++/**
++ * stable_node_reinsert() - When the hash_strength has been adjusted, the
++ * stable tree needs to be restructured; this is the function that re-inserts
++ * a stable node.
++ */ ++static inline void stable_node_reinsert(struct stable_node *new_node, ++ struct page *page, ++ struct rb_root *root_treep, ++ struct list_head *tree_node_listp, ++ u32 hash) ++{ ++ struct rb_node **new = &root_treep->rb_node; ++ struct rb_node *parent = NULL; ++ struct stable_node *stable_node; ++ struct tree_node *tree_node; ++ struct page *tree_page; ++ int cmp; ++ ++ while (*new) { ++ int cmp; ++ ++ tree_node = rb_entry(*new, struct tree_node, node); ++ ++ cmp = hash_cmp(hash, tree_node->hash); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else ++ break; ++ } ++ ++ if (*new) { ++ /* find a stable tree node with same first level hash value */ ++ stable_node_hash_max(new_node, page, hash); ++ if (tree_node->count == 1) { ++ stable_node = rb_entry(tree_node->sub_root.rb_node, ++ struct stable_node, node); ++ tree_page = get_uksm_page(stable_node, 1, 0); ++ if (tree_page) { ++ stable_node_hash_max(stable_node, ++ tree_page, hash); ++ put_page(tree_page); ++ ++ /* prepare for stable node insertion */ ++ ++ cmp = hash_cmp(new_node->hash_max, ++ stable_node->hash_max); ++ parent = &stable_node->node; ++ if (cmp < 0) ++ new = &parent->rb_left; ++ else if (cmp > 0) ++ new = &parent->rb_right; ++ else ++ goto failed; ++ ++ goto add_node; ++ } else { ++ /* the only stable_node deleted, the tree node ++ * was not deleted. ++ */ ++ goto tree_node_reuse; ++ } ++ } ++ ++ /* well, search the collision subtree */ ++ new = &tree_node->sub_root.rb_node; ++ parent = NULL; ++ BUG_ON(!*new); ++ while (*new) { ++ int cmp; ++ ++ stable_node = rb_entry(*new, struct stable_node, node); ++ ++ cmp = hash_cmp(new_node->hash_max, ++ stable_node->hash_max); ++ ++ if (cmp < 0) { ++ parent = *new; ++ new = &parent->rb_left; ++ } else if (cmp > 0) { ++ parent = *new; ++ new = &parent->rb_right; ++ } else { ++ /* oh, no, still a collision */ ++ goto failed; ++ } ++ } ++ ++ goto add_node; ++ } ++ ++ /* no tree node found */ ++ tree_node = alloc_tree_node(tree_node_listp); ++ if (!tree_node) { ++ printk(KERN_ERR "UKSM: memory allocation error!\n"); ++ goto failed; ++ } else { ++ tree_node->hash = hash; ++ rb_link_node(&tree_node->node, parent, new); ++ rb_insert_color(&tree_node->node, root_treep); ++ ++tree_node_reuse: ++ /* prepare for stable node insertion */ ++ parent = NULL; ++ new = &tree_node->sub_root.rb_node; ++ } ++ ++add_node: ++ rb_link_node(&new_node->node, parent, new); ++ rb_insert_color(&new_node->node, &tree_node->sub_root); ++ new_node->tree_node = tree_node; ++ tree_node->count++; ++ return; ++ ++failed: ++ /* This can only happen when two nodes have collided ++ * in two levels. ++ */ ++ new_node->tree_node = NULL; ++ return; ++} ++ ++static inline void free_all_tree_nodes(struct list_head *list) ++{ ++ struct tree_node *node, *tmp; ++ ++ list_for_each_entry_safe(node, tmp, list, all_list) { ++ free_tree_node(node); ++ } ++} ++ ++/** ++ * stable_tree_delta_hash() - Delta hash the stable tree from previous hash ++ * strength to the current hash_strength. It re-structures the hole tree. 
++ */
++static inline void stable_tree_delta_hash(u32 prev_hash_strength)
++{
++	struct stable_node *node, *tmp;
++	struct rb_root *root_new_treep;
++	struct list_head *new_tree_node_listp;
++
++	stable_tree_index = (stable_tree_index + 1) % 2;
++	root_new_treep = &root_stable_tree[stable_tree_index];
++	new_tree_node_listp = &stable_tree_node_list[stable_tree_index];
++	*root_new_treep = RB_ROOT;
++	BUG_ON(!list_empty(new_tree_node_listp));
++
++	/*
++	 * we need to be safe, the node could be removed by get_uksm_page()
++	 */
++	list_for_each_entry_safe(node, tmp, &stable_node_list, all_list) {
++		void *addr;
++		struct page *node_page;
++		u32 hash;
++
++		/*
++		 * We are completely re-structuring the stable nodes to a new
++		 * stable tree. We don't want to touch the old tree unlinks and
++		 * old tree_nodes. The old tree_nodes will be freed at once.
++		 */
++		node_page = get_uksm_page(node, 0, 0);
++		if (!node_page)
++			continue;
++
++		if (node->tree_node) {
++			hash = node->tree_node->hash;
++
++			addr = kmap_atomic(node_page);
++
++			hash = delta_hash(addr, prev_hash_strength,
++					  hash_strength, hash);
++			kunmap_atomic(addr);
++		} else {
++			/*
++			 * It was not inserted into the rbtree due to a
++			 * collision in the last scan round.
++			 */
++			hash = page_hash(node_page, hash_strength, 0);
++		}
++
++		stable_node_reinsert(node, node_page, root_new_treep,
++				     new_tree_node_listp, hash);
++		put_page(node_page);
++	}
++
++	root_stable_treep = root_new_treep;
++	free_all_tree_nodes(stable_tree_node_listp);
++	BUG_ON(!list_empty(stable_tree_node_listp));
++	stable_tree_node_listp = new_tree_node_listp;
++}
++
++static inline void inc_hash_strength(unsigned long delta)
++{
++	hash_strength += 1 << delta;
++	if (hash_strength > HASH_STRENGTH_MAX)
++		hash_strength = HASH_STRENGTH_MAX;
++}
++
++static inline void dec_hash_strength(unsigned long delta)
++{
++	unsigned long change = 1 << delta;
++
++	if (hash_strength <= change + 1)
++		hash_strength = 1;
++	else
++		hash_strength -= change;
++}
++
++static inline void inc_hash_strength_delta(void)
++{
++	hash_strength_delta++;
++	if (hash_strength_delta > HASH_STRENGTH_DELTA_MAX)
++		hash_strength_delta = HASH_STRENGTH_DELTA_MAX;
++}
++
++/*
++static inline unsigned long get_current_neg_ratio(void)
++{
++	if (!rshash_pos || rshash_neg > rshash_pos)
++		return 100;
++
++	return div64_u64(100 * rshash_neg, rshash_pos);
++}
++*/
++
++static inline unsigned long get_current_neg_ratio(void)
++{
++	u64 pos = benefit.pos;
++	u64 neg = benefit.neg;
++
++	if (!neg)
++		return 0;
++
++	if (!pos || neg > pos)
++		return 100;
++
++	if (neg > div64_u64(U64_MAX, 100))
++		pos = div64_u64(pos, 100);
++	else
++		neg *= 100;
++
++	return div64_u64(neg, pos);
++}
++
++static inline unsigned long get_current_benefit(void)
++{
++	u64 pos = benefit.pos;
++	u64 neg = benefit.neg;
++	u64 scanned = benefit.scanned;
++
++	if (neg > pos)
++		return 0;
++
++	return div64_u64((pos - neg), scanned);
++}
++
++static inline int judge_rshash_direction(void)
++{
++	u64 current_neg_ratio, stable_benefit;
++	u64 current_benefit, delta = 0;
++	int ret = STILL;
++
++	/* Try to probe a value right after boot, and in case the system
++	   stays still for a long time.
*/ ++ if ((fully_scanned_round & 0xFFULL) == 10) { ++ ret = OBSCURE; ++ goto out; ++ } ++ ++ current_neg_ratio = get_current_neg_ratio(); ++ ++ if (current_neg_ratio == 0) { ++ rshash_neg_cont_zero++; ++ if (rshash_neg_cont_zero > 2) ++ return GO_DOWN; ++ else ++ return STILL; ++ } ++ rshash_neg_cont_zero = 0; ++ ++ if (current_neg_ratio > 90) { ++ ret = GO_UP; ++ goto out; ++ } ++ ++ current_benefit = get_current_benefit(); ++ stable_benefit = rshash_state.stable_benefit; ++ ++ if (!stable_benefit) { ++ ret = OBSCURE; ++ goto out; ++ } ++ ++ if (current_benefit > stable_benefit) ++ delta = current_benefit - stable_benefit; ++ else if (current_benefit < stable_benefit) ++ delta = stable_benefit - current_benefit; ++ ++ delta = div64_u64(100 * delta , stable_benefit); ++ ++ if (delta > 50) { ++ rshash_cont_obscure++; ++ if (rshash_cont_obscure > 2) ++ return OBSCURE; ++ else ++ return STILL; ++ } ++ ++out: ++ rshash_cont_obscure = 0; ++ return ret; ++} ++ ++/** ++ * rshash_adjust() - The main function to control the random sampling state ++ * machine for hash strength adapting. ++ * ++ * return true if hash_strength has changed. ++ */ ++static inline int rshash_adjust(void) ++{ ++ unsigned long prev_hash_strength = hash_strength; ++ ++ if (!encode_benefit()) ++ return 0; ++ ++ switch (rshash_state.state) { ++ case RSHASH_STILL: ++ switch (judge_rshash_direction()) { ++ case GO_UP: ++ if (rshash_state.pre_direct == GO_DOWN) ++ hash_strength_delta = 0; ++ ++ inc_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ rshash_state.stable_benefit = get_current_benefit(); ++ rshash_state.pre_direct = GO_UP; ++ break; ++ ++ case GO_DOWN: ++ if (rshash_state.pre_direct == GO_UP) ++ hash_strength_delta = 0; ++ ++ dec_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ rshash_state.stable_benefit = get_current_benefit(); ++ rshash_state.pre_direct = GO_DOWN; ++ break; ++ ++ case OBSCURE: ++ rshash_state.stable_point = hash_strength; ++ rshash_state.turn_point_down = hash_strength; ++ rshash_state.turn_point_up = hash_strength; ++ rshash_state.turn_benefit_down = get_current_benefit(); ++ rshash_state.turn_benefit_up = get_current_benefit(); ++ rshash_state.lookup_window_index = 0; ++ rshash_state.state = RSHASH_TRYDOWN; ++ dec_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ break; ++ ++ case STILL: ++ break; ++ default: ++ BUG(); ++ } ++ break; ++ ++ case RSHASH_TRYDOWN: ++ if (rshash_state.lookup_window_index++ % 5 == 0) ++ rshash_state.below_count = 0; ++ ++ if (get_current_benefit() < rshash_state.stable_benefit) ++ rshash_state.below_count++; ++ else if (get_current_benefit() > ++ rshash_state.turn_benefit_down) { ++ rshash_state.turn_point_down = hash_strength; ++ rshash_state.turn_benefit_down = get_current_benefit(); ++ } ++ ++ if (rshash_state.below_count >= 3 || ++ judge_rshash_direction() == GO_UP || ++ hash_strength == 1) { ++ hash_strength = rshash_state.stable_point; ++ hash_strength_delta = 0; ++ inc_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ rshash_state.lookup_window_index = 0; ++ rshash_state.state = RSHASH_TRYUP; ++ hash_strength_delta = 0; ++ } else { ++ dec_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ } ++ break; ++ ++ case RSHASH_TRYUP: ++ if (rshash_state.lookup_window_index++ % 5 == 0) ++ rshash_state.below_count = 0; ++ ++ if (get_current_benefit() < rshash_state.turn_benefit_down) ++ rshash_state.below_count++; ++ else if (get_current_benefit() > rshash_state.turn_benefit_up) 
{ ++ rshash_state.turn_point_up = hash_strength; ++ rshash_state.turn_benefit_up = get_current_benefit(); ++ } ++ ++ if (rshash_state.below_count >= 3 || ++ judge_rshash_direction() == GO_DOWN || ++ hash_strength == HASH_STRENGTH_MAX) { ++ hash_strength = rshash_state.turn_benefit_up > ++ rshash_state.turn_benefit_down ? ++ rshash_state.turn_point_up : ++ rshash_state.turn_point_down; ++ ++ rshash_state.state = RSHASH_PRE_STILL; ++ } else { ++ inc_hash_strength(hash_strength_delta); ++ inc_hash_strength_delta(); ++ } ++ ++ break; ++ ++ case RSHASH_NEW: ++ case RSHASH_PRE_STILL: ++ rshash_state.stable_benefit = get_current_benefit(); ++ rshash_state.state = RSHASH_STILL; ++ hash_strength_delta = 0; ++ break; ++ default: ++ BUG(); ++ } ++ ++ /* rshash_neg = rshash_pos = 0; */ ++ reset_benefit(); ++ ++ if (prev_hash_strength != hash_strength) ++ stable_tree_delta_hash(prev_hash_strength); ++ ++ return prev_hash_strength != hash_strength; ++} ++ ++/** ++ * round_update_ladder() - The main function to do update of all the ++ * adjustments whenever a scan round is finished. ++ */ ++static noinline void round_update_ladder(void) ++{ ++ int i; ++ unsigned long dedup; ++ struct vma_slot *slot, *tmp_slot; ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ uksm_scan_ladder[i].flags &= ~UKSM_RUNG_ROUND_FINISHED; ++ } ++ ++ list_for_each_entry_safe(slot, tmp_slot, &vma_slot_dedup, dedup_list) { ++ ++ /* slot may be rung_rm_slot() when mm exits */ ++ if (slot->snode) { ++ dedup = cal_dedup_ratio_old(slot); ++ if (dedup && dedup >= uksm_abundant_threshold) ++ vma_rung_up(slot); ++ } ++ ++ slot->pages_bemerged = 0; ++ slot->pages_cowed = 0; ++ ++ list_del_init(&slot->dedup_list); ++ } ++} ++ ++static void uksm_del_vma_slot(struct vma_slot *slot) ++{ ++ int i, j; ++ struct rmap_list_entry *entry; ++ ++ if (slot->snode) { ++ /* ++ * In case it just failed when entering the rung, it's not ++ * necessary. 
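++		 * (slot->snode is NULL if the slot never made it into a
++		 * rung, in which case there is nothing to remove.)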
++		 */
++		rung_rm_slot(slot);
++	}
++
++	if (!list_empty(&slot->dedup_list))
++		list_del(&slot->dedup_list);
++
++	if (!slot->rmap_list_pool || !slot->pool_counts) {
++		/* In case it OOMed in uksm_vma_enter() */
++		goto out;
++	}
++
++	for (i = 0; i < slot->pool_size; i++) {
++		void *addr;
++
++		if (!slot->rmap_list_pool[i])
++			continue;
++
++		addr = kmap(slot->rmap_list_pool[i]);
++		for (j = 0; j < PAGE_SIZE / sizeof(*entry); j++) {
++			entry = (struct rmap_list_entry *)addr + j;
++			if (is_addr(entry->addr))
++				continue;
++			if (!entry->item)
++				continue;
++
++			remove_rmap_item_from_tree(entry->item);
++			free_rmap_item(entry->item);
++			slot->pool_counts[i]--;
++		}
++		BUG_ON(slot->pool_counts[i]);
++		kunmap(slot->rmap_list_pool[i]);
++		__free_page(slot->rmap_list_pool[i]);
++	}
++	kfree(slot->rmap_list_pool);
++	kfree(slot->pool_counts);
++
++out:
++	slot->rung = NULL;
++	if (slot->flags & UKSM_SLOT_IN_UKSM) {
++		BUG_ON(uksm_pages_total < slot->pages);
++		uksm_pages_total -= slot->pages;
++	}
++
++	if (slot->fully_scanned_round == fully_scanned_round)
++		scanned_virtual_pages -= slot->pages;
++	else
++		scanned_virtual_pages -= slot->pages_scanned;
++	free_vma_slot(slot);
++}
++
++
++#define SPIN_LOCK_PERIOD 32
++static struct vma_slot *cleanup_slots[SPIN_LOCK_PERIOD];
++static inline void cleanup_vma_slots(void)
++{
++	struct vma_slot *slot;
++	int i;
++
++	i = 0;
++	spin_lock(&vma_slot_list_lock);
++	while (!list_empty(&vma_slot_del)) {
++		slot = list_entry(vma_slot_del.next,
++				  struct vma_slot, slot_list);
++		list_del(&slot->slot_list);
++		cleanup_slots[i++] = slot;
++		if (i == SPIN_LOCK_PERIOD) {
++			spin_unlock(&vma_slot_list_lock);
++			while (--i >= 0)
++				uksm_del_vma_slot(cleanup_slots[i]);
++			i = 0;
++			spin_lock(&vma_slot_list_lock);
++		}
++	}
++	spin_unlock(&vma_slot_list_lock);
++
++	while (--i >= 0)
++		uksm_del_vma_slot(cleanup_slots[i]);
++}
++
++/*
++ * exponential moving average formula
++ */
++static inline unsigned long ema(unsigned long curr, unsigned long last_ema)
++{
++	/*
++	 * For a very high burst, even the ema cannot work well: a falsely
++	 * very high per-page time estimate feeds back as very high
++	 * context-switch and rung-update overhead, which then leads to an
++	 * even higher per-page time, and this may not converge.
++	 *
++	 * Instead, we try to approach this value in a binary manner.
++	 */
++	if (curr > last_ema * 10)
++		return last_ema * 2;
++
++	return (EMA_ALPHA * curr + (100 - EMA_ALPHA) * last_ema) / 100;
++}
++
++/*
++ * Convert a cpu ratio, given by the user in units of 1/TIME_RATIO_SCALE,
++ * to nanoseconds based on the current uksm_sleep_jiffies.
++ */
++static inline unsigned long cpu_ratio_to_nsec(unsigned int ratio)
++{
++	return NSEC_PER_USEC * jiffies_to_usecs(uksm_sleep_jiffies) /
++		(TIME_RATIO_SCALE - ratio) * ratio;
++}
++
++
++static inline unsigned long rung_real_ratio(int cpu_time_ratio)
++{
++	unsigned long ret;
++
++	BUG_ON(!cpu_time_ratio);
++
++	if (cpu_time_ratio > 0)
++		ret = cpu_time_ratio;
++	else
++		ret = (unsigned long)(-cpu_time_ratio) *
++			uksm_max_cpu_percentage / 100UL;
++
++	return ret ?
++
++static noinline void uksm_calc_scan_pages(void)
++{
++	struct scan_rung *ladder = uksm_scan_ladder;
++	unsigned long sleep_usecs, nsecs;
++	unsigned long ratio;
++	int i;
++	unsigned long per_page;
++
++	if (uksm_ema_page_time > 100000 ||
++	    (((unsigned long) uksm_eval_round & (256UL - 1)) == 0UL))
++		uksm_ema_page_time = UKSM_PAGE_TIME_DEFAULT;
++
++	per_page = uksm_ema_page_time;
++	BUG_ON(!per_page);
++
++	/*
++	 * For every 8 eval rounds, we try to probe a uksm_sleep_jiffies value
++	 * based on saved user input.
++	 */
++	if (((unsigned long) uksm_eval_round & (8UL - 1)) == 0UL)
++		uksm_sleep_jiffies = uksm_sleep_saved;
++
++	/* We require a rung to scan at least 1 page in a period. */
++	nsecs = per_page;
++	ratio = rung_real_ratio(ladder[0].cpu_ratio);
++	if (cpu_ratio_to_nsec(ratio) < nsecs) {
++		sleep_usecs = nsecs * (TIME_RATIO_SCALE - ratio) / ratio
++			/ NSEC_PER_USEC;
++		uksm_sleep_jiffies = usecs_to_jiffies(sleep_usecs) + 1;
++	}
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		ratio = rung_real_ratio(ladder[i].cpu_ratio);
++		ladder[i].pages_to_scan = cpu_ratio_to_nsec(ratio) /
++					  per_page;
++		BUG_ON(!ladder[i].pages_to_scan);
++		uksm_calc_rung_step(&ladder[i], per_page, ratio);
++	}
++}
++
++/*
++ * From the scan time of this round (ns) to next expected min sleep time
++ * (ms), be careful of the possible overflows. ratio is taken from
++ * rung_real_ratio()
++ */
++static inline
++unsigned int scan_time_to_sleep(unsigned long long scan_time, unsigned long ratio)
++{
++	scan_time >>= 20; /* to msec level now */
++	BUG_ON(scan_time > (ULONG_MAX / TIME_RATIO_SCALE));
++
++	return (unsigned int) ((unsigned long) scan_time *
++			      (TIME_RATIO_SCALE - ratio) / ratio);
++}
++
++#define __round_mask(x, y) ((__typeof__(x))((y)-1))
++#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
++
++static void uksm_vma_enter(struct vma_slot **slots, unsigned long num)
++{
++	struct scan_rung *rung;
++
++	rung = &uksm_scan_ladder[0];
++	rung_add_new_slots(rung, slots, num);
++}
++
++static struct vma_slot *batch_slots[SLOT_TREE_NODE_STORE_SIZE];
++
++static void uksm_enter_all_slots(void)
++{
++	struct vma_slot *slot;
++	unsigned long index;
++	struct list_head empty_vma_list;
++	int i;
++
++	i = 0;
++	index = 0;
++	INIT_LIST_HEAD(&empty_vma_list);
++
++	spin_lock(&vma_slot_list_lock);
++	while (!list_empty(&vma_slot_new)) {
++		slot = list_entry(vma_slot_new.next,
++				  struct vma_slot, slot_list);
++
++		if (!slot->vma->anon_vma) {
++			list_move(&slot->slot_list, &empty_vma_list);
++		} else if (vma_can_enter(slot->vma)) {
++			batch_slots[index++] = slot;
++			list_del_init(&slot->slot_list);
++		} else {
++			list_move(&slot->slot_list, &vma_slot_noadd);
++		}
++
++		if (++i == SPIN_LOCK_PERIOD ||
++		    (index && !(index % SLOT_TREE_NODE_STORE_SIZE))) {
++			spin_unlock(&vma_slot_list_lock);
++
++			if (index && !(index % SLOT_TREE_NODE_STORE_SIZE)) {
++				uksm_vma_enter(batch_slots, index);
++				index = 0;
++			}
++			i = 0;
++			cond_resched();
++			spin_lock(&vma_slot_list_lock);
++		}
++	}
++
++	list_splice(&empty_vma_list, &vma_slot_new);
++
++	spin_unlock(&vma_slot_list_lock);
++
++	if (index)
++		uksm_vma_enter(batch_slots, index);
++}
++
++static inline int rung_round_finished(struct scan_rung *rung)
++{
++	return rung->flags & UKSM_RUNG_ROUND_FINISHED;
++}
++
++static inline void judge_slot(struct vma_slot *slot)
++{
++	struct scan_rung *rung = slot->rung;
++	unsigned long dedup;
++	int deleted;
++
++	dedup = cal_dedup_ratio(slot);
++	if (vma_fully_scanned(slot) && uksm_thrash_threshold)
++		deleted = vma_rung_enter(slot, &uksm_scan_ladder[0]);
++	else if (dedup && dedup >= uksm_abundant_threshold)
++		deleted = vma_rung_up(slot);
++	else
++		deleted = vma_rung_down(slot);
++
++	slot->pages_merged = 0;
++	slot->pages_cowed = 0;
++	slot->this_sampled = 0;
++
++	if (vma_fully_scanned(slot))
++		slot->pages_scanned = 0;
++
++	slot->last_scanned = slot->pages_scanned;
++
++	/* If it was deleted above, the rung has already been advanced. */
++	if (!deleted)
++		advance_current_scan(rung);
++}
++
++
++static inline int hash_round_finished(void)
++{
++	if (scanned_virtual_pages > (uksm_pages_total >> 2)) {
++		scanned_virtual_pages = 0;
++		if (uksm_pages_scanned)
++			fully_scanned_round++;
++
++		return 1;
++	} else {
++		return 0;
++	}
++}
++
++#define UKSM_MMSEM_BATCH 5
++#define BUSY_RETRY 100
++
++/**
++ * uksm_do_scan() - the main worker function.
++ */
++static noinline void uksm_do_scan(void)
++{
++	struct vma_slot *slot, *iter;
++	struct mm_struct *busy_mm;
++	unsigned char round_finished, all_rungs_empty;
++	int i, err, mmsem_batch;
++	unsigned long pcost;
++	long long delta_exec;
++	unsigned long vpages, max_cpu_ratio;
++	unsigned long long start_time, end_time, scan_time;
++	unsigned int expected_jiffies;
++
++	might_sleep();
++
++	vpages = 0;
++
++	start_time = task_sched_runtime(current);
++	max_cpu_ratio = 0;
++	mmsem_batch = 0;
++
++	for (i = 0; i < SCAN_LADDER_SIZE;) {
++		struct scan_rung *rung = &uksm_scan_ladder[i];
++		unsigned long ratio;
++		int busy_retry;
++
++		if (!rung->pages_to_scan) {
++			i++;
++			continue;
++		}
++
++		if (!rung->vma_root.num) {
++			rung->pages_to_scan = 0;
++			i++;
++			continue;
++		}
++
++		ratio = rung_real_ratio(rung->cpu_ratio);
++		if (ratio > max_cpu_ratio)
++			max_cpu_ratio = ratio;
++
++		busy_retry = BUSY_RETRY;
++		/*
++		 * Do not consider rung_round_finished() here, just use up the
++		 * rung->pages_to_scan quota.
++		 */
++		while (rung->pages_to_scan && rung->vma_root.num &&
++		       likely(!freezing(current))) {
++			int reset = 0;
++
++			slot = rung->current_scan;
++
++			BUG_ON(vma_fully_scanned(slot));
++
++			if (mmsem_batch) {
++				err = 0;
++			} else {
++				err = try_down_read_slot_mmap_sem(slot);
++			}
++
++			if (err == -ENOENT) {
++rm_slot:
++				rung_rm_slot(slot);
++				continue;
++			}
++
++			busy_mm = slot->mm;
++
++			if (err == -EBUSY) {
++				/* skip other vmas on the same mm */
++				do {
++					reset = advance_current_scan(rung);
++					iter = rung->current_scan;
++					busy_retry--;
++					if (iter->vma->vm_mm != busy_mm ||
++					    !busy_retry || reset)
++						break;
++				} while (1);
++
++				if (iter->vma->vm_mm != busy_mm) {
++					continue;
++				} else {
++					/* scan round finished */
++					break;
++				}
++			}
++
++			BUG_ON(!vma_can_enter(slot->vma));
++			if (uksm_test_exit(slot->vma->vm_mm)) {
++				mmsem_batch = 0;
++				up_read(&slot->vma->vm_mm->mmap_sem);
++				goto rm_slot;
++			}
++
++			if (mmsem_batch)
++				mmsem_batch--;
++			else
++				mmsem_batch = UKSM_MMSEM_BATCH;
++
++			/* OK, we have taken the mmap_sem, ready to scan */
++			scan_vma_one_page(slot);
++			rung->pages_to_scan--;
++			vpages++;
++
++			if (rung->current_offset + rung->step > slot->pages - 1
++					|| vma_fully_scanned(slot)) {
++				up_read(&slot->vma->vm_mm->mmap_sem);
++				judge_slot(slot);
++				mmsem_batch = 0;
++			} else {
++				rung->current_offset += rung->step;
++				if (!mmsem_batch)
++					up_read(&slot->vma->vm_mm->mmap_sem);
++			}
++
++			busy_retry = BUSY_RETRY;
++			cond_resched();
++		}
++
++		if (mmsem_batch) {
++			up_read(&slot->vma->vm_mm->mmap_sem);
++			mmsem_batch = 0;
++		}
++
++		if (freezing(current))
++			break;
++
++		cond_resched();
++	}
++	end_time = task_sched_runtime(current);
++	delta_exec = end_time - start_time;
++
++	if (freezing(current))
++		return;
++
++	cleanup_vma_slots();
++	uksm_enter_all_slots();
++
++	round_finished = 1;
++	all_rungs_empty = 1;
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		struct scan_rung *rung = &uksm_scan_ladder[i];
++
++		if (rung->vma_root.num) {
++			all_rungs_empty = 0;
++			if (!rung_round_finished(rung))
++				round_finished = 0;
++		}
++	}
++
++	if (all_rungs_empty)
++		round_finished = 0;
++
++	if (round_finished) {
++		round_update_ladder();
++		uksm_eval_round++;
++
++		if (hash_round_finished() && rshash_adjust()) {
++			/* Reset the unstable root iff hash strength changed */
++			uksm_hash_round++;
++			root_unstable_tree = RB_ROOT;
++			free_all_tree_nodes(&unstable_tree_node_list);
++		}
++
++		/*
++		 * A number of pages can hang around indefinitely on per-cpu
++		 * pagevecs, raised page count preventing write_protect_page
++		 * from merging them. Though it doesn't really matter much,
++		 * it is puzzling to see some stuck in pages_volatile until
++		 * other activity jostles them out, and they also prevented
++		 * LTP's KSM test from succeeding deterministically; so drain
++		 * them here (here rather than on entry to uksm_do_scan(),
++		 * so we don't IPI too often when pages_to_scan is set low).
++ */ ++ lru_add_drain_all(); ++ } ++ ++ ++ if (vpages && delta_exec > 0) { ++ pcost = (unsigned long) delta_exec / vpages; ++ if (likely(uksm_ema_page_time)) ++ uksm_ema_page_time = ema(pcost, uksm_ema_page_time); ++ else ++ uksm_ema_page_time = pcost; ++ } ++ ++ uksm_calc_scan_pages(); ++ uksm_sleep_real = uksm_sleep_jiffies; ++ /* in case of radical cpu bursts, apply the upper bound */ ++ end_time = task_sched_runtime(current); ++ if (max_cpu_ratio && end_time > start_time) { ++ scan_time = end_time - start_time; ++ expected_jiffies = msecs_to_jiffies( ++ scan_time_to_sleep(scan_time, max_cpu_ratio)); ++ ++ if (expected_jiffies > uksm_sleep_real) ++ uksm_sleep_real = expected_jiffies; ++ ++ /* We have a 1 second up bound for responsiveness. */ ++ if (jiffies_to_msecs(uksm_sleep_real) > MSEC_PER_SEC) ++ uksm_sleep_real = msecs_to_jiffies(1000); ++ } ++ ++ return; ++} ++ ++static int ksmd_should_run(void) ++{ ++ return uksm_run & UKSM_RUN_MERGE; ++} ++ ++static int uksm_scan_thread(void *nothing) ++{ ++ set_freezable(); ++ set_user_nice(current, 5); ++ ++ while (!kthread_should_stop()) { ++ mutex_lock(&uksm_thread_mutex); ++ if (ksmd_should_run()) { ++ uksm_do_scan(); ++ } ++ mutex_unlock(&uksm_thread_mutex); ++ ++ try_to_freeze(); ++ ++ if (ksmd_should_run()) { ++ schedule_timeout_interruptible(uksm_sleep_real); ++ uksm_sleep_times++; ++ } else { ++ wait_event_freezable(uksm_thread_wait, ++ ksmd_should_run() || kthread_should_stop()); ++ } ++ } ++ return 0; ++} ++ ++int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) ++{ ++ struct stable_node *stable_node; ++ struct node_vma *node_vma; ++ struct rmap_item *rmap_item; ++ int ret = SWAP_AGAIN; ++ int search_new_forks = 0; ++ unsigned long address; ++ ++ VM_BUG_ON_PAGE(!PageKsm(page), page); ++ VM_BUG_ON_PAGE(!PageLocked(page), page); ++ ++ stable_node = page_stable_node(page); ++ if (!stable_node) ++ return ret; ++again: ++ hlist_for_each_entry(node_vma, &stable_node->hlist, hlist) { ++ hlist_for_each_entry(rmap_item, &node_vma->rmap_hlist, hlist) { ++ struct anon_vma *anon_vma = rmap_item->anon_vma; ++ struct anon_vma_chain *vmac; ++ struct vm_area_struct *vma; ++ ++ cond_resched(); ++ anon_vma_lock_read(anon_vma); ++ anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, ++ 0, ULONG_MAX) { ++ cond_resched(); ++ vma = vmac->vma; ++ address = get_rmap_addr(rmap_item); ++ ++ if (address < vma->vm_start || ++ address >= vma->vm_end) ++ continue; ++ ++ if ((rmap_item->slot->vma == vma) == ++ search_new_forks) ++ continue; ++ ++ if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) ++ continue; ++ ++ ret = rwc->rmap_one(page, vma, address, rwc->arg); ++ if (ret != SWAP_AGAIN) { ++ anon_vma_unlock_read(anon_vma); ++ goto out; ++ } ++ ++ if (rwc->done && rwc->done(page)) { ++ anon_vma_unlock_read(anon_vma); ++ goto out; ++ } ++ } ++ anon_vma_unlock_read(anon_vma); ++ } ++ } ++ if (!search_new_forks++) ++ goto again; ++out: ++ return ret; ++} ++ ++#ifdef CONFIG_MIGRATION ++/* Common ksm interface but may be specific to uksm */ ++void ksm_migrate_page(struct page *newpage, struct page *oldpage) ++{ ++ struct stable_node *stable_node; ++ ++ VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); ++ VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); ++ VM_BUG_ON(newpage->mapping != oldpage->mapping); ++ ++ stable_node = page_stable_node(newpage); ++ if (stable_node) { ++ VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); ++ stable_node->kpfn = page_to_pfn(newpage); ++ /* ++ * newpage->mapping was set in advance; now we need smp_wmb() ++ * 
to make sure that the new stable_node->kpfn is visible ++ * to get_ksm_page() before it can see that oldpage->mapping ++ * has gone stale (or that PageSwapCache has been cleared). ++ */ ++ smp_wmb(); ++ set_page_stable_node(oldpage, NULL); ++ } ++} ++#endif /* CONFIG_MIGRATION */ ++ ++#ifdef CONFIG_MEMORY_HOTREMOVE ++static struct stable_node *uksm_check_stable_tree(unsigned long start_pfn, ++ unsigned long end_pfn) ++{ ++ struct rb_node *node; ++ ++ for (node = rb_first(root_stable_treep); node; node = rb_next(node)) { ++ struct stable_node *stable_node; ++ ++ stable_node = rb_entry(node, struct stable_node, node); ++ if (stable_node->kpfn >= start_pfn && ++ stable_node->kpfn < end_pfn) ++ return stable_node; ++ } ++ return NULL; ++} ++ ++static int uksm_memory_callback(struct notifier_block *self, ++ unsigned long action, void *arg) ++{ ++ struct memory_notify *mn = arg; ++ struct stable_node *stable_node; ++ ++ switch (action) { ++ case MEM_GOING_OFFLINE: ++ /* ++ * Keep it very simple for now: just lock out ksmd and ++ * MADV_UNMERGEABLE while any memory is going offline. ++ * mutex_lock_nested() is necessary because lockdep was alarmed ++ * that here we take uksm_thread_mutex inside notifier chain ++ * mutex, and later take notifier chain mutex inside ++ * uksm_thread_mutex to unlock it. But that's safe because both ++ * are inside mem_hotplug_mutex. ++ */ ++ mutex_lock_nested(&uksm_thread_mutex, SINGLE_DEPTH_NESTING); ++ break; ++ ++ case MEM_OFFLINE: ++ /* ++ * Most of the work is done by page migration; but there might ++ * be a few stable_nodes left over, still pointing to struct ++ * pages which have been offlined: prune those from the tree. ++ */ ++ while ((stable_node = uksm_check_stable_tree(mn->start_pfn, ++ mn->start_pfn + mn->nr_pages)) != NULL) ++ remove_node_from_stable_tree(stable_node, 1, 1); ++ /* fallthrough */ ++ ++ case MEM_CANCEL_OFFLINE: ++ mutex_unlock(&uksm_thread_mutex); ++ break; ++ } ++ return NOTIFY_OK; ++} ++#endif /* CONFIG_MEMORY_HOTREMOVE */ ++ ++#ifdef CONFIG_SYSFS ++/* ++ * This all compiles without CONFIG_SYSFS, but is a waste of space. 
++ */ ++ ++#define UKSM_ATTR_RO(_name) \ ++ static struct kobj_attribute _name##_attr = __ATTR_RO(_name) ++#define UKSM_ATTR(_name) \ ++ static struct kobj_attribute _name##_attr = \ ++ __ATTR(_name, 0644, _name##_show, _name##_store) ++ ++static ssize_t max_cpu_percentage_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", uksm_max_cpu_percentage); ++} ++ ++static ssize_t max_cpu_percentage_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ unsigned long max_cpu_percentage; ++ int err; ++ ++ err = kstrtoul(buf, 10, &max_cpu_percentage); ++ if (err || max_cpu_percentage > 100) ++ return -EINVAL; ++ ++ if (max_cpu_percentage == 100) ++ max_cpu_percentage = 99; ++ else if (max_cpu_percentage < 10) ++ max_cpu_percentage = 10; ++ ++ uksm_max_cpu_percentage = max_cpu_percentage; ++ ++ return count; ++} ++UKSM_ATTR(max_cpu_percentage); ++ ++static ssize_t sleep_millisecs_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%u\n", jiffies_to_msecs(uksm_sleep_jiffies)); ++} ++ ++static ssize_t sleep_millisecs_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ unsigned long msecs; ++ int err; ++ ++ err = kstrtoul(buf, 10, &msecs); ++ if (err || msecs > MSEC_PER_SEC) ++ return -EINVAL; ++ ++ uksm_sleep_jiffies = msecs_to_jiffies(msecs); ++ uksm_sleep_saved = uksm_sleep_jiffies; ++ ++ return count; ++} ++UKSM_ATTR(sleep_millisecs); ++ ++ ++static ssize_t cpu_governor_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ int n = sizeof(uksm_cpu_governor_str) / sizeof(char *); ++ int i; ++ ++ buf[0] = '\0'; ++ for (i = 0; i < n ; i++) { ++ if (uksm_cpu_governor == i) ++ strcat(buf, "["); ++ ++ strcat(buf, uksm_cpu_governor_str[i]); ++ ++ if (uksm_cpu_governor == i) ++ strcat(buf, "]"); ++ ++ strcat(buf, " "); ++ } ++ strcat(buf, "\n"); ++ ++ return strlen(buf); ++} ++ ++static inline void init_performance_values(void) ++{ ++ int i; ++ struct scan_rung *rung; ++ struct uksm_cpu_preset_s *preset = uksm_cpu_preset + uksm_cpu_governor; ++ ++ ++ for (i = 0; i < SCAN_LADDER_SIZE; i++) { ++ rung = uksm_scan_ladder + i; ++ rung->cpu_ratio = preset->cpu_ratio[i]; ++ rung->cover_msecs = preset->cover_msecs[i]; ++ } ++ ++ uksm_max_cpu_percentage = preset->max_cpu; ++} ++ ++static ssize_t cpu_governor_store(struct kobject *kobj, ++ struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int n = sizeof(uksm_cpu_governor_str) / sizeof(char *); ++ ++ for (n--; n >=0 ; n--) { ++ if (!strncmp(buf, uksm_cpu_governor_str[n], ++ strlen(uksm_cpu_governor_str[n]))) ++ break; ++ } ++ ++ if (n < 0) ++ return -EINVAL; ++ else ++ uksm_cpu_governor = n; ++ ++ init_performance_values(); ++ ++ return count; ++} ++UKSM_ATTR(cpu_governor); ++ ++static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, ++ char *buf) ++{ ++ return sprintf(buf, "%u\n", uksm_run); ++} ++ ++static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, ++ const char *buf, size_t count) ++{ ++ int err; ++ unsigned long flags; ++ ++ err = kstrtoul(buf, 10, &flags); ++ if (err || flags > UINT_MAX) ++ return -EINVAL; ++ if (flags > UKSM_RUN_MERGE) ++ return -EINVAL; ++ ++ mutex_lock(&uksm_thread_mutex); ++ if (uksm_run != flags) { ++ uksm_run = flags; ++ } ++ mutex_unlock(&uksm_thread_mutex); ++ ++ if (flags & UKSM_RUN_MERGE) ++ wake_up_interruptible(&uksm_thread_wait); ++ ++ return count; ++} 
++UKSM_ATTR(run);
++
++static ssize_t abundant_threshold_show(struct kobject *kobj,
++				       struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%u\n", uksm_abundant_threshold);
++}
++
++static ssize_t abundant_threshold_store(struct kobject *kobj,
++					struct kobj_attribute *attr,
++					const char *buf, size_t count)
++{
++	int err;
++	unsigned long flags;
++
++	err = kstrtoul(buf, 10, &flags);
++	if (err || flags > 99)
++		return -EINVAL;
++
++	uksm_abundant_threshold = flags;
++
++	return count;
++}
++UKSM_ATTR(abundant_threshold);
++
++static ssize_t thrash_threshold_show(struct kobject *kobj,
++				     struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%u\n", uksm_thrash_threshold);
++}
++
++static ssize_t thrash_threshold_store(struct kobject *kobj,
++				      struct kobj_attribute *attr,
++				      const char *buf, size_t count)
++{
++	int err;
++	unsigned long flags;
++
++	err = kstrtoul(buf, 10, &flags);
++	if (err || flags > 99)
++		return -EINVAL;
++
++	uksm_thrash_threshold = flags;
++
++	return count;
++}
++UKSM_ATTR(thrash_threshold);
++
++static ssize_t cpu_ratios_show(struct kobject *kobj,
++			       struct kobj_attribute *attr, char *buf)
++{
++	int i, size;
++	struct scan_rung *rung;
++	char *p = buf;
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++
++		if (rung->cpu_ratio > 0)
++			size = sprintf(p, "%d ", rung->cpu_ratio);
++		else
++			size = sprintf(p, "MAX/%d ",
++					TIME_RATIO_SCALE / -rung->cpu_ratio);
++
++		p += size;
++	}
++
++	*p++ = '\n';
++	*p = '\0';
++
++	return p - buf;
++}
++
++static ssize_t cpu_ratios_store(struct kobject *kobj,
++				struct kobj_attribute *attr,
++				const char *buf, size_t count)
++{
++	int i, cpuratios[SCAN_LADDER_SIZE], err;
++	unsigned long value;
++	struct scan_rung *rung;
++	char *p, *mem, *end = NULL;
++	ssize_t ret = count;
++
++	mem = p = kzalloc(count + 2, GFP_KERNEL);
++	if (!p)
++		return -ENOMEM;
++
++	memcpy(p, buf, count);
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		if (i != SCAN_LADDER_SIZE - 1) {
++			end = strchr(p, ' ');
++			if (!end) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			*end = '\0';
++		}
++
++		if (strstr(p, "MAX/")) {
++			p = strchr(p, '/') + 1;
++			err = kstrtoul(p, 10, &value);
++			if (err || value > TIME_RATIO_SCALE || !value) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			cpuratios[i] = -(int) (TIME_RATIO_SCALE / value);
++		} else {
++			err = kstrtoul(p, 10, &value);
++			if (err || value > TIME_RATIO_SCALE || !value) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			cpuratios[i] = value;
++		}
++
++		p = end + 1;
++	}
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++
++		rung->cpu_ratio = cpuratios[i];
++	}
++
++out:
++	kfree(mem);
++	return ret;
++}
++UKSM_ATTR(cpu_ratios);
++
++static ssize_t eval_intervals_show(struct kobject *kobj,
++				   struct kobj_attribute *attr, char *buf)
++{
++	int i, size;
++	struct scan_rung *rung;
++	char *p = buf;
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++		size = sprintf(p, "%u ", rung->cover_msecs);
++		p += size;
++	}
++
++	*p++ = '\n';
++	*p = '\0';
++
++	return p - buf;
++}
++
++static ssize_t eval_intervals_store(struct kobject *kobj,
++				    struct kobj_attribute *attr,
++				    const char *buf, size_t count)
++{
++	int i, err;
++	unsigned long values[SCAN_LADDER_SIZE];
++	struct scan_rung *rung;
++	char *p, *mem, *end = NULL;
++	ssize_t ret = count;
++
++	mem = p = kzalloc(count + 2, GFP_KERNEL);
++	if (!p)
++		return -ENOMEM;
++
++	memcpy(p, buf, count);
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		if (i != SCAN_LADDER_SIZE - 1) {
++			end = strchr(p, ' ');
++			if (!end) {
++				ret = -EINVAL;
++				goto out;
++			}
++
++			*end = '\0';
++		}
++
++		err = kstrtoul(p, 10, &values[i]);
++		if (err) {
++			ret = -EINVAL;
++			goto out;
++		}
++
++		p = end + 1;
++	}
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = &uksm_scan_ladder[i];
++
++		rung->cover_msecs = values[i];
++	}
++
++out:
++	kfree(mem);
++	return ret;
++}
++UKSM_ATTR(eval_intervals);
++
++static ssize_t ema_per_page_time_show(struct kobject *kobj,
++				      struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", uksm_ema_page_time);
++}
++UKSM_ATTR_RO(ema_per_page_time);
++
++static ssize_t pages_shared_show(struct kobject *kobj,
++				 struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", uksm_pages_shared);
++}
++UKSM_ATTR_RO(pages_shared);
++
++static ssize_t pages_sharing_show(struct kobject *kobj,
++				  struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", uksm_pages_sharing);
++}
++UKSM_ATTR_RO(pages_sharing);
++
++static ssize_t pages_unshared_show(struct kobject *kobj,
++				   struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", uksm_pages_unshared);
++}
++UKSM_ATTR_RO(pages_unshared);
++
++static ssize_t full_scans_show(struct kobject *kobj,
++			       struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%llu\n", fully_scanned_round);
++}
++UKSM_ATTR_RO(full_scans);
++
++static ssize_t pages_scanned_show(struct kobject *kobj,
++				  struct kobj_attribute *attr, char *buf)
++{
++	unsigned long base = 0;
++	u64 delta, ret;
++
++	if (pages_scanned_stored) {
++		base = pages_scanned_base;
++		ret = pages_scanned_stored;
++		delta = uksm_pages_scanned >> base;
++		if (CAN_OVERFLOW_U64(ret, delta)) {
++			ret >>= 1;
++			delta >>= 1;
++			base++;
++			ret += delta;
++		}
++	} else {
++		ret = uksm_pages_scanned;
++	}
++
++	while (ret > ULONG_MAX) {
++		ret >>= 1;
++		base++;
++	}
++
++	if (base)
++		return sprintf(buf, "%lu * 2^%lu\n", (unsigned long)ret, base);
++	else
++		return sprintf(buf, "%lu\n", (unsigned long)ret);
++}
++UKSM_ATTR_RO(pages_scanned);
++
++static ssize_t hash_strength_show(struct kobject *kobj,
++				  struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%lu\n", hash_strength);
++}
++UKSM_ATTR_RO(hash_strength);
++
++static ssize_t sleep_times_show(struct kobject *kobj,
++				struct kobj_attribute *attr, char *buf)
++{
++	return sprintf(buf, "%llu\n", uksm_sleep_times);
++}
++UKSM_ATTR_RO(sleep_times);
++
++
++static struct attribute *uksm_attrs[] = {
++	&max_cpu_percentage_attr.attr,
++	&sleep_millisecs_attr.attr,
++	&cpu_governor_attr.attr,
++	&run_attr.attr,
++	&ema_per_page_time_attr.attr,
++	&pages_shared_attr.attr,
++	&pages_sharing_attr.attr,
++	&pages_unshared_attr.attr,
++	&full_scans_attr.attr,
++	&pages_scanned_attr.attr,
++	&hash_strength_attr.attr,
++	&sleep_times_attr.attr,
++	&thrash_threshold_attr.attr,
++	&abundant_threshold_attr.attr,
++	&cpu_ratios_attr.attr,
++	&eval_intervals_attr.attr,
++	NULL,
++};
++
++static struct attribute_group uksm_attr_group = {
++	.attrs = uksm_attrs,
++	.name = "uksm",
++};
++#endif /* CONFIG_SYSFS */
++
++static inline void init_scan_ladder(void)
++{
++	int i;
++	struct scan_rung *rung;
++
++	for (i = 0; i < SCAN_LADDER_SIZE; i++) {
++		rung = uksm_scan_ladder + i;
++		slot_tree_init_root(&rung->vma_root);
++	}
++
++	init_performance_values();
++	uksm_calc_scan_pages();
++}
++
++static inline int cal_positive_negative_costs(void)
++{
++	struct page *p1, *p2;
++	unsigned char *addr1, *addr2;
++	unsigned long i, time_start, hash_cost;
++	unsigned long loopnum = 0;
++
++	/* IMPORTANT: volatile is needed to prevent over-optimization by gcc. */
++	volatile u32 hash;
++	volatile int ret;
++
++	p1 = alloc_page(GFP_KERNEL);
++	if (!p1)
++		return -ENOMEM;
++
++	p2 = alloc_page(GFP_KERNEL);
++	if (!p2) {
++		__free_page(p1);
++		return -ENOMEM;
++	}
++
++	addr1 = kmap_atomic(p1);
++	addr2 = kmap_atomic(p2);
++	memset(addr1, prandom_u32(), PAGE_SIZE);
++	memcpy(addr2, addr1, PAGE_SIZE);
++
++	/* make sure that the two pages differ in last byte */
++	addr2[PAGE_SIZE-1] = ~addr2[PAGE_SIZE-1];
++	kunmap_atomic(addr2);
++	kunmap_atomic(addr1);
++
++	time_start = jiffies;
++	while (jiffies - time_start < 100) {
++		for (i = 0; i < 100; i++)
++			hash = page_hash(p1, HASH_STRENGTH_FULL, 0);
++		loopnum += 100;
++	}
++	hash_cost = (jiffies - time_start);
++
++	time_start = jiffies;
++	for (i = 0; i < loopnum; i++)
++		ret = pages_identical(p1, p2);
++	memcmp_cost = HASH_STRENGTH_FULL * (jiffies - time_start);
++	memcmp_cost /= hash_cost;
++	printk(KERN_INFO "UKSM: relative memcmp_cost = %lu "
++	       "hash=%u cmp_ret=%d.\n",
++	       memcmp_cost, hash, ret);
++
++	__free_page(p1);
++	__free_page(p2);
++	return 0;
++}
++
++static int init_zeropage_hash_table(void)
++{
++	struct page *page;
++	char *addr;
++	int i;
++
++	page = alloc_page(GFP_KERNEL);
++	if (!page)
++		return -ENOMEM;
++
++	addr = kmap_atomic(page);
++	memset(addr, 0, PAGE_SIZE);
++	kunmap_atomic(addr);
++
++	zero_hash_table = kmalloc(HASH_STRENGTH_MAX * sizeof(u32),
++				  GFP_KERNEL);
++	if (!zero_hash_table) {
++		__free_page(page);
++		return -ENOMEM;
++	}
++
++	for (i = 0; i < HASH_STRENGTH_MAX; i++)
++		zero_hash_table[i] = page_hash(page, i, 0);
++
++	__free_page(page);
++
++	return 0;
++}
++
++static inline int init_random_sampling(void)
++{
++	unsigned long i;
++
++	random_nums = kmalloc(PAGE_SIZE, GFP_KERNEL);
++	if (!random_nums)
++		return -ENOMEM;
++
++	for (i = 0; i < HASH_STRENGTH_FULL; i++)
++		random_nums[i] = i;
++
++	for (i = 0; i < HASH_STRENGTH_FULL; i++) {
++		unsigned long rand_range, swap_index, tmp;
++
++		rand_range = HASH_STRENGTH_FULL - i;
++		swap_index = i + prandom_u32() % rand_range;
++		tmp = random_nums[i];
++		random_nums[i] = random_nums[swap_index];
++		random_nums[swap_index] = tmp;
++	}
++
++	rshash_state.state = RSHASH_NEW;
++	rshash_state.below_count = 0;
++	rshash_state.lookup_window_index = 0;
++
++	return cal_positive_negative_costs();
++}
++
++static int __init uksm_slab_init(void)
++{
++	rmap_item_cache = UKSM_KMEM_CACHE(rmap_item, 0);
++	if (!rmap_item_cache)
++		goto out;
++
++	stable_node_cache = UKSM_KMEM_CACHE(stable_node, 0);
++	if (!stable_node_cache)
++		goto out_free1;
++
++	node_vma_cache = UKSM_KMEM_CACHE(node_vma, 0);
++	if (!node_vma_cache)
++		goto out_free2;
++
++	vma_slot_cache = UKSM_KMEM_CACHE(vma_slot, 0);
++	if (!vma_slot_cache)
++		goto out_free3;
++
++	tree_node_cache = UKSM_KMEM_CACHE(tree_node, 0);
++	if (!tree_node_cache)
++		goto out_free4;
++
++	return 0;
++
++out_free4:
++	kmem_cache_destroy(vma_slot_cache);
++out_free3:
++	kmem_cache_destroy(node_vma_cache);
++out_free2:
++	kmem_cache_destroy(stable_node_cache);
++out_free1:
++	kmem_cache_destroy(rmap_item_cache);
++out:
++	return -ENOMEM;
++}
++
++static void __init uksm_slab_free(void)
++{
++	kmem_cache_destroy(stable_node_cache);
++	kmem_cache_destroy(rmap_item_cache);
++	kmem_cache_destroy(node_vma_cache);
++	kmem_cache_destroy(vma_slot_cache);
++	kmem_cache_destroy(tree_node_cache);
++}
++
++/* Common interface with ksm; the implementation differs.
*/ ++int ksm_madvise(struct vm_area_struct *vma, unsigned long start, ++ unsigned long end, int advice, unsigned long *vm_flags) ++{ ++ int err; ++ ++ switch (advice) { ++ case MADV_MERGEABLE: ++ return 0; /* just ignore the advice */ ++ ++ case MADV_UNMERGEABLE: ++ if (!(*vm_flags & VM_MERGEABLE) || !uksm_flags_can_scan(*vm_flags)) ++ return 0; /* just ignore the advice */ ++ ++ if (vma->anon_vma) { ++ err = unmerge_uksm_pages(vma, start, end); ++ if (err) ++ return err; ++ } ++ ++ uksm_remove_vma(vma); ++ *vm_flags &= ~VM_MERGEABLE; ++ break; ++ } ++ ++ return 0; ++} ++ ++/* Common interface to ksm, actually the same. */ ++struct page *ksm_might_need_to_copy(struct page *page, ++ struct vm_area_struct *vma, unsigned long address) ++{ ++ struct anon_vma *anon_vma = page_anon_vma(page); ++ struct page *new_page; ++ ++ if (PageKsm(page)) { ++ if (page_stable_node(page)) ++ return page; /* no need to copy it */ ++ } else if (!anon_vma) { ++ return page; /* no need to copy it */ ++ } else if (anon_vma->root == vma->anon_vma->root && ++ page->index == linear_page_index(vma, address)) { ++ return page; /* still no need to copy it */ ++ } ++ if (!PageUptodate(page)) ++ return page; /* let do_swap_page report the error */ ++ ++ new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); ++ if (new_page) { ++ copy_user_highpage(new_page, page, address, vma); ++ ++ SetPageDirty(new_page); ++ __SetPageUptodate(new_page); ++ __SetPageLocked(new_page); ++ } ++ ++ return new_page; ++} ++ ++static int __init uksm_init(void) ++{ ++ struct task_struct *uksm_thread; ++ int err; ++ ++ uksm_sleep_jiffies = msecs_to_jiffies(100); ++ uksm_sleep_saved = uksm_sleep_jiffies; ++ ++ slot_tree_init(); ++ init_scan_ladder(); ++ ++ ++ err = init_random_sampling(); ++ if (err) ++ goto out_free2; ++ ++ err = uksm_slab_init(); ++ if (err) ++ goto out_free1; ++ ++ err = init_zeropage_hash_table(); ++ if (err) ++ goto out_free0; ++ ++ uksm_thread = kthread_run(uksm_scan_thread, NULL, "uksmd"); ++ if (IS_ERR(uksm_thread)) { ++ printk(KERN_ERR "uksm: creating kthread failed\n"); ++ err = PTR_ERR(uksm_thread); ++ goto out_free; ++ } ++ ++#ifdef CONFIG_SYSFS ++ err = sysfs_create_group(mm_kobj, &uksm_attr_group); ++ if (err) { ++ printk(KERN_ERR "uksm: register sysfs failed\n"); ++ kthread_stop(uksm_thread); ++ goto out_free; ++ } ++#else ++ uksm_run = UKSM_RUN_MERGE; /* no way for user to start it */ ++ ++#endif /* CONFIG_SYSFS */ ++ ++#ifdef CONFIG_MEMORY_HOTREMOVE ++ /* ++ * Choose a high priority since the callback takes uksm_thread_mutex: ++ * later callbacks could only be taking locks which nest within that. 
++ */ ++ hotplug_memory_notifier(uksm_memory_callback, 100); ++#endif ++ return 0; ++ ++out_free: ++ kfree(zero_hash_table); ++out_free0: ++ uksm_slab_free(); ++out_free1: ++ kfree(random_nums); ++out_free2: ++ kfree(uksm_scan_ladder); ++ return err; ++} ++ ++#ifdef MODULE ++subsys_initcall(ksm_init); ++#else ++late_initcall(uksm_init); ++#endif ++ +diff --git a/mm/vmstat.c b/mm/vmstat.c +index 7c28df3..b1f783f 100644 +--- a/mm/vmstat.c ++++ b/mm/vmstat.c +@@ -974,6 +974,9 @@ const char * const vmstat_text[] = { + "nr_dirtied", + "nr_written", + ++#ifdef CONFIG_UKSM ++ "nr_uksm_zero_pages", ++#endif + /* enum writeback_stat_item counters */ + "nr_dirty_threshold", + "nr_dirty_background_threshold", diff --git a/patches_disabled/05-intel-pstate-backport.patch b/patches_disabled/05-intel-pstate-backport.patch new file mode 100644 index 0000000..9e466e1 --- /dev/null +++ b/patches_disabled/05-intel-pstate-backport.patch @@ -0,0 +1,148 @@ +--- linux-4.8/drivers/cpufreq/intel_pstate.c.orig 2016-10-02 19:24:33.000000000 -0400 ++++ linux-4.8/drivers/cpufreq/intel_pstate.c 2016-10-09 19:32:01.073141319 -0400 +@@ -181,6 +181,8 @@ + * @cpu: CPU number for this instance data + * @update_util: CPUFreq utility callback information + * @update_util_set: CPUFreq utility callback is set ++ * @iowait_boost: iowait-related boost fraction ++ * @last_update: Time of the last update. + * @pstate: Stores P state limits for this CPU + * @vid: Stores VID limits for this CPU + * @pid: Stores PID parameters for this CPU +@@ -206,6 +208,7 @@ + struct vid_data vid; + struct _pid pid; + ++ u64 last_update; + u64 last_sample_time; + u64 prev_aperf; + u64 prev_mperf; +@@ -216,6 +219,7 @@ + struct acpi_processor_performance acpi_perf_data; + bool valid_pss_table; + #endif ++ unsigned int iowait_boost; + }; + + static struct cpudata **all_cpu_data; +@@ -229,6 +233,7 @@ + * @p_gain_pct: PID proportional gain + * @i_gain_pct: PID integral gain + * @d_gain_pct: PID derivative gain ++ * @boost_iowait: Whether or not to use iowait boosting. + * + * Stores per CPU model static PID configuration data. 
+ */ +@@ -240,6 +245,7 @@ + int p_gain_pct; + int d_gain_pct; + int i_gain_pct; ++ bool boost_iowait; + }; + + /** +@@ -1029,7 +1035,7 @@ + }, + }; + +-static struct cpu_defaults silvermont_params = { ++static const struct cpu_defaults silvermont_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, +@@ -1037,6 +1043,7 @@ + .p_gain_pct = 14, + .d_gain_pct = 0, + .i_gain_pct = 4, ++ .boost_iowait = true, + }, + .funcs = { + .get_max = atom_get_max_pstate, +@@ -1050,7 +1057,7 @@ + }, + }; + +-static struct cpu_defaults airmont_params = { ++static const struct cpu_defaults airmont_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, +@@ -1058,6 +1065,7 @@ + .p_gain_pct = 14, + .d_gain_pct = 0, + .i_gain_pct = 4, ++ .boost_iowait = true, + }, + .funcs = { + .get_max = atom_get_max_pstate, +@@ -1071,7 +1079,7 @@ + }, + }; + +-static struct cpu_defaults knl_params = { ++static const struct cpu_defaults knl_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, +@@ -1091,7 +1099,7 @@ + }, + }; + +-static struct cpu_defaults bxt_params = { ++static const struct cpu_defaults bxt_params = { + .pid_policy = { + .sample_rate_ms = 10, + .deadband = 0, +@@ -1099,6 +1107,7 @@ + .p_gain_pct = 14, + .d_gain_pct = 0, + .i_gain_pct = 4, ++ .boost_iowait = true, + }, + .funcs = { + .get_max = core_get_max_pstate, +@@ -1222,36 +1231,18 @@ + static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu) + { + struct sample *sample = &cpu->sample; +- u64 cummulative_iowait, delta_iowait_us; +- u64 delta_iowait_mperf; +- u64 mperf, now; +- int32_t cpu_load; ++ int32_t busy_frac, boost; + +- cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now); ++ busy_frac = div_fp(sample->mperf, sample->tsc); + +- /* +- * Convert iowait time into number of IO cycles spent at max_freq. +- * IO is considered as busy only for the cpu_load algorithm. For +- * performance this is not needed since we always try to reach the +- * maximum P-State, so we are already boosting the IOs. +- */ +- delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait; +- delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling * +- cpu->pstate.max_pstate, MSEC_PER_SEC); +- +- mperf = cpu->sample.mperf + delta_iowait_mperf; +- cpu->prev_cummulative_iowait = cummulative_iowait; ++ boost = cpu->iowait_boost; ++ cpu->iowait_boost >>= 1; + +- /* +- * The load can be estimated as the ratio of the mperf counter +- * running at a constant frequency during active periods +- * (C0) and the time stamp counter running at the same frequency +- * also during C-states. 
+- */ +- cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc); +- cpu->sample.busy_scaled = cpu_load; ++ if (busy_frac < boost) ++ busy_frac = boost; + +- return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load); ++ sample->busy_scaled = busy_frac * 100; ++ return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled); + } + + static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu) diff --git a/patches_disabled/06-dirty_cow.patch b/patches_disabled/06-dirty_cow.patch new file mode 100644 index 0000000..0b9d611 --- /dev/null +++ b/patches_disabled/06-dirty_cow.patch @@ -0,0 +1,94 @@ +From 19be0eaffa3ac7d8eb6784ad9bdbc7d67ed8e619 Mon Sep 17 00:00:00 2001 +From: Linus Torvalds +Date: Thu, 13 Oct 2016 13:07:36 -0700 +Subject: mm: remove gup_flags FOLL_WRITE games from __get_user_pages() + +This is an ancient bug that was actually attempted to be fixed once +(badly) by me eleven years ago in commit 4ceb5db9757a ("Fix +get_user_pages() race for write access") but that was then undone due to +problems on s390 by commit f33ea7f404e5 ("fix get_user_pages bug"). + +In the meantime, the s390 situation has long been fixed, and we can now +fix it by checking the pte_dirty() bit properly (and do it better). The +s390 dirty bit was implemented in abf09bed3cce ("s390/mm: implement +software dirty bits") which made it into v3.9. Earlier kernels will +have to look at the page state itself. + +Also, the VM has become more scalable, and what used a purely +theoretical race back then has become easier to trigger. + +To fix it, we introduce a new internal FOLL_COW flag to mark the "yes, +we already did a COW" rather than play racy games with FOLL_WRITE that +is very fundamental, and then use the pte dirty flag to validate that +the FOLL_COW flag is still valid. + +Reported-and-tested-by: Phil "not Paul" Oester +Acked-by: Hugh Dickins +Reviewed-by: Michal Hocko +Cc: Andy Lutomirski +Cc: Kees Cook +Cc: Oleg Nesterov +Cc: Willy Tarreau +Cc: Nick Piggin +Cc: Greg Thelen +Cc: stable@vger.kernel.org +Signed-off-by: Linus Torvalds +--- + include/linux/mm.h | 1 + + mm/gup.c | 14 ++++++++++++-- + 2 files changed, 13 insertions(+), 2 deletions(-) + +diff --git a/include/linux/mm.h b/include/linux/mm.h +index e9caec6..ed85879 100644 +--- a/include/linux/mm.h ++++ b/include/linux/mm.h +@@ -2232,6 +2232,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma, + #define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */ + #define FOLL_MLOCK 0x1000 /* lock present pages */ + #define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */ ++#define FOLL_COW 0x4000 /* internal GUP flag */ + + typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr, + void *data); +diff --git a/mm/gup.c b/mm/gup.c +index 96b2b2f..22cc22e 100644 +--- a/mm/gup.c ++++ b/mm/gup.c +@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address, + return -EEXIST; + } + ++/* ++ * FOLL_FORCE can write to even unwritable pte's, but only ++ * after we've gone through a COW cycle and they are dirty. 
++ */ ++static inline bool can_follow_write_pte(pte_t pte, unsigned int flags) ++{ ++ return pte_write(pte) || ++ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte)); ++} ++ + static struct page *follow_page_pte(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmd, unsigned int flags) + { +@@ -95,7 +105,7 @@ retry: + } + if ((flags & FOLL_NUMA) && pte_protnone(pte)) + goto no_page; +- if ((flags & FOLL_WRITE) && !pte_write(pte)) { ++ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) { + pte_unmap_unlock(ptep, ptl); + return NULL; + } +@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma, + * reCOWed by userspace write). + */ + if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE)) +- *flags &= ~FOLL_WRITE; ++ *flags |= FOLL_COW; + return 0; + } + +-- +cgit v0.12 + diff --git a/patches_disabled/4.4-sched-bfs-467.patch b/patches_disabled/4.4-sched-bfs-467.patch new file mode 100644 index 0000000..a58da98 --- /dev/null +++ b/patches_disabled/4.4-sched-bfs-467.patch @@ -0,0 +1,9153 @@ +The Brain Fuck Scheduler v0.467 by Con Kolivas. + +A single shared runqueue O(n) strict fairness earliest deadline first design. + +Excellent throughput and latency for 1 to many CPUs on desktop and server +commodity hardware. +Not recommended for 4096 cpus. + +Scalability is optimal when your workload is equal to the number of CPUs on +bfs. ie you should ONLY do make -j4 on quad core, -j2 on dual core and so on. + +Features SCHED_IDLEPRIO and SCHED_ISO scheduling policies as well. +You do NOT need to use these policies for good performance, they are purely +optional for even better performance in extreme conditions. + +To run something idleprio, use schedtool like so: + +schedtool -D -e make -j4 + +To run something isoprio, use schedtool like so: + +schedtool -I -e amarok + +Includes configurable SMT-nice support for better nice level and scheduling +policy support across SMT (aka hyperthread) sibling CPUs. + +Includes accurate sub-tick accounting of tasks so userspace reported +cpu usage may be very different if you have very short lived tasks. 
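+
+As an aside for application authors (an illustrative sketch only, not part
+of this patch): a process can also request SCHED_ISO for itself through
+sched_setscheduler(2) instead of going through schedtool. The fallback
+value of 4 below is an assumption based on the policy number mainline
+reserves for SCHED_ISO; the uapi header shipped with this patch is the
+authoritative source.
+
+	#include <sched.h>
+	#include <stdio.h>
+	#include <string.h>
+	#include <errno.h>
+
+	#ifndef SCHED_ISO
+	#define SCHED_ISO 4	/* assumed value, see include/uapi/linux/sched.h */
+	#endif
+
+	int main(void)
+	{
+		struct sched_param param;
+
+		/* ISO carries no static priority, so leave it zeroed */
+		memset(&param, 0, sizeof(param));
+
+		if (sched_setscheduler(0, SCHED_ISO, &param) == -1) {
+			fprintf(stderr, "SCHED_ISO unavailable: %s\n",
+				strerror(errno));
+			return 1;
+		}
+		return 0;
+	}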
+ +-ck + +--- + +--- + Documentation/scheduler/sched-BFS.txt | 347 + + Documentation/sysctl/kernel.txt | 26 + arch/powerpc/platforms/cell/spufs/sched.c | 5 + arch/x86/Kconfig | 22 + drivers/cpufreq/cpufreq.c | 7 + drivers/cpufreq/cpufreq_conservative.c | 4 + drivers/cpufreq/cpufreq_ondemand.c | 4 + drivers/cpufreq/intel_pstate.c | 9 + fs/proc/base.c | 2 + include/linux/init_task.h | 76 + include/linux/ioprio.h | 2 + include/linux/jiffies.h | 2 + include/linux/sched.h | 93 + include/linux/sched/prio.h | 12 + include/uapi/linux/sched.h | 9 + init/Kconfig | 54 + init/main.c | 3 + kernel/delayacct.c | 2 + kernel/exit.c | 2 + kernel/sched/Makefile | 10 + kernel/sched/bfs.c | 7585 ++++++++++++++++++++++++++++++ + kernel/sched/bfs_sched.h | 181 + kernel/sched/idle.c | 4 + kernel/sched/stats.c | 4 + kernel/sysctl.c | 41 + kernel/time/Kconfig | 2 + kernel/time/posix-cpu-timers.c | 10 + kernel/trace/trace_selftest.c | 5 + lib/Kconfig.debug | 2 + 29 files changed, 8451 insertions(+), 74 deletions(-) + +Index: linux-4.4-ck1/arch/powerpc/platforms/cell/spufs/sched.c +=================================================================== +--- linux-4.4-ck1.orig/arch/powerpc/platforms/cell/spufs/sched.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/arch/powerpc/platforms/cell/spufs/sched.c 2016-03-25 16:03:45.288350392 +1100 +@@ -64,11 +64,6 @@ static struct timer_list spusched_timer; + static struct timer_list spuloadavg_timer; + + /* +- * Priority of a normal, non-rt, non-niced'd process (aka nice level 0). +- */ +-#define NORMAL_PRIO 120 +- +-/* + * Frequency of the spu scheduler tick. By default we do one SPU scheduler + * tick for every 10 CPU scheduler ticks. + */ +Index: linux-4.4-ck1/Documentation/scheduler/sched-BFS.txt +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-4.4-ck1/Documentation/scheduler/sched-BFS.txt 2016-03-25 16:03:45.288350392 +1100 +@@ -0,0 +1,347 @@ ++BFS - The Brain Fuck Scheduler by Con Kolivas. ++ ++Goals. ++ ++The goal of the Brain Fuck Scheduler, referred to as BFS from here on, is to ++completely do away with the complex designs of the past for the cpu process ++scheduler and instead implement one that is very simple in basic design. ++The main focus of BFS is to achieve excellent desktop interactivity and ++responsiveness without heuristics and tuning knobs that are difficult to ++understand, impossible to model and predict the effect of, and when tuned to ++one workload cause massive detriment to another. ++ ++ ++Design summary. ++ ++BFS is best described as a single runqueue, O(n) lookup, earliest effective ++virtual deadline first design, loosely based on EEVDF (earliest eligible virtual ++deadline first) and my previous Staircase Deadline scheduler. Each component ++shall be described in order to understand the significance of, and reasoning for ++it. The codebase when the first stable version was released was approximately ++9000 lines less code than the existing mainline linux kernel scheduler (in ++2.6.31). This does not even take into account the removal of documentation and ++the cgroups code that is not used. ++ ++Design reasoning. ++ ++The single runqueue refers to the queued but not running processes for the ++entire system, regardless of the number of CPUs. 
The reason for going back to ++a single runqueue design is that once multiple runqueues are introduced, ++per-CPU or otherwise, there will be complex interactions as each runqueue will ++be responsible for the scheduling latency and fairness of the tasks only on its ++own runqueue, and to achieve fairness and low latency across multiple CPUs, any ++advantage in throughput of having CPU local tasks causes other disadvantages. ++This is due to requiring a very complex balancing system to at best achieve some ++semblance of fairness across CPUs and can only maintain relatively low latency ++for tasks bound to the same CPUs, not across them. To increase said fairness ++and latency across CPUs, the advantage of local runqueue locking, which makes ++for better scalability, is lost due to having to grab multiple locks. ++ ++A significant feature of BFS is that all accounting is done purely based on CPU ++used and nowhere is sleep time used in any way to determine entitlement or ++interactivity. Interactivity "estimators" that use some kind of sleep/run ++algorithm are doomed to fail to detect all interactive tasks, and to falsely tag ++tasks that aren't interactive as being so. The reason for this is that it is ++close to impossible to determine that when a task is sleeping, whether it is ++doing it voluntarily, as in a userspace application waiting for input in the ++form of a mouse click or otherwise, or involuntarily, because it is waiting for ++another thread, process, I/O, kernel activity or whatever. Thus, such an ++estimator will introduce corner cases, and more heuristics will be required to ++cope with those corner cases, introducing more corner cases and failed ++interactivity detection and so on. Interactivity in BFS is built into the design ++by virtue of the fact that tasks that are waking up have not used up their quota ++of CPU time, and have earlier effective deadlines, thereby making it very likely ++they will preempt any CPU bound task of equivalent nice level. See below for ++more information on the virtual deadline mechanism. Even if they do not preempt ++a running task, because the rr interval is guaranteed to have a bound upper ++limit on how long a task will wait for, it will be scheduled within a timeframe ++that will not cause visible interface jitter. ++ ++ ++Design details. ++ ++Task insertion. ++ ++BFS inserts tasks into each relevant queue as an O(1) insertion into a double ++linked list. On insertion, *every* running queue is checked to see if the newly ++queued task can run on any idle queue, or preempt the lowest running task on the ++system. This is how the cross-CPU scheduling of BFS achieves significantly lower ++latency per extra CPU the system has. In this case the lookup is, in the worst ++case scenario, O(n) where n is the number of CPUs on the system. ++ ++Data protection. ++ ++BFS has one single lock protecting the process local data of every task in the ++global queue. Thus every insertion, removal and modification of task data in the ++global runqueue needs to grab the global lock. However, once a task is taken by ++a CPU, the CPU has its own local data copy of the running process' accounting ++information which only that CPU accesses and modifies (such as during a ++timer tick) thus allowing the accounting data to be updated lockless. Once a ++CPU has taken a task to run, it removes it from the global queue. 
Thus the ++global queue only ever has, at most, ++ ++ (number of tasks requesting cpu time) - (number of logical CPUs) + 1 ++ ++tasks in the global queue. This value is relevant for the time taken to look up ++tasks during scheduling. This will increase if many tasks with CPU affinity set ++in their policy to limit which CPUs they're allowed to run on if they outnumber ++the number of CPUs. The +1 is because when rescheduling a task, the CPU's ++currently running task is put back on the queue. Lookup will be described after ++the virtual deadline mechanism is explained. ++ ++Virtual deadline. ++ ++The key to achieving low latency, scheduling fairness, and "nice level" ++distribution in BFS is entirely in the virtual deadline mechanism. The one ++tunable in BFS is the rr_interval, or "round robin interval". This is the ++maximum time two SCHED_OTHER (or SCHED_NORMAL, the common scheduling policy) ++tasks of the same nice level will be running for, or looking at it the other ++way around, the longest duration two tasks of the same nice level will be ++delayed for. When a task requests cpu time, it is given a quota (time_slice) ++equal to the rr_interval and a virtual deadline. The virtual deadline is ++offset from the current time in jiffies by this equation: ++ ++ jiffies + (prio_ratio * rr_interval) ++ ++The prio_ratio is determined as a ratio compared to the baseline of nice -20 ++and increases by 10% per nice level. The deadline is a virtual one only in that ++no guarantee is placed that a task will actually be scheduled by this time, but ++it is used to compare which task should go next. There are three components to ++how a task is next chosen. First is time_slice expiration. If a task runs out ++of its time_slice, it is descheduled, the time_slice is refilled, and the ++deadline reset to that formula above. Second is sleep, where a task no longer ++is requesting CPU for whatever reason. The time_slice and deadline are _not_ ++adjusted in this case and are just carried over for when the task is next ++scheduled. Third is preemption, and that is when a newly waking task is deemed ++higher priority than a currently running task on any cpu by virtue of the fact ++that it has an earlier virtual deadline than the currently running task. The ++earlier deadline is the key to which task is next chosen for the first and ++second cases. Once a task is descheduled, it is put back on the queue, and an ++O(n) lookup of all queued-but-not-running tasks is done to determine which has ++the earliest deadline and that task is chosen to receive CPU next. ++ ++The CPU proportion of different nice tasks works out to be approximately the ++ ++ (prio_ratio difference)^2 ++ ++The reason it is squared is that a task's deadline does not change while it is ++running unless it runs out of time_slice. Thus, even if the time actually ++passes the deadline of another task that is queued, it will not get CPU time ++unless the current running task deschedules, and the time "base" (jiffies) is ++constantly moving. ++ ++Task lookup. ++ ++BFS has 103 priority queues. 100 of these are dedicated to the static priority ++of realtime tasks, and the remaining 3 are, in order of best to worst priority, ++SCHED_ISO (isochronous), SCHED_NORMAL, and SCHED_IDLEPRIO (idle priority ++scheduling). When a task of these priorities is queued, a bitmap of running ++priorities is set showing which of these priorities has tasks waiting for CPU ++time. 
When a CPU is made to reschedule, the lookup for the next task to get ++CPU time is performed in the following way: ++ ++First the bitmap is checked to see what static priority tasks are queued. If ++any realtime priorities are found, the corresponding queue is checked and the ++first task listed there is taken (provided CPU affinity is suitable) and lookup ++is complete. If the priority corresponds to a SCHED_ISO task, they are also ++taken in FIFO order (as they behave like SCHED_RR). If the priority corresponds ++to either SCHED_NORMAL or SCHED_IDLEPRIO, then the lookup becomes O(n). At this ++stage, every task in the runlist that corresponds to that priority is checked ++to see which has the earliest set deadline, and (provided it has suitable CPU ++affinity) it is taken off the runqueue and given the CPU. If a task has an ++expired deadline, it is taken and the rest of the lookup aborted (as they are ++chosen in FIFO order). ++ ++Thus, the lookup is O(n) in the worst case only, where n is as described ++earlier, as tasks may be chosen before the whole task list is looked over. ++ ++ ++Scalability. ++ ++The major limitations of BFS will be that of scalability, as the separate ++runqueue designs will have less lock contention as the number of CPUs rises. ++However they do not scale linearly even with separate runqueues as multiple ++runqueues will need to be locked concurrently on such designs to be able to ++achieve fair CPU balancing, to try and achieve some sort of nice-level fairness ++across CPUs, and to achieve low enough latency for tasks on a busy CPU when ++other CPUs would be more suited. BFS has the advantage that it requires no ++balancing algorithm whatsoever, as balancing occurs by proxy simply because ++all CPUs draw off the global runqueue, in priority and deadline order. Despite ++the fact that scalability is _not_ the prime concern of BFS, it both shows very ++good scalability to smaller numbers of CPUs and is likely a more scalable design ++at these numbers of CPUs. ++ ++It also has some very low overhead scalability features built into the design ++when it has been deemed their overhead is so marginal that they're worth adding. ++The first is the local copy of the running process' data to the CPU it's running ++on to allow that data to be updated lockless where possible. Then there is ++deference paid to the last CPU a task was running on, by trying that CPU first ++when looking for an idle CPU to use the next time it's scheduled. Finally there ++is the notion of "sticky" tasks that are flagged when they are involuntarily ++descheduled, meaning they still want further CPU time. This sticky flag is ++used to bias heavily against those tasks being scheduled on a different CPU ++unless that CPU would be otherwise idle. When a cpu frequency governor is used ++that scales with CPU load, such as ondemand, sticky tasks are not scheduled ++on a different CPU at all, preferring instead to go idle. This means the CPU ++they were bound to is more likely to increase its speed while the other CPU ++will go idle, thus speeding up total task execution time and likely decreasing ++power usage. This is the only scenario where BFS will allow a CPU to go idle ++in preference to scheduling a task on the earliest available spare CPU. 
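++
++As an illustration only (this sketch is not BFS source code): the virtual
++deadline bookkeeping described above is small enough to model in a few
++lines of C. The linear reading of "10% per nice level" and the list
++layout here are assumptions made purely for the example.
++
++	#include <stdint.h>
++	#include <stddef.h>
++
++	/* simplified: +10% per nice level above the nice -20 baseline */
++	static uint64_t virtual_deadline(int nice, uint64_t now_jiffies,
++					 uint64_t rr_interval)
++	{
++		uint64_t prio_ratio = 100 + (nice + 20) * 10;
++
++		return now_jiffies + prio_ratio * rr_interval / 100;
++	}
++
++	struct demo_task {
++		uint64_t deadline;
++		struct demo_task *next;
++	};
++
++	/* the O(n) part: scan every queued task, earliest deadline wins */
++	static struct demo_task *pick_next(struct demo_task *head)
++	{
++		struct demo_task *t, *best = head;
++
++		for (t = head; t != NULL; t = t->next)
++			if (t->deadline < best->deadline)
++				best = t;
++		return best;
++	}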
++ ++The real cost of migrating a task from one CPU to another is entirely dependant ++on the cache footprint of the task, how cache intensive the task is, how long ++it's been running on that CPU to take up the bulk of its cache, how big the CPU ++cache is, how fast and how layered the CPU cache is, how fast a context switch ++is... and so on. In other words, it's close to random in the real world where we ++do more than just one sole workload. The only thing we can be sure of is that ++it's not free. So BFS uses the principle that an idle CPU is a wasted CPU and ++utilising idle CPUs is more important than cache locality, and cache locality ++only plays a part after that. ++ ++When choosing an idle CPU for a waking task, the cache locality is determined ++according to where the task last ran and then idle CPUs are ranked from best ++to worst to choose the most suitable idle CPU based on cache locality, NUMA ++node locality and hyperthread sibling business. They are chosen in the ++following preference (if idle): ++ ++* Same core, idle or busy cache, idle threads ++* Other core, same cache, idle or busy cache, idle threads. ++* Same node, other CPU, idle cache, idle threads. ++* Same node, other CPU, busy cache, idle threads. ++* Same core, busy threads. ++* Other core, same cache, busy threads. ++* Same node, other CPU, busy threads. ++* Other node, other CPU, idle cache, idle threads. ++* Other node, other CPU, busy cache, idle threads. ++* Other node, other CPU, busy threads. ++ ++This shows the SMT or "hyperthread" awareness in the design as well which will ++choose a real idle core first before a logical SMT sibling which already has ++tasks on the physical CPU. ++ ++Early benchmarking of BFS suggested scalability dropped off at the 16 CPU mark. ++However this benchmarking was performed on an earlier design that was far less ++scalable than the current one so it's hard to know how scalable it is in terms ++of both CPUs (due to the global runqueue) and heavily loaded machines (due to ++O(n) lookup) at this stage. Note that in terms of scalability, the number of ++_logical_ CPUs matters, not the number of _physical_ CPUs. Thus, a dual (2x) ++quad core (4X) hyperthreaded (2X) machine is effectively a 16X. Newer benchmark ++results are very promising indeed, without needing to tweak any knobs, features ++or options. Benchmark contributions are most welcome. ++ ++ ++Features ++ ++As the initial prime target audience for BFS was the average desktop user, it ++was designed to not need tweaking, tuning or have features set to obtain benefit ++from it. Thus the number of knobs and features has been kept to an absolute ++minimum and should not require extra user input for the vast majority of cases. ++There are precisely 2 tunables, and 2 extra scheduling policies. The rr_interval ++and iso_cpu tunables, and the SCHED_ISO and SCHED_IDLEPRIO policies. In addition ++to this, BFS also uses sub-tick accounting. What BFS does _not_ now feature is ++support for CGROUPS. The average user should neither need to know what these ++are, nor should they need to be using them to have good desktop behaviour. ++ ++rr_interval ++ ++There is only one "scheduler" tunable, the round robin interval. This can be ++accessed in ++ ++ /proc/sys/kernel/rr_interval ++ ++The value is in milliseconds, and the default value is set to 6ms. Valid values ++are from 1 to 1000. 
++Decreasing the value will decrease latencies at the cost of decreasing
++throughput, while increasing it will improve throughput, but at the cost of
++worsening latencies. The accuracy of the rr interval is limited by the HZ
++resolution of the kernel configuration. Thus, the worst case latencies are
++usually slightly higher than this actual value. BFS uses "dithering" to try
++to minimise the effect the HZ limitation has. The default value of 6 is not
++an arbitrary one. It is based on the fact that humans can detect jitter at
++approximately 7ms, so aiming for much lower latencies is pointless under
++most circumstances. It is worth noting this fact when comparing the latency
++performance of BFS to other schedulers. Worst case latencies being higher
++than 7ms are far worse than average latencies not being in the microsecond
++range. Experimentation has shown that increasing the rr interval up to 300
++can improve throughput, but beyond that scheduling noise from elsewhere
++prevents further demonstrable throughput gains.
++
++Isochronous scheduling.
++
++Isochronous scheduling is a unique scheduling policy designed to provide
++near-real-time performance to unprivileged (i.e. non-root) users without the
++ability to starve the machine indefinitely. Isochronous ("same time") tasks
++are set using, for example, the schedtool application like so:
++
++   schedtool -I -e amarok
++
++This will start the audio application "amarok" as SCHED_ISO. SCHED_ISO tasks
++have a priority level between true realtime tasks and SCHED_NORMAL, which
++allows them to preempt all normal tasks, in a SCHED_RR fashion (i.e. if
++multiple SCHED_ISO tasks are running, they purely round robin at the
++rr_interval rate). However, if ISO tasks run for more than a tunable finite
++amount of time, they are then demoted back to SCHED_NORMAL scheduling. This
++finite amount of time is a percentage of the _total CPU_ available across
++the machine, configurable in the following "resource handling" tunable (as
++opposed to a scheduler tunable):
++
++   /proc/sys/kernel/iso_cpu
++
++and is set to 70% by default. It is calculated over a rolling 5 second
++average. Because it is the total CPU available, it means that on a multi CPU
++machine, it is possible to have an ISO task running with realtime scheduling
++indefinitely on just one CPU, as the other CPUs will be available. Setting
++this to 100 is the equivalent of giving all users SCHED_RR access, and
++setting it to 0 removes the ability to run any pseudo-realtime tasks.
++
++A feature of BFS is that it detects when an application tries to obtain a
++realtime policy (SCHED_RR or SCHED_FIFO) and the caller does not have the
++appropriate privileges to use those policies. When it detects this, it will
++give the task SCHED_ISO policy instead. Thus it is transparent to the user.
++Because some applications constantly set their policy as well as their nice
++level, there is potential for them to undo an override the user specified on
++the command line setting the policy to SCHED_ISO. To counter this, once a
++task has been set to SCHED_ISO policy, it needs superuser privileges to set
++it back to SCHED_NORMAL. This ensures the task remains ISO, and all child
++processes and threads will also inherit the ISO policy.
++
++Idleprio scheduling.
++
++Idleprio scheduling is a scheduling policy designed to give out CPU to a
++task _only_ when the CPU would be otherwise idle.
++The idea behind this is to allow ultra low priority tasks to be run in the
++background that have virtually no effect on the foreground tasks. This is
++ideally suited to distributed computing clients (like setiathome, folding,
++mprime, etc.) but can also be used to start a video encode or the like
++without any slowdown of other tasks. To prevent tasks under this policy from
++grabbing shared resources and holding them indefinitely, if BFS detects a
++state where the task is waiting on I/O, the machine is about to suspend to
++RAM, and so on, it will transiently schedule them as SCHED_NORMAL. As per
++the Isochronous task management, once a task has been scheduled as IDLEPRIO,
++it cannot be put back to SCHED_NORMAL without superuser privileges. Tasks
++can be set to start as SCHED_IDLEPRIO with the schedtool command like so:
++
++   schedtool -D -e ./mprime
++
++Subtick accounting.
++
++It is surprisingly difficult to get accurate CPU accounting, and in many
++cases, the accounting is done by simply determining what is happening at the
++precise moment a timer tick fires off. This becomes increasingly inaccurate
++as the timer tick frequency (HZ) is lowered. It is possible to create an
++application which uses almost 100% CPU, yet by being descheduled at the
++right time, records zero CPU usage. The main problem with this is the
++possible security implications, but it also makes it difficult to determine
++how much CPU a task really does use. BFS tries to use the sub-tick
++accounting from the TSC clock, where possible, to determine real CPU usage.
++This is not entirely reliable, but is far more likely to produce accurate
++CPU usage data than the existing designs, and will not show tasks as
++consuming no CPU usage when they actually are. Thus, the amount of CPU
++reported as being used by BFS will more accurately represent how much CPU
++the task itself is using (as is shown for example by the 'time'
++application), so the reported values may be quite different from those under
++other schedulers. Values reported as the 'load' are more prone to problems
++with this design, but per-process values are closer to real usage. When
++comparing the throughput of BFS to other designs, it is important to compare
++the actual completed work in terms of total wall clock time taken and total
++work done, rather than the reported "cpu usage".
++
++
++Con Kolivas Tue, 5 Apr 2011
+Index: linux-4.4-ck1/Documentation/sysctl/kernel.txt
+===================================================================
+--- linux-4.4-ck1.orig/Documentation/sysctl/kernel.txt 2016-03-25 16:03:45.297350563 +1100
++++ linux-4.4-ck1/Documentation/sysctl/kernel.txt 2016-03-25 16:03:45.289350411 +1100
+@@ -39,6 +39,7 @@ show up in /proc/sys/kernel:
+ - hung_task_timeout_secs
+ - hung_task_warnings
+ - kexec_load_disabled
++- iso_cpu
+ - kptr_restrict
+ - kstack_depth_to_print [ X86 only ]
+ - l2cr [ PPC only ]
+@@ -67,6 +68,7 @@ show up in /proc/sys/kernel:
+ - randomize_va_space
+ - real-root-dev ==> Documentation/initrd.txt
+ - reboot-cmd [ SPARC only ]
++- rr_interval
+ - rtsig-max
+ - rtsig-nr
+ - sem
+@@ -396,6 +398,16 @@ kernel stack.
+ 
+ ==============================================================
+ 
++iso_cpu: (BFS CPU scheduler only)
++
++This sets the percentage of cpu that unprivileged SCHED_ISO tasks can
++use to run effectively at realtime priority, averaged over a rolling
++five seconds over the -whole- system, meaning all cpus.
++
++Set to 70 (percent) by default.
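As a sketch of the arithmetic described above (the helper name and its caller
are invented for the illustration; only the budget formula comes from the
text):

    #include <stdbool.h>
    #include <stdio.h>

    /* The budget over the rolling window is 5 cpu-seconds per cpu, of
     * which SCHED_ISO tasks may consume iso_cpu percent in total. */
    static bool iso_within_budget(double iso_busy_secs, int nr_cpus, int iso_cpu)
    {
        return iso_busy_secs * 100.0 < iso_cpu * 5.0 * nr_cpus;
    }

    int main(void)
    {
        /* One ISO task saturating one CPU of a quad core uses 5 of the
         * 20 available cpu-seconds, i.e. 25% < 70%: never demoted. */
        printf("%s\n", iso_within_budget(5.0, 4, 70) ? "stays ISO" : "demoted");
        return 0;
    }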
++
++==============================================================
++
+ l2cr: (PPC only)
+ 
+ This flag controls the L2 cache of G3 processor boards. If
+@@ -735,6 +747,20 @@ rebooting. ???
+ 
+ ==============================================================
+ 
++rr_interval: (BFS CPU scheduler only)
++
++This is the smallest duration that any cpu process scheduling unit
++will run for. Increasing this value can increase throughput of
++cpu-bound tasks substantially, but at the expense of increased
++latencies overall. Conversely, decreasing it will decrease average and
++maximum latencies, but at the expense of throughput. This value is in
++milliseconds and the default value chosen depends on the number of
++cpus available at scheduler initialisation, with a minimum of 6.
++
++Valid values are from 1-1000.
++
++==============================================================
++
+ rtsig-max & rtsig-nr:
+ 
+ The file rtsig-max can be used to tune the maximum number
+Index: linux-4.4-ck1/fs/proc/base.c
+===================================================================
+--- linux-4.4-ck1.orig/fs/proc/base.c 2016-03-25 16:03:45.297350563 +1100
++++ linux-4.4-ck1/fs/proc/base.c 2016-03-25 16:03:45.289350411 +1100
+@@ -504,7 +504,7 @@ static int proc_pid_schedstat(struct seq
+ seq_printf(m, "0 0 0\n");
+ else
+ seq_printf(m, "%llu %llu %lu\n",
+- (unsigned long long)task->se.sum_exec_runtime,
++ (unsigned long long)tsk_seruntime(task),
+ (unsigned long long)task->sched_info.run_delay,
+ task->sched_info.pcount);
+ 
+Index: linux-4.4-ck1/include/linux/init_task.h
+===================================================================
+--- linux-4.4-ck1.orig/include/linux/init_task.h 2016-03-25 16:03:45.297350563 +1100
++++ linux-4.4-ck1/include/linux/init_task.h 2016-03-25 16:03:45.289350411 +1100
+@@ -157,8 +157,6 @@ extern struct task_group root_task_group
+ # define INIT_VTIME(tsk)
+ #endif
+ 
+-#define INIT_TASK_COMM "swapper"
+-
+ #ifdef CONFIG_RT_MUTEXES
+ # define INIT_RT_MUTEXES(tsk) \
+ .pi_waiters = RB_ROOT, \
+@@ -187,6 +185,78 @@ extern struct task_group root_task_group
+ * INIT_TASK is used to set up the first task table, touch at
+ * your own risk!.
Base=0, limit=0x1fffff (=2MB) + */ ++#ifdef CONFIG_SCHED_BFS ++#define INIT_TASK_COMM "BFS" ++#define INIT_TASK(tsk) \ ++{ \ ++ .state = 0, \ ++ .stack = &init_thread_info, \ ++ .usage = ATOMIC_INIT(2), \ ++ .flags = PF_KTHREAD, \ ++ .prio = NORMAL_PRIO, \ ++ .static_prio = MAX_PRIO-20, \ ++ .normal_prio = NORMAL_PRIO, \ ++ .deadline = 0, \ ++ .policy = SCHED_NORMAL, \ ++ .cpus_allowed = CPU_MASK_ALL, \ ++ .mm = NULL, \ ++ .active_mm = &init_mm, \ ++ .restart_block = { \ ++ .fn = do_no_restart_syscall, \ ++ }, \ ++ .run_list = LIST_HEAD_INIT(tsk.run_list), \ ++ .time_slice = HZ, \ ++ .tasks = LIST_HEAD_INIT(tsk.tasks), \ ++ INIT_PUSHABLE_TASKS(tsk) \ ++ .ptraced = LIST_HEAD_INIT(tsk.ptraced), \ ++ .ptrace_entry = LIST_HEAD_INIT(tsk.ptrace_entry), \ ++ .real_parent = &tsk, \ ++ .parent = &tsk, \ ++ .children = LIST_HEAD_INIT(tsk.children), \ ++ .sibling = LIST_HEAD_INIT(tsk.sibling), \ ++ .group_leader = &tsk, \ ++ RCU_POINTER_INITIALIZER(real_cred, &init_cred), \ ++ RCU_POINTER_INITIALIZER(cred, &init_cred), \ ++ .comm = INIT_TASK_COMM, \ ++ .thread = INIT_THREAD, \ ++ .fs = &init_fs, \ ++ .files = &init_files, \ ++ .signal = &init_signals, \ ++ .sighand = &init_sighand, \ ++ .nsproxy = &init_nsproxy, \ ++ .pending = { \ ++ .list = LIST_HEAD_INIT(tsk.pending.list), \ ++ .signal = {{0}}}, \ ++ .blocked = {{0}}, \ ++ .alloc_lock = __SPIN_LOCK_UNLOCKED(tsk.alloc_lock), \ ++ .journal_info = NULL, \ ++ .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ ++ .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ ++ .timer_slack_ns = 50000, /* 50 usec default slack */ \ ++ .pids = { \ ++ [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ ++ [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ ++ [PIDTYPE_SID] = INIT_PID_LINK(PIDTYPE_SID), \ ++ }, \ ++ .thread_group = LIST_HEAD_INIT(tsk.thread_group), \ ++ .thread_node = LIST_HEAD_INIT(init_signals.thread_head), \ ++ INIT_IDS \ ++ INIT_PERF_EVENTS(tsk) \ ++ INIT_TRACE_IRQFLAGS \ ++ INIT_LOCKDEP \ ++ INIT_FTRACE_GRAPH \ ++ INIT_TRACE_RECURSION \ ++ INIT_TASK_RCU_PREEMPT(tsk) \ ++ INIT_TASK_RCU_TASKS(tsk) \ ++ INIT_CPUSET_SEQ(tsk) \ ++ INIT_RT_MUTEXES(tsk) \ ++ INIT_PREV_CPUTIME(tsk) \ ++ INIT_VTIME(tsk) \ ++ INIT_NUMA_BALANCING(tsk) \ ++ INIT_KASAN(tsk) \ ++} ++#else /* CONFIG_SCHED_BFS */ ++#define INIT_TASK_COMM "swapper" + #define INIT_TASK(tsk) \ + { \ + .state = 0, \ +@@ -261,7 +331,7 @@ extern struct task_group root_task_group + INIT_NUMA_BALANCING(tsk) \ + INIT_KASAN(tsk) \ + } +- ++#endif /* CONFIG_SCHED_BFS */ + + #define INIT_CPU_TIMERS(cpu_timers) \ + { \ +Index: linux-4.4-ck1/include/linux/ioprio.h +=================================================================== +--- linux-4.4-ck1.orig/include/linux/ioprio.h 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/include/linux/ioprio.h 2016-03-25 16:03:45.289350411 +1100 +@@ -52,6 +52,8 @@ enum { + */ + static inline int task_nice_ioprio(struct task_struct *task) + { ++ if (iso_task(task)) ++ return 0; + return (task_nice(task) + 20) / 5; + } + +Index: linux-4.4-ck1/include/linux/sched.h +=================================================================== +--- linux-4.4-ck1.orig/include/linux/sched.h 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/include/linux/sched.h 2016-03-25 16:03:45.290350430 +1100 +@@ -176,7 +176,7 @@ extern void get_iowait_load(unsigned lon + + extern void calc_global_load(unsigned long ticks); + +-#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) ++#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_BFS) + extern void 
update_cpu_load_nohz(void); + #else + static inline void update_cpu_load_nohz(void) { } +@@ -339,8 +339,6 @@ extern void init_idle_bootup_task(struct + + extern cpumask_var_t cpu_isolated_map; + +-extern int runqueue_is_locked(int cpu); +- + #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON) + extern void nohz_balance_enter_idle(int cpu); + extern void set_cpu_sd_state_idle(void); +@@ -1382,9 +1380,11 @@ struct task_struct { + unsigned int flags; /* per process flags, defined below */ + unsigned int ptrace; + ++#if defined(CONFIG_SMP) || defined(CONFIG_SCHED_BFS) ++ int on_cpu; ++#endif + #ifdef CONFIG_SMP + struct llist_node wake_entry; +- int on_cpu; + unsigned int wakee_flips; + unsigned long wakee_flip_decay_ts; + struct task_struct *last_wakee; +@@ -1392,12 +1392,29 @@ struct task_struct { + int wake_cpu; + #endif + int on_rq; +- + int prio, static_prio, normal_prio; + unsigned int rt_priority; ++#ifdef CONFIG_SCHED_BFS ++ int time_slice; ++ u64 deadline; ++ struct list_head run_list; ++ u64 last_ran; ++ u64 sched_time; /* sched_clock time spent running */ ++#ifdef CONFIG_SMT_NICE ++ int smt_bias; /* Policy/nice level bias across smt siblings */ ++#endif ++#ifdef CONFIG_SMP ++ bool sticky; /* Soft affined flag */ ++#endif ++#ifdef CONFIG_HOTPLUG_CPU ++ bool zerobound; /* Bound to CPU0 for hotplug */ ++#endif ++ unsigned long rt_timeout; ++#else /* CONFIG_SCHED_BFS */ + const struct sched_class *sched_class; + struct sched_entity se; + struct sched_rt_entity rt; ++#endif + #ifdef CONFIG_CGROUP_SCHED + struct task_group *sched_task_group; + #endif +@@ -1517,6 +1534,9 @@ struct task_struct { + int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + + cputime_t utime, stime, utimescaled, stimescaled; ++#ifdef CONFIG_SCHED_BFS ++ unsigned long utime_pc, stime_pc; ++#endif + cputime_t gtime; + struct prev_cputime prev_cputime; + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +@@ -1829,6 +1849,63 @@ extern int arch_task_struct_size __read_ + # define arch_task_struct_size (sizeof(struct task_struct)) + #endif + ++#ifdef CONFIG_SCHED_BFS ++bool grunqueue_is_locked(void); ++void grq_unlock_wait(void); ++void cpu_scaling(int cpu); ++void cpu_nonscaling(int cpu); ++#define tsk_seruntime(t) ((t)->sched_time) ++#define tsk_rttimeout(t) ((t)->rt_timeout) ++ ++static inline void tsk_cpus_current(struct task_struct *p) ++{ ++} ++ ++static inline int runqueue_is_locked(int cpu) ++{ ++ return grunqueue_is_locked(); ++} ++ ++void print_scheduler_version(void); ++ ++static inline bool iso_task(struct task_struct *p) ++{ ++ return (p->policy == SCHED_ISO); ++} ++#else /* CFS */ ++extern int runqueue_is_locked(int cpu); ++static inline void cpu_scaling(int cpu) ++{ ++} ++ ++static inline void cpu_nonscaling(int cpu) ++{ ++} ++#define tsk_seruntime(t) ((t)->se.sum_exec_runtime) ++#define tsk_rttimeout(t) ((t)->rt.timeout) ++ ++static inline void tsk_cpus_current(struct task_struct *p) ++{ ++ p->nr_cpus_allowed = current->nr_cpus_allowed; ++} ++ ++static inline void print_scheduler_version(void) ++{ ++ printk(KERN_INFO"CFS CPU scheduler.\n"); ++} ++ ++static inline bool iso_task(struct task_struct *p) ++{ ++ return false; ++} ++ ++/* Anyone feel like implementing this? */ ++static inline bool above_background_load(void) ++{ ++ return false; ++} ++#endif /* CONFIG_SCHED_BFS */ ++ + /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ + #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) + +@@ -2244,7 +2321,7 @@ static inline int set_cpus_allowed_ptr(s + } + #endif + +-#ifdef CONFIG_NO_HZ_COMMON ++#if defined(CONFIG_NO_HZ_COMMON) && !defined(CONFIG_SCHED_BFS) + void calc_load_enter_idle(void); + void calc_load_exit_idle(void); + #else +@@ -2317,7 +2394,7 @@ extern unsigned long long + task_sched_runtime(struct task_struct *task); + + /* sched_exec is called by processes performing an exec */ +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) && !defined(CONFIG_SCHED_BFS) + extern void sched_exec(void); + #else + #define sched_exec() {} +@@ -3102,7 +3179,7 @@ static inline unsigned int task_cpu(cons + return 0; + } + +-static inline void set_task_cpu(struct task_struct *p, unsigned int cpu) ++static inline void set_task_cpu(struct task_struct *p, int cpu) + { + } + +Index: linux-4.4-ck1/init/Kconfig +=================================================================== +--- linux-4.4-ck1.orig/init/Kconfig 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/init/Kconfig 2016-03-25 16:03:45.290350430 +1100 +@@ -28,6 +28,20 @@ config BUILDTIME_EXTABLE_SORT + + menu "General setup" + ++config SCHED_BFS ++ bool "BFS cpu scheduler" ++ ---help--- ++ The Brain Fuck CPU Scheduler for excellent interactivity and ++ responsiveness on the desktop and solid scalability on normal ++ hardware and commodity servers. Not recommended for 4096 CPUs. ++ ++ Currently incompatible with the Group CPU scheduler, and RCU TORTURE ++ TEST so these options are disabled. ++ ++ Say Y here. ++ default y ++ ++ + config BROKEN + bool + +@@ -340,7 +354,7 @@ choice + # Kind of a stub config for the pure tick based cputime accounting + config TICK_CPU_ACCOUNTING + bool "Simple tick based cputime accounting" +- depends on !S390 && !NO_HZ_FULL ++ depends on !S390 && !NO_HZ_FULL && !SCHED_BFS + help + This is the basic tick based cputime accounting that maintains + statistics about user, system and idle time spent on per jiffies +@@ -365,6 +379,7 @@ config VIRT_CPU_ACCOUNTING_GEN + bool "Full dynticks CPU time accounting" + depends on HAVE_CONTEXT_TRACKING + depends on HAVE_VIRT_CPU_ACCOUNTING_GEN ++ depends on !SCHED_BFS + select VIRT_CPU_ACCOUNTING + select CONTEXT_TRACKING + help +@@ -699,6 +714,7 @@ config RCU_NOCB_CPU + bool "Offload RCU callback processing from boot-selected CPUs" + depends on TREE_RCU || PREEMPT_RCU + depends on RCU_EXPERT || NO_HZ_FULL ++ depends on !SCHED_BFS + default n + help + Use this option to reduce OS jitter for aggressive HPC or +@@ -909,6 +925,7 @@ config NUMA_BALANCING + depends on ARCH_SUPPORTS_NUMA_BALANCING + depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY + depends on SMP && NUMA && MIGRATION ++ depends on !SCHED_BFS + help + This option adds support for automatic NUMA aware memory/task placement. + The mechanism is quite primitive and is based on migrating memory when +@@ -995,6 +1012,7 @@ config PROC_PID_CPUSET + + config CGROUP_CPUACCT + bool "Simple CPU accounting cgroup subsystem" ++ depends on !SCHED_BFS + help + Provides a simple Resource Controller for monitoring the + total CPU consumed by the tasks in a cgroup. 
+@@ -1080,6 +1098,7 @@ config CGROUP_PERF + + menuconfig CGROUP_SCHED + bool "Group CPU scheduler" ++ depends on !SCHED_BFS + default n + help + This feature lets CPU scheduler recognize task groups and control CPU +@@ -1227,6 +1246,7 @@ endif # NAMESPACES + + config SCHED_AUTOGROUP + bool "Automatic process group scheduling" ++ depends on !SCHED_BFS + select CGROUPS + select CGROUP_SCHED + select FAIR_GROUP_SCHED +@@ -1711,38 +1731,8 @@ config COMPAT_BRK + + On non-ancient distros (post-2000 ones) N is usually a safe choice. + +-choice +- prompt "Choose SLAB allocator" +- default SLUB +- help +- This option allows to select a slab allocator. +- +-config SLAB +- bool "SLAB" +- help +- The regular slab allocator that is established and known to work +- well in all environments. It organizes cache hot objects in +- per cpu and per node queues. +- + config SLUB +- bool "SLUB (Unqueued Allocator)" +- help +- SLUB is a slab allocator that minimizes cache line usage +- instead of managing queues of cached objects (SLAB approach). +- Per cpu caching is realized using slabs of objects instead +- of queues of objects. SLUB can use memory efficiently +- and has enhanced diagnostics. SLUB is the default choice for +- a slab allocator. +- +-config SLOB +- depends on EXPERT +- bool "SLOB (Simple Allocator)" +- help +- SLOB replaces the stock allocator with a drastically simpler +- allocator. SLOB is generally more space efficient but +- does not perform as well on large systems. +- +-endchoice ++ def_bool y + + config SLUB_CPU_PARTIAL + default y +Index: linux-4.4-ck1/init/main.c +=================================================================== +--- linux-4.4-ck1.orig/init/main.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/init/main.c 2016-03-25 16:03:45.290350430 +1100 +@@ -808,7 +808,6 @@ int __init_or_module do_one_initcall(ini + return ret; + } + +- + extern initcall_t __initcall_start[]; + extern initcall_t __initcall0_start[]; + extern initcall_t __initcall1_start[]; +@@ -943,6 +942,8 @@ static int __ref kernel_init(void *unuse + + flush_delayed_fput(); + ++ print_scheduler_version(); ++ + if (ramdisk_execute_command) { + ret = run_init_process(ramdisk_execute_command); + if (!ret) +Index: linux-4.4-ck1/kernel/delayacct.c +=================================================================== +--- linux-4.4-ck1.orig/kernel/delayacct.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/kernel/delayacct.c 2016-03-25 16:03:45.290350430 +1100 +@@ -104,7 +104,7 @@ int __delayacct_add_tsk(struct taskstats + */ + t1 = tsk->sched_info.pcount; + t2 = tsk->sched_info.run_delay; +- t3 = tsk->se.sum_exec_runtime; ++ t3 = tsk_seruntime(tsk); + + d->cpu_count += t1; + +Index: linux-4.4-ck1/kernel/exit.c +=================================================================== +--- linux-4.4-ck1.orig/kernel/exit.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/kernel/exit.c 2016-03-25 16:03:45.290350430 +1100 +@@ -135,7 +135,7 @@ static void __exit_signal(struct task_st + sig->inblock += task_io_get_inblock(tsk); + sig->oublock += task_io_get_oublock(tsk); + task_io_accounting_add(&sig->ioac, &tsk->ioac); +- sig->sum_sched_runtime += tsk->se.sum_exec_runtime; ++ sig->sum_sched_runtime += tsk_seruntime(tsk); + sig->nr_threads--; + __unhash_process(tsk, group_dead); + write_sequnlock(&sig->stats_lock); +Index: linux-4.4-ck1/kernel/sysctl.c +=================================================================== +--- linux-4.4-ck1.orig/kernel/sysctl.c 2016-03-25 16:03:45.297350563 +1100 ++++ 
linux-4.4-ck1/kernel/sysctl.c 2016-03-25 16:03:45.291350449 +1100 +@@ -125,7 +125,13 @@ static int __maybe_unused one = 1; + static int __maybe_unused two = 2; + static int __maybe_unused four = 4; + static unsigned long one_ul = 1; +-static int one_hundred = 100; ++static int __maybe_unused one_hundred = 100; ++#ifdef CONFIG_SCHED_BFS ++extern int rr_interval; ++extern int sched_interactive; ++extern int sched_iso_cpu; ++static int __read_mostly one_thousand = 1000; ++#endif + #ifdef CONFIG_PRINTK + static int ten_thousand = 10000; + #endif +@@ -260,7 +266,7 @@ static struct ctl_table sysctl_base_tabl + { } + }; + +-#ifdef CONFIG_SCHED_DEBUG ++#if defined(CONFIG_SCHED_DEBUG) && !defined(CONFIG_SCHED_BFS) + static int min_sched_granularity_ns = 100000; /* 100 usecs */ + static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ + static int min_wakeup_granularity_ns; /* 0 usecs */ +@@ -277,6 +283,7 @@ static int max_extfrag_threshold = 1000; + #endif + + static struct ctl_table kern_table[] = { ++#ifndef CONFIG_SCHED_BFS + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -434,6 +441,7 @@ static struct ctl_table kern_table[] = { + .extra1 = &one, + }, + #endif ++#endif /* !CONFIG_SCHED_BFS */ + #ifdef CONFIG_PROVE_LOCKING + { + .procname = "prove_locking", +@@ -991,6 +999,35 @@ static struct ctl_table kern_table[] = { + .proc_handler = proc_dointvec, + }, + #endif ++#ifdef CONFIG_SCHED_BFS ++ { ++ .procname = "rr_interval", ++ .data = &rr_interval, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &one, ++ .extra2 = &one_thousand, ++ }, ++ { ++ .procname = "interactive", ++ .data = &sched_interactive, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &one, ++ }, ++ { ++ .procname = "iso_cpu", ++ .data = &sched_iso_cpu, ++ .maxlen = sizeof (int), ++ .mode = 0644, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &zero, ++ .extra2 = &one_hundred, ++ }, ++#endif + #if defined(CONFIG_S390) && defined(CONFIG_SMP) + { + .procname = "spin_retry", +Index: linux-4.4-ck1/lib/Kconfig.debug +=================================================================== +--- linux-4.4-ck1.orig/lib/Kconfig.debug 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/lib/Kconfig.debug 2016-03-25 16:03:45.291350449 +1100 +@@ -1239,7 +1239,7 @@ config TORTURE_TEST + + config RCU_TORTURE_TEST + tristate "torture tests for RCU" +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && !SCHED_BFS + select TORTURE_TEST + select SRCU + select TASKS_RCU +Index: linux-4.4-ck1/include/linux/jiffies.h +=================================================================== +--- linux-4.4-ck1.orig/include/linux/jiffies.h 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/include/linux/jiffies.h 2016-03-25 16:03:45.291350449 +1100 +@@ -164,7 +164,7 @@ static inline u64 get_jiffies_64(void) + * Have the 32 bit jiffies value wrap 5 minutes after boot + * so jiffies wrap bugs show up earlier. 
+ */ +-#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-300*HZ)) ++#define INITIAL_JIFFIES ((unsigned long)(unsigned int) (-10*HZ)) + + /* + * Change timeval to jiffies, trying to avoid the +Index: linux-4.4-ck1/drivers/cpufreq/cpufreq.c +=================================================================== +--- linux-4.4-ck1.orig/drivers/cpufreq/cpufreq.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/drivers/cpufreq/cpufreq.c 2016-03-25 16:03:45.291350449 +1100 +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -1900,6 +1901,12 @@ int __cpufreq_driver_target(struct cpufr + } + + out: ++ if (likely(retval != -EINVAL)) { ++ if (target_freq == policy->max) ++ cpu_nonscaling(policy->cpu); ++ else ++ cpu_scaling(policy->cpu); ++ } + return retval; + } + EXPORT_SYMBOL_GPL(__cpufreq_driver_target); +Index: linux-4.4-ck1/drivers/cpufreq/cpufreq_ondemand.c +=================================================================== +--- linux-4.4-ck1.orig/drivers/cpufreq/cpufreq_ondemand.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/drivers/cpufreq/cpufreq_ondemand.c 2016-03-25 16:03:45.291350449 +1100 +@@ -19,7 +19,7 @@ + #include "cpufreq_governor.h" + + /* On-demand governor macros */ +-#define DEF_FREQUENCY_UP_THRESHOLD (80) ++#define DEF_FREQUENCY_UP_THRESHOLD (63) + #define DEF_SAMPLING_DOWN_FACTOR (1) + #define MAX_SAMPLING_DOWN_FACTOR (100000) + #define MICRO_FREQUENCY_UP_THRESHOLD (95) +@@ -148,7 +148,7 @@ static void dbs_freq_increase(struct cpu + } + + /* +- * Every sampling_rate, we check, if current idle time is less than 20% ++ * Every sampling_rate, we check, if current idle time is less than 37% + * (default), then we try to increase frequency. Else, we adjust the frequency + * proportional to load. + */ +Index: linux-4.4-ck1/kernel/sched/bfs.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-4.4-ck1/kernel/sched/bfs.c 2016-03-25 16:05:20.525834708 +1100 +@@ -0,0 +1,7585 @@ ++/* ++ * kernel/sched/bfs.c, was kernel/sched.c ++ * ++ * Kernel scheduler and related syscalls ++ * ++ * Copyright (C) 1991-2002 Linus Torvalds ++ * ++ * 1996-12-23 Modified by Dave Grothe to fix bugs in semaphores and ++ * make semaphores SMP safe ++ * 1998-11-19 Implemented schedule_timeout() and related stuff ++ * by Andrea Arcangeli ++ * 2002-01-04 New ultra-scalable O(1) scheduler by Ingo Molnar: ++ * hybrid priority-list and round-robin design with ++ * an array-switch method of distributing timeslices ++ * and per-CPU runqueues. Cleanups and useful suggestions ++ * by Davide Libenzi, preemptible kernel bits by Robert Love. ++ * 2003-09-03 Interactivity tuning by Con Kolivas. ++ * 2004-04-02 Scheduler domains code by Nick Piggin ++ * 2007-04-15 Work begun on replacing all interactivity tuning with a ++ * fair scheduling design by Con Kolivas. ++ * 2007-05-05 Load balancing (smp-nice) and other improvements ++ * by Peter Williams ++ * 2007-05-06 Interactivity improvements to CFS by Mike Galbraith ++ * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri ++ * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, ++ * Thomas Gleixner, Mike Kravetz ++ * now Brainfuck deadline scheduling policy by Con Kolivas deletes ++ * a whole lot of those previous things. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#ifdef CONFIG_PARAVIRT ++#include ++#endif ++ ++#include "cpupri.h" ++#include "../workqueue_internal.h" ++#include "../smpboot.h" ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++#include "bfs_sched.h" ++ ++#define rt_prio(prio) unlikely((prio) < MAX_RT_PRIO) ++#define rt_task(p) rt_prio((p)->prio) ++#define rt_queue(rq) rt_prio((rq)->rq_prio) ++#define batch_task(p) (unlikely((p)->policy == SCHED_BATCH)) ++#define is_rt_policy(policy) ((policy) == SCHED_FIFO || \ ++ (policy) == SCHED_RR) ++#define has_rt_policy(p) unlikely(is_rt_policy((p)->policy)) ++ ++#define is_idle_policy(policy) ((policy) == SCHED_IDLEPRIO) ++#define idleprio_task(p) unlikely(is_idle_policy((p)->policy)) ++#define task_running_idle(p) unlikely((p)->prio == IDLE_PRIO) ++#define idle_queue(rq) (unlikely(is_idle_policy((rq)->rq_policy))) ++ ++#define is_iso_policy(policy) ((policy) == SCHED_ISO) ++#define iso_task(p) unlikely(is_iso_policy((p)->policy)) ++#define iso_queue(rq) unlikely(is_iso_policy((rq)->rq_policy)) ++#define task_running_iso(p) unlikely((p)->prio == ISO_PRIO) ++#define rq_running_iso(rq) ((rq)->rq_prio == ISO_PRIO) ++ ++#define rq_idle(rq) ((rq)->rq_prio == PRIO_LIMIT) ++ ++#define ISO_PERIOD ((5 * HZ * grq.noc) + 1) ++ ++#define SCHED_PRIO(p) ((p) + MAX_RT_PRIO) ++#define STOP_PRIO (MAX_RT_PRIO - 1) ++ ++/* ++ * Some helpers for converting to/from various scales. Use shifts to get ++ * approximate multiples of ten for less overhead. ++ */ ++#define JIFFIES_TO_NS(TIME) ((TIME) * (1000000000 / HZ)) ++#define JIFFY_NS (1000000000 / HZ) ++#define HALF_JIFFY_NS (1000000000 / HZ / 2) ++#define HALF_JIFFY_US (1000000 / HZ / 2) ++#define MS_TO_NS(TIME) ((TIME) << 20) ++#define MS_TO_US(TIME) ((TIME) << 10) ++#define NS_TO_MS(TIME) ((TIME) >> 20) ++#define NS_TO_US(TIME) ((TIME) >> 10) ++ ++#define RESCHED_US (100) /* Reschedule if less than this many μs left */ ++ ++void print_scheduler_version(void) ++{ ++ printk(KERN_INFO "BFS CPU scheduler v0.467 by Con Kolivas.\n"); ++} ++ ++/* ++ * This is the time all tasks within the same priority round robin. ++ * Value is in ms and set to a minimum of 6ms. Scales with number of cpus. ++ * Tunable via /proc interface. ++ */ ++int rr_interval __read_mostly = 6; ++ ++/* Tunable to choose whether to prioritise latency or throughput, simple ++ * binary yes or no */ ++ ++int sched_interactive __read_mostly = 1; ++ ++/* ++ * sched_iso_cpu - sysctl which determines the cpu percentage SCHED_ISO tasks ++ * are allowed to run five seconds as real time tasks. This is the total over ++ * all online cpus. ++ */ ++int sched_iso_cpu __read_mostly = 70; ++ ++/* ++ * The relative length of deadline for each priority(nice) level. ++ */ ++static int prio_ratios[NICE_WIDTH] __read_mostly; ++ ++/* ++ * The quota handed out to tasks of all priority levels when refilling their ++ * time_slice. 
++ */ ++static inline int timeslice(void) ++{ ++ return MS_TO_US(rr_interval); ++} ++ ++/* ++ * The global runqueue data that all CPUs work off. Data is protected either ++ * by the global grq lock, or the discrete lock that precedes the data in this ++ * struct. ++ */ ++struct global_rq { ++ raw_spinlock_t lock; ++ unsigned long nr_running; ++ unsigned long nr_uninterruptible; ++ unsigned long long nr_switches; ++ struct list_head queue[PRIO_LIMIT]; ++ DECLARE_BITMAP(prio_bitmap, PRIO_LIMIT + 1); ++ unsigned long qnr; /* queued not running */ ++#ifdef CONFIG_SMP ++ cpumask_t cpu_idle_map; ++ bool idle_cpus; ++#endif ++ int noc; /* num_online_cpus stored and updated when it changes */ ++ u64 niffies; /* Nanosecond jiffies */ ++ unsigned long last_jiffy; /* Last jiffy we updated niffies */ ++ ++ raw_spinlock_t iso_lock; ++ int iso_ticks; ++ bool iso_refractory; ++}; ++ ++#ifdef CONFIG_SMP ++/* ++ * We add the notion of a root-domain which will be used to define per-domain ++ * variables. Each exclusive cpuset essentially defines an island domain by ++ * fully partitioning the member cpus from any other cpuset. Whenever a new ++ * exclusive cpuset is created, we also create and attach a new root-domain ++ * object. ++ * ++ */ ++struct root_domain { ++ atomic_t refcount; ++ atomic_t rto_count; ++ struct rcu_head rcu; ++ cpumask_var_t span; ++ cpumask_var_t online; ++ ++ /* ++ * The "RT overload" flag: it gets set if a CPU has more than ++ * one runnable RT task. ++ */ ++ cpumask_var_t rto_mask; ++ struct cpupri cpupri; ++}; ++ ++/* ++ * By default the system creates a single root-domain with all cpus as ++ * members (mimicking the global state we have today). ++ */ ++static struct root_domain def_root_domain; ++ ++#endif /* CONFIG_SMP */ ++ ++/* There can be only one */ ++static struct global_rq grq; ++ ++static DEFINE_MUTEX(sched_hotcpu_mutex); ++ ++/* cpus with isolated domains */ ++cpumask_var_t cpu_isolated_map; ++ ++DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++#ifdef CONFIG_SMP ++struct rq *cpu_rq(int cpu) ++{ ++ return &per_cpu(runqueues, (cpu)); ++} ++#define task_rq(p) cpu_rq(task_cpu(p)) ++#define cpu_curr(cpu) (cpu_rq(cpu)->curr) ++/* ++ * sched_domains_mutex serialises calls to init_sched_domains, ++ * detach_destroy_domains and partition_sched_domains. ++ */ ++DEFINE_MUTEX(sched_domains_mutex); ++ ++/* ++ * By default the system creates a single root-domain with all cpus as ++ * members (mimicking the global state we have today). ++ */ ++static struct root_domain def_root_domain; ++ ++int __weak arch_sd_sibling_asym_packing(void) ++{ ++ return 0*SD_ASYM_PACKING; ++} ++#else ++struct rq *uprq; ++#endif /* CONFIG_SMP */ ++ ++static inline void update_rq_clock(struct rq *rq); ++ ++/* ++ * Sanity check should sched_clock return bogus values. We make sure it does ++ * not appear to go backwards, and use jiffies to determine the maximum and ++ * minimum it could possibly have increased, and round down to the nearest ++ * jiffy when it falls outside this. 
++ */ ++static inline void niffy_diff(s64 *niff_diff, int jiff_diff) ++{ ++ unsigned long min_diff, max_diff; ++ ++ if (jiff_diff > 1) ++ min_diff = JIFFIES_TO_NS(jiff_diff - 1); ++ else ++ min_diff = 1; ++ /* Round up to the nearest tick for maximum */ ++ max_diff = JIFFIES_TO_NS(jiff_diff + 1); ++ ++ if (unlikely(*niff_diff < min_diff || *niff_diff > max_diff)) ++ *niff_diff = min_diff; ++} ++ ++#ifdef CONFIG_SMP ++static inline int cpu_of(struct rq *rq) ++{ ++ return rq->cpu; ++} ++ ++/* ++ * Niffies are a globally increasing nanosecond counter. Whenever a runqueue ++ * clock is updated with the grq.lock held, it is an opportunity to update the ++ * niffies value. Any CPU can update it by adding how much its clock has ++ * increased since it last updated niffies, minus any added niffies by other ++ * CPUs. ++ */ ++static inline void update_clocks(struct rq *rq) ++{ ++ s64 ndiff; ++ long jdiff; ++ ++ update_rq_clock(rq); ++ ndiff = rq->clock - rq->old_clock; ++ /* old_clock is only updated when we are updating niffies */ ++ rq->old_clock = rq->clock; ++ ndiff -= grq.niffies - rq->last_niffy; ++ jdiff = jiffies - grq.last_jiffy; ++ niffy_diff(&ndiff, jdiff); ++ grq.last_jiffy += jdiff; ++ grq.niffies += ndiff; ++ rq->last_niffy = grq.niffies; ++} ++#else /* CONFIG_SMP */ ++static inline int cpu_of(struct rq *rq) ++{ ++ return 0; ++} ++ ++static inline void update_clocks(struct rq *rq) ++{ ++ s64 ndiff; ++ long jdiff; ++ ++ update_rq_clock(rq); ++ ndiff = rq->clock - rq->old_clock; ++ rq->old_clock = rq->clock; ++ jdiff = jiffies - grq.last_jiffy; ++ niffy_diff(&ndiff, jdiff); ++ grq.last_jiffy += jdiff; ++ grq.niffies += ndiff; ++} ++#endif ++ ++#include "stats.h" ++ ++#ifndef prepare_arch_switch ++# define prepare_arch_switch(next) do { } while (0) ++#endif ++#ifndef finish_arch_switch ++# define finish_arch_switch(prev) do { } while (0) ++#endif ++#ifndef finish_arch_post_lock_switch ++# define finish_arch_post_lock_switch() do { } while (0) ++#endif ++ ++/* ++ * All common locking functions performed on grq.lock. rq->clock is local to ++ * the CPU accessing it so it can be modified just with interrupts disabled ++ * when we're not updating niffies. ++ * Looking up task_rq must be done under grq.lock to be safe. 
++ */ ++static void update_rq_clock_task(struct rq *rq, s64 delta); ++ ++static inline void update_rq_clock(struct rq *rq) ++{ ++ s64 delta = sched_clock_cpu(cpu_of(rq)) - rq->clock; ++ ++ if (unlikely(delta < 0)) ++ return; ++ rq->clock += delta; ++ update_rq_clock_task(rq, delta); ++} ++ ++static inline bool task_running(struct task_struct *p) ++{ ++ return p->on_cpu; ++} ++ ++static inline void grq_lock(void) ++ __acquires(grq.lock) ++{ ++ raw_spin_lock(&grq.lock); ++} ++ ++static inline void grq_unlock(void) ++ __releases(grq.lock) ++{ ++ raw_spin_unlock(&grq.lock); ++} ++ ++static inline void grq_lock_irq(void) ++ __acquires(grq.lock) ++{ ++ raw_spin_lock_irq(&grq.lock); ++} ++ ++static inline void time_lock_grq(struct rq *rq) ++ __acquires(grq.lock) ++{ ++ grq_lock(); ++ update_clocks(rq); ++} ++ ++static inline void grq_unlock_irq(void) ++ __releases(grq.lock) ++{ ++ raw_spin_unlock_irq(&grq.lock); ++} ++ ++static inline void grq_lock_irqsave(unsigned long *flags) ++ __acquires(grq.lock) ++{ ++ raw_spin_lock_irqsave(&grq.lock, *flags); ++} ++ ++static inline void grq_unlock_irqrestore(unsigned long *flags) ++ __releases(grq.lock) ++{ ++ raw_spin_unlock_irqrestore(&grq.lock, *flags); ++} ++ ++static inline struct rq ++*task_grq_lock(struct task_struct *p, unsigned long *flags) ++ __acquires(grq.lock) ++{ ++ grq_lock_irqsave(flags); ++ return task_rq(p); ++} ++ ++static inline struct rq ++*time_task_grq_lock(struct task_struct *p, unsigned long *flags) ++ __acquires(grq.lock) ++{ ++ struct rq *rq = task_grq_lock(p, flags); ++ update_clocks(rq); ++ return rq; ++} ++ ++static inline struct rq *task_grq_lock_irq(struct task_struct *p) ++ __acquires(grq.lock) ++{ ++ grq_lock_irq(); ++ return task_rq(p); ++} ++ ++static inline void time_task_grq_lock_irq(struct task_struct *p) ++ __acquires(grq.lock) ++{ ++ struct rq *rq = task_grq_lock_irq(p); ++ update_clocks(rq); ++} ++ ++static inline void task_grq_unlock_irq(void) ++ __releases(grq.lock) ++{ ++ grq_unlock_irq(); ++} ++ ++static inline void task_grq_unlock(unsigned long *flags) ++ __releases(grq.lock) ++{ ++ grq_unlock_irqrestore(flags); ++} ++ ++/** ++ * grunqueue_is_locked ++ * ++ * Returns true if the global runqueue is locked. ++ * This interface allows printk to be called with the runqueue lock ++ * held and know whether or not it is OK to wake up the klogd. 
++ */ ++bool grunqueue_is_locked(void) ++{ ++ return raw_spin_is_locked(&grq.lock); ++} ++ ++void grq_unlock_wait(void) ++ __releases(grq.lock) ++{ ++ smp_mb(); /* spin-unlock-wait is not a full memory barrier */ ++ raw_spin_unlock_wait(&grq.lock); ++} ++ ++static inline void time_grq_lock(struct rq *rq, unsigned long *flags) ++ __acquires(grq.lock) ++{ ++ local_irq_save(*flags); ++ time_lock_grq(rq); ++} ++ ++static inline struct rq *__task_grq_lock(struct task_struct *p) ++ __acquires(grq.lock) ++{ ++ grq_lock(); ++ return task_rq(p); ++} ++ ++static inline void __task_grq_unlock(void) ++ __releases(grq.lock) ++{ ++ grq_unlock(); ++} ++ ++static inline void prepare_lock_switch(struct rq *rq, struct task_struct *next) ++{ ++} ++ ++static inline void finish_lock_switch(struct rq *rq, struct task_struct *prev) ++{ ++#ifdef CONFIG_DEBUG_SPINLOCK ++ /* this is a valid case when another task releases the spinlock */ ++ grq.lock.owner = current; ++#endif ++ /* ++ * If we are tracking spinlock dependencies then we have to ++ * fix up the runqueue lock - which gets 'carried over' from ++ * prev into current: ++ */ ++ spin_acquire(&grq.lock.dep_map, 0, 0, _THIS_IP_); ++ ++ grq_unlock_irq(); ++} ++ ++static inline bool deadline_before(u64 deadline, u64 time) ++{ ++ return (deadline < time); ++} ++ ++static inline bool deadline_after(u64 deadline, u64 time) ++{ ++ return (deadline > time); ++} ++ ++/* ++ * A task that is queued but not running will be on the grq run list. ++ * A task that is not running or queued will not be on the grq run list. ++ * A task that is currently running will have ->on_cpu set but not on the ++ * grq run list. ++ */ ++static inline bool task_queued(struct task_struct *p) ++{ ++ return (!list_empty(&p->run_list)); ++} ++ ++/* ++ * Removing from the global runqueue. Enter with grq locked. ++ */ ++static void dequeue_task(struct task_struct *p) ++{ ++ list_del_init(&p->run_list); ++ if (list_empty(grq.queue + p->prio)) ++ __clear_bit(p->prio, grq.prio_bitmap); ++ sched_info_dequeued(task_rq(p), p); ++} ++ ++/* ++ * To determine if it's safe for a task of SCHED_IDLEPRIO to actually run as ++ * an idle task, we ensure none of the following conditions are met. ++ */ ++static bool idleprio_suitable(struct task_struct *p) ++{ ++ return (!freezing(p) && !signal_pending(p) && ++ !(task_contributes_to_load(p)) && !(p->flags & (PF_EXITING))); ++} ++ ++/* ++ * To determine if a task of SCHED_ISO can run in pseudo-realtime, we check ++ * that the iso_refractory flag is not set. ++ */ ++static bool isoprio_suitable(void) ++{ ++ return !grq.iso_refractory; ++} ++ ++/* ++ * Adding to the global runqueue. Enter with grq locked. ++ */ ++static void enqueue_task(struct task_struct *p, struct rq *rq) ++{ ++ if (!rt_task(p)) { ++ /* Check it hasn't gotten rt from PI */ ++ if ((idleprio_task(p) && idleprio_suitable(p)) || ++ (iso_task(p) && isoprio_suitable())) ++ p->prio = p->normal_prio; ++ else ++ p->prio = NORMAL_PRIO; ++ } ++ __set_bit(p->prio, grq.prio_bitmap); ++ list_add_tail(&p->run_list, grq.queue + p->prio); ++ sched_info_queued(rq, p); ++} ++ ++static inline void requeue_task(struct task_struct *p) ++{ ++ sched_info_queued(task_rq(p), p); ++} ++ ++/* ++ * Returns the relative length of deadline all compared to the shortest ++ * deadline which is that of nice -20. ++ */ ++static inline int task_prio_ratio(struct task_struct *p) ++{ ++ return prio_ratios[TASK_USER_PRIO(p)]; ++} ++ ++/* ++ * task_timeslice - all tasks of all priorities get the exact same timeslice ++ * length. 
CPU distribution is handled by giving different deadlines to ++ * tasks of different priorities. Use 128 as the base value for fast shifts. ++ */ ++static inline int task_timeslice(struct task_struct *p) ++{ ++ return (rr_interval * task_prio_ratio(p) / 128); ++} ++ ++static void resched_task(struct task_struct *p); ++ ++static inline void resched_curr(struct rq *rq) ++{ ++ resched_task(rq->curr); ++} ++ ++/* ++ * qnr is the "queued but not running" count which is the total number of ++ * tasks on the global runqueue list waiting for cpu time but not actually ++ * currently running on a cpu. ++ */ ++static inline void inc_qnr(void) ++{ ++ grq.qnr++; ++} ++ ++static inline void dec_qnr(void) ++{ ++ grq.qnr--; ++} ++ ++static inline int queued_notrunning(void) ++{ ++ return grq.qnr; ++} ++ ++#ifdef CONFIG_SMP ++/* ++ * The cpu_idle_map stores a bitmap of all the CPUs currently idle to ++ * allow easy lookup of whether any suitable idle CPUs are available. ++ * It's cheaper to maintain a binary yes/no if there are any idle CPUs on the ++ * idle_cpus variable than to do a full bitmask check when we are busy. ++ */ ++static inline void set_cpuidle_map(int cpu) ++{ ++ if (likely(cpu_online(cpu))) { ++ cpumask_set_cpu(cpu, &grq.cpu_idle_map); ++ grq.idle_cpus = true; ++ } ++} ++ ++static inline void clear_cpuidle_map(int cpu) ++{ ++ cpumask_clear_cpu(cpu, &grq.cpu_idle_map); ++ if (cpumask_empty(&grq.cpu_idle_map)) ++ grq.idle_cpus = false; ++} ++ ++static bool suitable_idle_cpus(struct task_struct *p) ++{ ++ if (!grq.idle_cpus) ++ return false; ++ return (cpumask_intersects(&p->cpus_allowed, &grq.cpu_idle_map)); ++} ++ ++#define CPUIDLE_DIFF_THREAD (1) ++#define CPUIDLE_DIFF_CORE (2) ++#define CPUIDLE_CACHE_BUSY (4) ++#define CPUIDLE_DIFF_CPU (8) ++#define CPUIDLE_THREAD_BUSY (16) ++#define CPUIDLE_THROTTLED (32) ++#define CPUIDLE_DIFF_NODE (64) ++ ++static inline bool scaling_rq(struct rq *rq); ++ ++/* ++ * The best idle CPU is chosen according to the CPUIDLE ranking above where the ++ * lowest value would give the most suitable CPU to schedule p onto next. The ++ * order works out to be the following: ++ * ++ * Same core, idle or busy cache, idle or busy threads ++ * Other core, same cache, idle or busy cache, idle threads. ++ * Same node, other CPU, idle cache, idle threads. ++ * Same node, other CPU, busy cache, idle threads. ++ * Other core, same cache, busy threads. ++ * Same node, other CPU, busy threads. ++ * Other node, other CPU, idle cache, idle threads. ++ * Other node, other CPU, busy cache, idle threads. ++ * Other node, other CPU, busy threads. 
++ */ ++static int best_mask_cpu(int best_cpu, struct rq *rq, cpumask_t *tmpmask) ++{ ++ int best_ranking = CPUIDLE_DIFF_NODE | CPUIDLE_THROTTLED | ++ CPUIDLE_THREAD_BUSY | CPUIDLE_DIFF_CPU | CPUIDLE_CACHE_BUSY | ++ CPUIDLE_DIFF_CORE | CPUIDLE_DIFF_THREAD; ++ int cpu_tmp; ++ ++ if (cpumask_test_cpu(best_cpu, tmpmask)) ++ goto out; ++ ++ for_each_cpu(cpu_tmp, tmpmask) { ++ int ranking, locality; ++ struct rq *tmp_rq; ++ ++ ranking = 0; ++ tmp_rq = cpu_rq(cpu_tmp); ++ ++ locality = rq->cpu_locality[cpu_tmp]; ++#ifdef CONFIG_NUMA ++ if (locality > 3) ++ ranking |= CPUIDLE_DIFF_NODE; ++ else ++#endif ++ if (locality > 2) ++ ranking |= CPUIDLE_DIFF_CPU; ++#ifdef CONFIG_SCHED_MC ++ else if (locality == 2) ++ ranking |= CPUIDLE_DIFF_CORE; ++ if (!(tmp_rq->cache_idle(cpu_tmp))) ++ ranking |= CPUIDLE_CACHE_BUSY; ++#endif ++#ifdef CONFIG_SCHED_SMT ++ if (locality == 1) ++ ranking |= CPUIDLE_DIFF_THREAD; ++ if (!(tmp_rq->siblings_idle(cpu_tmp))) ++ ranking |= CPUIDLE_THREAD_BUSY; ++#endif ++ if (scaling_rq(tmp_rq)) ++ ranking |= CPUIDLE_THROTTLED; ++ ++ if (ranking < best_ranking) { ++ best_cpu = cpu_tmp; ++ best_ranking = ranking; ++ } ++ } ++out: ++ return best_cpu; ++} ++ ++static void resched_best_mask(int best_cpu, struct rq *rq, cpumask_t *tmpmask) ++{ ++ best_cpu = best_mask_cpu(best_cpu, rq, tmpmask); ++ resched_curr(cpu_rq(best_cpu)); ++} ++ ++bool cpus_share_cache(int this_cpu, int that_cpu) ++{ ++ struct rq *this_rq = cpu_rq(this_cpu); ++ ++ return (this_rq->cpu_locality[that_cpu] < 3); ++} ++ ++#ifdef CONFIG_SCHED_SMT ++#ifdef CONFIG_SMT_NICE ++static const cpumask_t *thread_cpumask(int cpu); ++ ++/* Find the best real time priority running on any SMT siblings of cpu and if ++ * none are running, the static priority of the best deadline task running. ++ * The lookups to the other runqueues is done lockless as the occasional wrong ++ * value would be harmless. */ ++static int best_smt_bias(int cpu) ++{ ++ int other_cpu, best_bias = 0; ++ ++ for_each_cpu(other_cpu, thread_cpumask(cpu)) { ++ struct rq *rq; ++ ++ if (other_cpu == cpu) ++ continue; ++ rq = cpu_rq(other_cpu); ++ if (rq_idle(rq)) ++ continue; ++ if (!rq->online) ++ continue; ++ if (!rq->rq_mm) ++ continue; ++ if (likely(rq->rq_smt_bias > best_bias)) ++ best_bias = rq->rq_smt_bias; ++ } ++ return best_bias; ++} ++ ++static int task_prio_bias(struct task_struct *p) ++{ ++ if (rt_task(p)) ++ return 1 << 30; ++ else if (task_running_iso(p)) ++ return 1 << 29; ++ else if (task_running_idle(p)) ++ return 0; ++ return MAX_PRIO - p->static_prio; ++} ++ ++/* We've already decided p can run on CPU, now test if it shouldn't for SMT ++ * nice reasons. 
*/ ++static bool smt_should_schedule(struct task_struct *p, int cpu) ++{ ++ int best_bias, task_bias; ++ ++ /* Kernel threads always run */ ++ if (unlikely(!p->mm)) ++ return true; ++ if (rt_task(p)) ++ return true; ++ if (!idleprio_suitable(p)) ++ return true; ++ best_bias = best_smt_bias(cpu); ++ /* The smt siblings are all idle or running IDLEPRIO */ ++ if (best_bias < 1) ++ return true; ++ task_bias = task_prio_bias(p); ++ if (task_bias < 1) ++ return false; ++ if (task_bias >= best_bias) ++ return true; ++ /* Dither 25% cpu of normal tasks regardless of nice difference */ ++ if (best_bias % 4 == 1) ++ return true; ++ /* Sorry, you lose */ ++ return false; ++} ++#endif ++#endif ++ ++static bool resched_best_idle(struct task_struct *p) ++{ ++ cpumask_t tmpmask; ++ int best_cpu; ++ ++ cpumask_and(&tmpmask, &p->cpus_allowed, &grq.cpu_idle_map); ++ best_cpu = best_mask_cpu(task_cpu(p), task_rq(p), &tmpmask); ++#ifdef CONFIG_SMT_NICE ++ if (!smt_should_schedule(p, best_cpu)) ++ return false; ++#endif ++ resched_curr(cpu_rq(best_cpu)); ++ return true; ++} ++ ++static inline void resched_suitable_idle(struct task_struct *p) ++{ ++ if (suitable_idle_cpus(p)) ++ resched_best_idle(p); ++} ++/* ++ * Flags to tell us whether this CPU is running a CPU frequency governor that ++ * has slowed its speed or not. No locking required as the very rare wrongly ++ * read value would be harmless. ++ */ ++void cpu_scaling(int cpu) ++{ ++ cpu_rq(cpu)->scaling = true; ++} ++ ++void cpu_nonscaling(int cpu) ++{ ++ cpu_rq(cpu)->scaling = false; ++} ++ ++static inline bool scaling_rq(struct rq *rq) ++{ ++ return rq->scaling; ++} ++ ++static inline int locality_diff(int cpu, struct rq *rq) ++{ ++ return rq->cpu_locality[cpu]; ++} ++#else /* CONFIG_SMP */ ++static inline void set_cpuidle_map(int cpu) ++{ ++} ++ ++static inline void clear_cpuidle_map(int cpu) ++{ ++} ++ ++static inline bool suitable_idle_cpus(struct task_struct *p) ++{ ++ return uprq->curr == uprq->idle; ++} ++ ++static inline void resched_suitable_idle(struct task_struct *p) ++{ ++} ++ ++void cpu_scaling(int __unused) ++{ ++} ++ ++void cpu_nonscaling(int __unused) ++{ ++} ++ ++/* ++ * Although CPUs can scale in UP, there is nowhere else for tasks to go so this ++ * always returns 0. ++ */ ++static inline bool scaling_rq(struct rq *rq) ++{ ++ return false; ++} ++ ++static inline int locality_diff(int cpu, struct rq *rq) ++{ ++ return 0; ++} ++#endif /* CONFIG_SMP */ ++EXPORT_SYMBOL_GPL(cpu_scaling); ++EXPORT_SYMBOL_GPL(cpu_nonscaling); ++ ++static inline int normal_prio(struct task_struct *p) ++{ ++ if (has_rt_policy(p)) ++ return MAX_RT_PRIO - 1 - p->rt_priority; ++ if (idleprio_task(p)) ++ return IDLE_PRIO; ++ if (iso_task(p)) ++ return ISO_PRIO; ++ return NORMAL_PRIO; ++} ++ ++/* ++ * Calculate the current priority, i.e. the priority ++ * taken into account by the scheduler. This value might ++ * be boosted by RT tasks as it will be RT if the task got ++ * RT-boosted. If not then it returns p->normal_prio. ++ */ ++static int effective_prio(struct task_struct *p) ++{ ++ p->normal_prio = normal_prio(p); ++ /* ++ * If we are RT tasks or we were boosted to RT priority, ++ * keep the priority unchanged. Otherwise, update priority ++ * to the normal priority: ++ */ ++ if (!rt_prio(p->prio)) ++ return p->normal_prio; ++ return p->prio; ++} ++ ++/* ++ * activate_task - move a task to the runqueue. Enter with grq locked. 
++ */ ++static void activate_task(struct task_struct *p, struct rq *rq) ++{ ++ update_clocks(rq); ++ ++ /* ++ * Sleep time is in units of nanosecs, so shift by 20 to get a ++ * milliseconds-range estimation of the amount of time that the task ++ * spent sleeping: ++ */ ++ if (unlikely(prof_on == SLEEP_PROFILING)) { ++ if (p->state == TASK_UNINTERRUPTIBLE) ++ profile_hits(SLEEP_PROFILING, (void *)get_wchan(p), ++ (rq->clock_task - p->last_ran) >> 20); ++ } ++ ++ p->prio = effective_prio(p); ++ if (task_contributes_to_load(p)) ++ grq.nr_uninterruptible--; ++ enqueue_task(p, rq); ++ rq->soft_affined++; ++ p->on_rq = 1; ++ grq.nr_running++; ++ inc_qnr(); ++} ++ ++static inline void clear_sticky(struct task_struct *p); ++ ++/* ++ * deactivate_task - If it's running, it's not on the grq and we can just ++ * decrement the nr_running. Enter with grq locked. ++ */ ++static inline void deactivate_task(struct task_struct *p, struct rq *rq) ++{ ++ if (task_contributes_to_load(p)) ++ grq.nr_uninterruptible++; ++ rq->soft_affined--; ++ p->on_rq = 0; ++ grq.nr_running--; ++ clear_sticky(p); ++} ++ ++#ifdef CONFIG_SMP ++void set_task_cpu(struct task_struct *p, unsigned int cpu) ++{ ++#ifdef CONFIG_LOCKDEP ++ /* ++ * The caller should hold grq lock. ++ */ ++ WARN_ON_ONCE(debug_locks && !lockdep_is_held(&grq.lock)); ++#endif ++ if (task_cpu(p) == cpu) ++ return; ++ trace_sched_migrate_task(p, cpu); ++ perf_event_task_migrate(p); ++ ++ /* ++ * After ->cpu is set up to a new value, task_grq_lock(p, ...) can be ++ * successfully executed on another CPU. We must ensure that updates of ++ * per-task data have been completed by this moment. ++ */ ++ smp_wmb(); ++ if (p->on_rq) { ++ task_rq(p)->soft_affined--; ++ cpu_rq(cpu)->soft_affined++; ++ } ++ task_thread_info(p)->cpu = cpu; ++} ++ ++static inline void clear_sticky(struct task_struct *p) ++{ ++ p->sticky = false; ++} ++ ++static inline bool task_sticky(struct task_struct *p) ++{ ++ return p->sticky; ++} ++ ++/* Reschedule the best idle CPU that is not this one. */ ++static void ++resched_closest_idle(struct rq *rq, int cpu, struct task_struct *p) ++{ ++ cpumask_t tmpmask; ++ ++ cpumask_and(&tmpmask, &p->cpus_allowed, &grq.cpu_idle_map); ++ cpumask_clear_cpu(cpu, &tmpmask); ++ if (cpumask_empty(&tmpmask)) ++ return; ++ resched_best_mask(cpu, rq, &tmpmask); ++} ++ ++/* ++ * We set the sticky flag on a task that is descheduled involuntarily meaning ++ * it is awaiting further CPU time. If the last sticky task is still sticky ++ * but unlucky enough to not be the next task scheduled, we unstick it and try ++ * to find it an idle CPU. Realtime tasks do not stick to minimise their ++ * latency at all times. 
++ */ ++static inline void ++swap_sticky(struct rq *rq, int cpu, struct task_struct *p) ++{ ++ if (rq->sticky_task) { ++ if (rq->sticky_task == p) { ++ p->sticky = true; ++ return; ++ } ++ if (task_sticky(rq->sticky_task)) { ++ clear_sticky(rq->sticky_task); ++ resched_closest_idle(rq, cpu, rq->sticky_task); ++ } ++ } ++ if (!rt_task(p)) { ++ p->sticky = true; ++ rq->sticky_task = p; ++ } else { ++ resched_closest_idle(rq, cpu, p); ++ rq->sticky_task = NULL; ++ } ++} ++ ++static inline void unstick_task(struct rq *rq, struct task_struct *p) ++{ ++ rq->sticky_task = NULL; ++ clear_sticky(p); ++} ++#else ++static inline void clear_sticky(struct task_struct *p) ++{ ++} ++ ++static inline bool task_sticky(struct task_struct *p) ++{ ++ return false; ++} ++ ++static inline void ++swap_sticky(struct rq *rq, int cpu, struct task_struct *p) ++{ ++} ++ ++static inline void unstick_task(struct rq *rq, struct task_struct *p) ++{ ++} ++#endif ++ ++/* ++ * Move a task off the global queue and take it to a cpu for it will ++ * become the running task. ++ */ ++static inline void take_task(int cpu, struct task_struct *p) ++{ ++ set_task_cpu(p, cpu); ++ dequeue_task(p); ++ clear_sticky(p); ++ dec_qnr(); ++} ++ ++/* ++ * Returns a descheduling task to the grq runqueue unless it is being ++ * deactivated. ++ */ ++static inline void return_task(struct task_struct *p, struct rq *rq, bool deactivate) ++{ ++ if (deactivate) ++ deactivate_task(p, rq); ++ else { ++ inc_qnr(); ++ enqueue_task(p, rq); ++ } ++} ++ ++/* Enter with grq lock held. We know p is on the local cpu */ ++static inline void __set_tsk_resched(struct task_struct *p) ++{ ++ set_tsk_need_resched(p); ++ set_preempt_need_resched(); ++} ++ ++/* ++ * resched_task - mark a task 'to be rescheduled now'. ++ * ++ * On UP this means the setting of the need_resched flag, on SMP it ++ * might also involve a cross-CPU call to trigger the scheduler on ++ * the target CPU. ++ */ ++void resched_task(struct task_struct *p) ++{ ++ int cpu; ++ ++ lockdep_assert_held(&grq.lock); ++ ++ if (test_tsk_need_resched(p)) ++ return; ++ ++ set_tsk_need_resched(p); ++ ++ cpu = task_cpu(p); ++ if (cpu == smp_processor_id()) { ++ set_preempt_need_resched(); ++ return; ++ } ++ ++ smp_send_reschedule(cpu); ++} ++ ++/** ++ * task_curr - is this task currently executing on a CPU? ++ * @p: the task in question. ++ * ++ * Return: 1 if the task is currently executing. 0 otherwise. ++ */ ++inline int task_curr(const struct task_struct *p) ++{ ++ return cpu_curr(task_cpu(p)) == p; ++} ++ ++#ifdef CONFIG_SMP ++struct migration_req { ++ struct task_struct *task; ++ int dest_cpu; ++}; ++ ++/* ++ * wait_task_inactive - wait for a thread to unschedule. ++ * ++ * If @match_state is nonzero, it's the @p->state value just checked and ++ * not expected to change. If it changes, i.e. @p might have woken up, ++ * then return zero. When we succeed in waiting for @p to be off its CPU, ++ * we return a positive number (its total switch count). If a second call ++ * a short while later returns the same number, the caller can be sure that ++ * @p has remained unscheduled the whole time. ++ * ++ * The caller must ensure that the task *will* unschedule sometime soon, ++ * else this function might spin for a *long* time. This function can't ++ * be called with interrupts off, or it may introduce deadlock with ++ * smp_call_function() if an IPI is sent by the same process we are ++ * waiting to become inactive. 
++ */ ++unsigned long wait_task_inactive(struct task_struct *p, long match_state) ++{ ++ unsigned long flags; ++ bool running, on_rq; ++ unsigned long ncsw; ++ struct rq *rq; ++ ++ for (;;) { ++ rq = task_rq(p); ++ ++ /* ++ * If the task is actively running on another CPU ++ * still, just relax and busy-wait without holding ++ * any locks. ++ * ++ * NOTE! Since we don't hold any locks, it's not ++ * even sure that "rq" stays as the right runqueue! ++ * But we don't care, since this will return false ++ * if the runqueue has changed and p is actually now ++ * running somewhere else! ++ */ ++ while (task_running(p) && p == rq->curr) { ++ if (match_state && unlikely(p->state != match_state)) ++ return 0; ++ cpu_relax(); ++ } ++ ++ /* ++ * Ok, time to look more closely! We need the grq ++ * lock now, to be *sure*. If we're wrong, we'll ++ * just go back and repeat. ++ */ ++ rq = task_grq_lock(p, &flags); ++ trace_sched_wait_task(p); ++ running = task_running(p); ++ on_rq = p->on_rq; ++ ncsw = 0; ++ if (!match_state || p->state == match_state) ++ ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ ++ task_grq_unlock(&flags); ++ ++ /* ++ * If it changed from the expected state, bail out now. ++ */ ++ if (unlikely(!ncsw)) ++ break; ++ ++ /* ++ * Was it really running after all now that we ++ * checked with the proper locks actually held? ++ * ++ * Oops. Go back and try again.. ++ */ ++ if (unlikely(running)) { ++ cpu_relax(); ++ continue; ++ } ++ ++ /* ++ * It's not enough that it's not actively running, ++ * it must be off the runqueue _entirely_, and not ++ * preempted! ++ * ++ * So if it was still runnable (but just not actively ++ * running right now), it's preempted, and we should ++ * yield - it could be a while. ++ */ ++ if (unlikely(on_rq)) { ++ ktime_t to = ktime_set(0, NSEC_PER_SEC / HZ); ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ schedule_hrtimeout(&to, HRTIMER_MODE_REL); ++ continue; ++ } ++ ++ /* ++ * Ahh, all good. It wasn't running, and it wasn't ++ * runnable, which means that it will never become ++ * running in the future either. We're all done! ++ */ ++ break; ++ } ++ ++ return ncsw; ++} ++ ++/*** ++ * kick_process - kick a running thread to enter/exit the kernel ++ * @p: the to-be-kicked thread ++ * ++ * Cause a process which is running on another CPU to enter ++ * kernel-mode, without any delay. (to get signals handled.) ++ * ++ * NOTE: this function doesn't have to take the runqueue lock, ++ * because all it wants to ensure is that the remote task enters ++ * the kernel. If the IPI races and the task has been migrated ++ * to another CPU then no harm is done and the purpose has been ++ * achieved as well. ++ */ ++void kick_process(struct task_struct *p) ++{ ++ int cpu; ++ ++ preempt_disable(); ++ cpu = task_cpu(p); ++ if ((cpu != smp_processor_id()) && task_curr(p)) ++ smp_send_reschedule(cpu); ++ preempt_enable(); ++} ++EXPORT_SYMBOL_GPL(kick_process); ++#endif ++ ++/* ++ * RT tasks preempt purely on priority. SCHED_NORMAL tasks preempt on the ++ * basis of earlier deadlines. SCHED_IDLEPRIO don't preempt anything else or ++ * between themselves, they cooperatively multitask. An idle rq scores as ++ * prio PRIO_LIMIT so it is always preempted. 
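
To make the preemption ordering above concrete before can_preempt() is defined, here is a minimal userspace sketch of the same decision order; deadline_before() is assumed to be a plain "earlier than" comparison on the 64-bit virtual deadline:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool deadline_before(uint64_t a, uint64_t b) { return a < b; }

/* lower prio value wins outright; equal prio falls back to deadlines */
static bool can_preempt_sketch(int p_prio, uint64_t p_deadline,
			       int rq_prio, uint64_t rq_deadline)
{
	if (p_prio < rq_prio)
		return true;
	if (p_prio > rq_prio)
		return false;
	return deadline_before(p_deadline, rq_deadline);
}

int main(void)
{
	/* RT prio 10 vs SCHED_NORMAL prio 120: preempts regardless of deadline */
	printf("%d\n", can_preempt_sketch(10, 900, 120, 100));	/* 1 */
	/* equal priority: only an earlier deadline preempts */
	printf("%d\n", can_preempt_sketch(120, 100, 120, 900));	/* 1 */
	printf("%d\n", can_preempt_sketch(120, 900, 120, 100));	/* 0 */
	return 0;
}
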
++ */ ++static inline bool ++can_preempt(struct task_struct *p, int prio, u64 deadline) ++{ ++ /* Better static priority RT task or better policy preemption */ ++ if (p->prio < prio) ++ return true; ++ if (p->prio > prio) ++ return false; ++ /* SCHED_NORMAL, BATCH and ISO will preempt based on deadline */ ++ if (!deadline_before(p->deadline, deadline)) ++ return false; ++ return true; ++} ++ ++#ifdef CONFIG_SMP ++#define cpu_online_map (*(cpumask_t *)cpu_online_mask) ++#ifdef CONFIG_HOTPLUG_CPU ++/* ++ * Check to see if there is a task that is affined only to offline CPUs but ++ * still wants runtime. This happens to kernel threads during suspend/halt and ++ * disabling of CPUs. ++ */ ++static inline bool online_cpus(struct task_struct *p) ++{ ++ return (likely(cpumask_intersects(&cpu_online_map, &p->cpus_allowed))); ++} ++#else /* CONFIG_HOTPLUG_CPU */ ++/* All available CPUs are always online without hotplug. */ ++static inline bool online_cpus(struct task_struct *p) ++{ ++ return true; ++} ++#endif ++ ++/* ++ * Check to see if p can run on cpu, and if not, whether there are any online ++ * CPUs it can run on instead. ++ */ ++static inline bool needs_other_cpu(struct task_struct *p, int cpu) ++{ ++ if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed))) ++ return true; ++ return false; ++} ++ ++/* ++ * When all else is equal, still prefer this_rq. ++ */ ++static void try_preempt(struct task_struct *p, struct rq *this_rq) ++{ ++ struct rq *highest_prio_rq = NULL; ++ int cpu, highest_prio; ++ u64 latest_deadline; ++ cpumask_t tmp; ++ ++ /* ++ * We clear the sticky flag here because for a task to have called ++ * try_preempt with the sticky flag enabled means some complicated ++ * re-scheduling has occurred and we should ignore the sticky flag. ++ */ ++ clear_sticky(p); ++ ++ if (suitable_idle_cpus(p) && resched_best_idle(p)) ++ return; ++ ++ /* IDLEPRIO tasks never preempt anything but idle */ ++ if (p->policy == SCHED_IDLEPRIO) ++ return; ++ ++ if (likely(online_cpus(p))) ++ cpumask_and(&tmp, &cpu_online_map, &p->cpus_allowed); ++ else ++ return; ++ ++ highest_prio = latest_deadline = 0; ++ ++ for_each_cpu(cpu, &tmp) { ++ struct rq *rq; ++ int rq_prio; ++ ++ rq = cpu_rq(cpu); ++ rq_prio = rq->rq_prio; ++ if (rq_prio < highest_prio) ++ continue; ++ ++ if (rq_prio > highest_prio || ++ deadline_after(rq->rq_deadline, latest_deadline)) { ++ latest_deadline = rq->rq_deadline; ++ highest_prio = rq_prio; ++ highest_prio_rq = rq; ++ } ++ } ++ ++ if (likely(highest_prio_rq)) { ++#ifdef CONFIG_SMT_NICE ++ cpu = cpu_of(highest_prio_rq); ++ if (!smt_should_schedule(p, cpu)) ++ return; ++#endif ++ if (can_preempt(p, highest_prio, highest_prio_rq->rq_deadline)) ++ resched_curr(highest_prio_rq); ++ } ++} ++static int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check); ++#else /* CONFIG_SMP */ ++static inline bool needs_other_cpu(struct task_struct *p, int cpu) ++{ ++ return false; ++} ++ ++static void try_preempt(struct task_struct *p, struct rq *this_rq) ++{ ++ if (p->policy == SCHED_IDLEPRIO) ++ return; ++ if (can_preempt(p, uprq->rq_prio, uprq->rq_deadline)) ++ resched_curr(uprq); ++} ++ ++static inline int __set_cpus_allowed_ptr(struct task_struct *p, ++ const struct cpumask *new_mask, bool check) ++{ ++ return set_cpus_allowed_ptr(p, new_mask); ++} ++#endif /* CONFIG_SMP */ ++ ++static void ++ttwu_stat(struct task_struct *p, int cpu, int wake_flags) ++{ ++#ifdef CONFIG_SCHEDSTATS ++ struct rq *rq = this_rq(); ++ ++#ifdef CONFIG_SMP ++ int this_cpu = 
smp_processor_id(); ++ ++ if (cpu == this_cpu) ++ schedstat_inc(rq, ttwu_local); ++ else { ++ struct sched_domain *sd; ++ ++ rcu_read_lock(); ++ for_each_domain(this_cpu, sd) { ++ if (cpumask_test_cpu(cpu, sched_domain_span(sd))) { ++ schedstat_inc(sd, ttwu_wake_remote); ++ break; ++ } ++ } ++ rcu_read_unlock(); ++ } ++ ++#endif /* CONFIG_SMP */ ++ ++ schedstat_inc(rq, ttwu_count); ++#endif /* CONFIG_SCHEDSTATS */ ++} ++ ++void wake_up_if_idle(int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ rcu_read_lock(); ++ ++ if (!is_idle_task(rcu_dereference(rq->curr))) ++ goto out; ++ ++ grq_lock_irqsave(&flags); ++ if (likely(is_idle_task(rq->curr))) ++ smp_send_reschedule(cpu); ++ /* Else cpu is not in idle, do nothing here */ ++ grq_unlock_irqrestore(&flags); ++ ++out: ++ rcu_read_unlock(); ++} ++ ++#ifdef CONFIG_SMP ++void scheduler_ipi(void) ++{ ++ /* ++ * Fold TIF_NEED_RESCHED into the preempt_count; anybody setting ++ * TIF_NEED_RESCHED remotely (for the first time) will also send ++ * this IPI. ++ */ ++ preempt_fold_need_resched(); ++} ++#endif ++ ++static inline void ttwu_activate(struct task_struct *p, struct rq *rq, ++ bool is_sync) ++{ ++ activate_task(p, rq); ++ ++ /* ++ * Sync wakeups (i.e. those types of wakeups where the waker ++ * has indicated that it will leave the CPU in short order) ++ * don't trigger a preemption if there are no idle cpus, ++ * instead waiting for current to deschedule. ++ */ ++ if (!is_sync || suitable_idle_cpus(p)) ++ try_preempt(p, rq); ++} ++ ++static inline void ttwu_post_activation(struct task_struct *p, struct rq *rq, ++ bool success) ++{ ++ trace_sched_wakeup(p); ++ p->state = TASK_RUNNING; ++ ++ /* ++ * if a worker is waking up, notify workqueue. Note that on BFS, we ++ * don't really know what cpu it will be, so we fake it for ++ * wq_worker_waking_up :/ ++ */ ++ if ((p->flags & PF_WQ_WORKER) && success) ++ wq_worker_waking_up(p, cpu_of(rq)); ++} ++ ++/* ++ * wake flags ++ */ ++#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ ++#define WF_FORK 0x02 /* child wakeup after fork */ ++#define WF_MIGRATED 0x4 /* internal use, task got migrated */ ++ ++/*** ++ * try_to_wake_up - wake up a thread ++ * @p: the thread to be awakened ++ * @state: the mask of task states that can be woken ++ * @wake_flags: wake modifier flags (WF_*) ++ * ++ * Put it on the run-queue if it's not already there. The "current" ++ * thread is always on the run-queue (except when the actual ++ * re-schedule is in progress), and as such you're allowed to do ++ * the simpler "current->state = TASK_RUNNING" to mark yourself ++ * runnable without the overhead of this. ++ * ++ * Return: %true if @p was woken up, %false if it was already running. ++ * or @state didn't match @p's state. ++ */ ++static bool try_to_wake_up(struct task_struct *p, unsigned int state, ++ int wake_flags) ++{ ++ bool success = false; ++ unsigned long flags; ++ struct rq *rq; ++ int cpu; ++ ++ get_cpu(); ++ ++ /* ++ * If we are going to wake up a thread waiting for CONDITION we ++ * need to ensure that CONDITION=1 done by the caller can not be ++ * reordered with p->state check below. This pairs with mb() in ++ * set_current_state() the waiting thread does. 
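
The ordering requirement described above is the canonical kernel sleep/wake pattern; schematically (kernel-style pseudocode, not part of this patch, with CONDITION standing in for any wakeup condition):

	/* sleeper */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);	/* implies a full barrier */
		if (CONDITION)				/* tested after the state write */
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* waker */
	CONDITION = 1;			/* must be visible before the state check... */
	wake_up_process(p);		/* ...which smp_mb__before_spinlock() orders */
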
++ */
++	smp_mb__before_spinlock();
++
++	/*
++	 * No need to do time_lock_grq as we only need to update the rq clock
++	 * if we activate the task
++	 */
++	rq = task_grq_lock(p, &flags);
++	cpu = task_cpu(p);
++
++	/* state is a volatile long, I don't know why */
++	if (!((unsigned int)p->state & state))
++		goto out_unlock;
++
++	trace_sched_waking(p);
++
++	if (task_queued(p) || task_running(p))
++		goto out_running;
++
++	ttwu_activate(p, rq, wake_flags & WF_SYNC);
++	success = true;
++
++out_running:
++	ttwu_post_activation(p, rq, success);
++out_unlock:
++	task_grq_unlock(&flags);
++
++	ttwu_stat(p, cpu, wake_flags);
++
++	put_cpu();
++
++	return success;
++}
++
++/**
++ * try_to_wake_up_local - try to wake up a local task with grq lock held
++ * @p: the thread to be awakened
++ *
++ * Put @p on the run-queue if it's not already there. The caller must
++ * ensure that grq is locked and @p is not the current task.
++ * grq stays locked over invocation.
++ */
++static void try_to_wake_up_local(struct task_struct *p)
++{
++	struct rq *rq = task_rq(p);
++	bool success = false;
++
++	lockdep_assert_held(&grq.lock);
++
++	if (!(p->state & TASK_NORMAL))
++		return;
++
++	trace_sched_waking(p);
++
++	if (!task_queued(p)) {
++		if (likely(!task_running(p))) {
++			schedstat_inc(rq, ttwu_count);
++			schedstat_inc(rq, ttwu_local);
++		}
++		ttwu_activate(p, rq, false);
++		ttwu_stat(p, smp_processor_id(), 0);
++		success = true;
++	}
++	ttwu_post_activation(p, rq, success);
++}
++
++/**
++ * wake_up_process - Wake up a specific process
++ * @p: The process to be woken up.
++ *
++ * Attempt to wake up the nominated process and move it to the set of runnable
++ * processes.
++ *
++ * Return: 1 if the process was woken up, 0 if it was already running.
++ *
++ * It may be assumed that this function implies a write memory barrier before
++ * changing the task state if and only if any tasks are woken up.
++ */
++int wake_up_process(struct task_struct *p)
++{
++	return try_to_wake_up(p, TASK_NORMAL, 0);
++}
++EXPORT_SYMBOL(wake_up_process);
++
++int wake_up_state(struct task_struct *p, unsigned int state)
++{
++	return try_to_wake_up(p, state, 0);
++}
++
++static void time_slice_expired(struct task_struct *p);
++
++/*
++ * Perform scheduler related setup for a newly forked process p.
++ * p is forked by current.
++ */
++int sched_fork(unsigned long __maybe_unused clone_flags, struct task_struct *p)
++{
++#ifdef CONFIG_PREEMPT_NOTIFIERS
++	INIT_HLIST_HEAD(&p->preempt_notifiers);
++#endif
++	/*
++	 * The process state is set to the same value of the process executing
++	 * do_fork() code. That is running. This guarantees that nobody will
++	 * actually run it, and a signal or other external event cannot wake
++	 * it up and insert it on the runqueue either.
++	 */
++
++	/* Should be reset in fork.c but done here for ease of bfs patching */
++	p->on_rq =
++	p->utime =
++	p->stime =
++	p->utimescaled =
++	p->stimescaled =
++	p->sched_time =
++	p->stime_pc =
++	p->utime_pc = 0;
++
++	/*
++	 * Revert to default priority/policy on fork if requested.
++	 */
++	if (unlikely(p->sched_reset_on_fork)) {
++		if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) {
++			p->policy = SCHED_NORMAL;
++			p->normal_prio = normal_prio(p);
++		}
++
++		if (PRIO_TO_NICE(p->static_prio) < 0) {
++			p->static_prio = NICE_TO_PRIO(0);
++			p->normal_prio = p->static_prio;
++		}
++
++		/*
++		 * We don't need the reset flag anymore after the fork.
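
The sched_reset_on_fork branch above services the SCHED_RESET_ON_FORK flag that userspace can OR into the policy; a minimal illustrative caller (userspace, not part of the patch):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	/* run realtime ourselves, but have children revert to SCHED_NORMAL */
	if (sched_setscheduler(0, SCHED_FIFO | SCHED_RESET_ON_FORK, &sp) == -1) {
		perror("sched_setscheduler");
		return 1;
	}
	/* any fork()ed child now starts with the default policy and priority */
	return 0;
}
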
It has ++ * fulfilled its duty: ++ */ ++ p->sched_reset_on_fork = 0; ++ } ++ ++ INIT_LIST_HEAD(&p->run_list); ++#ifdef CONFIG_SCHED_INFO ++ if (unlikely(sched_info_on())) ++ memset(&p->sched_info, 0, sizeof(p->sched_info)); ++#endif ++ p->on_cpu = false; ++ clear_sticky(p); ++ init_task_preempt_count(p); ++ return 0; ++} ++ ++/* ++ * wake_up_new_task - wake up a newly created task for the first time. ++ * ++ * This function will do some initial scheduler statistics housekeeping ++ * that must be done for every newly created context, then puts the task ++ * on the runqueue and wakes it. ++ */ ++void wake_up_new_task(struct task_struct *p) ++{ ++ struct task_struct *parent; ++ unsigned long flags; ++ struct rq *rq; ++ ++ parent = p->parent; ++ rq = task_grq_lock(p, &flags); ++ ++ /* ++ * Reinit new task deadline as its creator deadline could have changed ++ * since call to dup_task_struct(). ++ */ ++ p->deadline = rq->rq_deadline; ++ ++ /* ++ * If the task is a new process, current and parent are the same. If ++ * the task is a new thread in the thread group, it will have much more ++ * in common with current than with the parent. ++ */ ++ set_task_cpu(p, task_cpu(rq->curr)); ++ ++ /* ++ * Make sure we do not leak PI boosting priority to the child. ++ */ ++ p->prio = rq->curr->normal_prio; ++ ++ activate_task(p, rq); ++ trace_sched_wakeup_new(p); ++ if (unlikely(p->policy == SCHED_FIFO)) ++ goto after_ts_init; ++ ++ /* ++ * Share the timeslice between parent and child, thus the ++ * total amount of pending timeslices in the system doesn't change, ++ * resulting in more scheduling fairness. If it's negative, it won't ++ * matter since that's the same as being 0. current's time_slice is ++ * actually in rq_time_slice when it's running, as is its last_ran ++ * value. rq->rq_deadline is only modified within schedule() so it ++ * is always equal to current->deadline. ++ */ ++ p->last_ran = rq->rq_last_ran; ++ if (likely(rq->rq_time_slice >= RESCHED_US * 2)) { ++ rq->rq_time_slice /= 2; ++ p->time_slice = rq->rq_time_slice; ++after_ts_init: ++ if (rq->curr == parent && !suitable_idle_cpus(p)) { ++ /* ++ * The VM isn't cloned, so we're in a good position to ++ * do child-runs-first in anticipation of an exec. This ++ * usually avoids a lot of COW overhead. ++ */ ++ __set_tsk_resched(parent); ++ } else ++ try_preempt(p, rq); ++ } else { ++ if (rq->curr == parent) { ++ /* ++ * Forking task has run out of timeslice. Reschedule it and ++ * start its child with a new time slice and deadline. The ++ * child will end up running first because its deadline will ++ * be slightly earlier. 
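
A worked example of the slice split described above, assuming the parent has 6 ms (6000 us) of its timeslice left (the value is illustrative; BFS's default rr_interval is in this range):

#include <stdio.h>

int main(void)
{
	int rq_time_slice_us = 6000;	/* parent's remaining timeslice */

	/* wake_up_new_task(): parent keeps half, the child gets the same half */
	rq_time_slice_us /= 2;
	int child_slice_us = rq_time_slice_us;

	/* 3000 + 3000 us: total pending timeslice in the system is unchanged */
	printf("parent: %d us, child: %d us\n", rq_time_slice_us, child_slice_us);
	return 0;
}
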
++ */ ++ rq->rq_time_slice = 0; ++ __set_tsk_resched(parent); ++ } ++ time_slice_expired(p); ++ } ++ task_grq_unlock(&flags); ++} ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ ++static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE; ++ ++void preempt_notifier_inc(void) ++{ ++ static_key_slow_inc(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_inc); ++ ++void preempt_notifier_dec(void) ++{ ++ static_key_slow_dec(&preempt_notifier_key); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_dec); ++ ++/** ++ * preempt_notifier_register - tell me when current is being preempted & rescheduled ++ * @notifier: notifier struct to register ++ */ ++void preempt_notifier_register(struct preempt_notifier *notifier) ++{ ++ if (!static_key_false(&preempt_notifier_key)) ++ WARN(1, "registering preempt_notifier while notifiers disabled\n"); ++ ++ hlist_add_head(¬ifier->link, ¤t->preempt_notifiers); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_register); ++ ++/** ++ * preempt_notifier_unregister - no longer interested in preemption notifications ++ * @notifier: notifier struct to unregister ++ * ++ * This is *not* safe to call from within a preemption notifier. ++ */ ++void preempt_notifier_unregister(struct preempt_notifier *notifier) ++{ ++ hlist_del(¬ifier->link); ++} ++EXPORT_SYMBOL_GPL(preempt_notifier_unregister); ++ ++static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_in(notifier, raw_smp_processor_id()); ++} ++ ++static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++ if (static_key_false(&preempt_notifier_key)) ++ __fire_sched_in_preempt_notifiers(curr); ++} ++ ++static void ++__fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ struct preempt_notifier *notifier; ++ ++ hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) ++ notifier->ops->sched_out(notifier, next); ++} ++ ++static __always_inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++ if (static_key_false(&preempt_notifier_key)) ++ __fire_sched_out_preempt_notifiers(curr, next); ++} ++ ++#else /* !CONFIG_PREEMPT_NOTIFIERS */ ++ ++static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) ++{ ++} ++ ++static inline void ++fire_sched_out_preempt_notifiers(struct task_struct *curr, ++ struct task_struct *next) ++{ ++} ++ ++#endif /* CONFIG_PREEMPT_NOTIFIERS */ ++ ++/** ++ * prepare_task_switch - prepare to switch tasks ++ * @rq: the runqueue preparing to switch ++ * @next: the task we are going to switch to. ++ * ++ * This is called with the rq lock held and interrupts off. It must ++ * be paired with a subsequent finish_task_switch after the context ++ * switch. ++ * ++ * prepare_task_switch sets up locking and calls architecture specific ++ * hooks. ++ */ ++static inline void ++prepare_task_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ sched_info_switch(rq, prev, next); ++ perf_event_task_sched_out(prev, next); ++ fire_sched_out_preempt_notifiers(prev, next); ++ prepare_lock_switch(rq, next); ++ prepare_arch_switch(next); ++} ++ ++/** ++ * finish_task_switch - clean up after a task-switch ++ * @rq: runqueue associated with task-switch ++ * @prev: the thread we just switched away from. 
++ *
++ * finish_task_switch must be called after the context switch, paired
++ * with a prepare_task_switch call before the context switch.
++ * finish_task_switch will reconcile locking set up by prepare_task_switch,
++ * and do any other architecture-specific cleanup actions.
++ *
++ * Note that we may have delayed dropping an mm in context_switch(). If
++ * so, we finish that here outside of the runqueue lock. (Doing it
++ * with the lock held can cause deadlocks; see schedule() for
++ * details.)
++ *
++ * The context switch has flipped the stack from under us and restored the
++ * local variables which were saved when this task called schedule() in the
++ * past. prev == current is still correct but we need to recalculate this_rq
++ * because prev may have moved to another CPU.
++ */
++static struct rq *finish_task_switch(struct task_struct *prev)
++	__releases(grq.lock)
++{
++	struct rq *rq = this_rq();
++	struct mm_struct *mm = rq->prev_mm;
++	long prev_state;
++
++	/*
++	 * The previous task will have left us with a preempt_count of 2
++	 * because it left us after:
++	 *
++	 *	schedule()
++	 *	  preempt_disable();			// 1
++	 *	  __schedule()
++	 *	    raw_spin_lock_irq(&rq->lock)	// 2
++	 *
++	 * Also, see FORK_PREEMPT_COUNT.
++	 */
++	if (WARN_ONCE(preempt_count() != 2*PREEMPT_DISABLE_OFFSET,
++		      "corrupted preempt_count: %s/%d/0x%x\n",
++		      current->comm, current->pid, preempt_count()))
++		preempt_count_set(FORK_PREEMPT_COUNT);
++
++	rq->prev_mm = NULL;
++
++	/*
++	 * A task struct has one reference for the use as "current".
++	 * If a task dies, then it sets TASK_DEAD in tsk->state and calls
++	 * schedule one last time. The schedule call will never return, and
++	 * the scheduled task must drop that reference.
++	 *
++	 * We must observe prev->state before clearing prev->on_cpu (in
++	 * finish_lock_switch), otherwise a concurrent wakeup can get prev
++	 * running on another CPU and we could race with its RUNNING -> DEAD
++	 * transition, resulting in a double drop.
++	 */
++	prev_state = prev->state;
++	vtime_task_switch(prev);
++	perf_event_task_sched_in(prev, current);
++	finish_lock_switch(rq, prev);
++	finish_arch_post_lock_switch();
++
++	fire_sched_in_preempt_notifiers(current);
++	if (mm)
++		mmdrop(mm);
++	if (unlikely(prev_state == TASK_DEAD)) {
++		/*
++		 * Remove function-return probe instances associated with this
++		 * task and put them back on the free list.
++		 */
++		kprobe_flush_task(prev);
++		put_task_struct(prev);
++	}
++	return rq;
++}
++
++/**
++ * schedule_tail - first thing a freshly forked thread must call.
++ * @prev: the thread we just switched away from.
++ */
++asmlinkage __visible void schedule_tail(struct task_struct *prev)
++	__releases(grq.lock)
++{
++	struct rq *rq;
++
++	/*
++	 * New tasks start with FORK_PREEMPT_COUNT, see there and
++	 * finish_task_switch() for details.
++	 *
++	 * finish_task_switch() will drop rq->lock() and lower preempt_count
++	 * and the preempt_enable() will end up enabling preemption (on
++	 * PREEMPT_COUNT kernels).
++	 */
++
++	rq = finish_task_switch(prev);
++	preempt_enable();
++
++	if (current->set_child_tid)
++		put_user(task_pid_vnr(current), current->set_child_tid);
++}
++
++/*
++ * context_switch - switch to the new MM and the new thread's register state.
++ */ ++static inline struct rq * ++context_switch(struct rq *rq, struct task_struct *prev, ++ struct task_struct *next) ++{ ++ struct mm_struct *mm, *oldmm; ++ ++ prepare_task_switch(rq, prev, next); ++ ++ mm = next->mm; ++ oldmm = prev->active_mm; ++ /* ++ * For paravirt, this is coupled with an exit in switch_to to ++ * combine the page table reload and the switch backend into ++ * one hypercall. ++ */ ++ arch_start_context_switch(prev); ++ ++ if (!mm) { ++ next->active_mm = oldmm; ++ atomic_inc(&oldmm->mm_count); ++ enter_lazy_tlb(oldmm, next); ++ } else ++ switch_mm(oldmm, mm, next); ++ ++ if (!prev->mm) { ++ prev->active_mm = NULL; ++ rq->prev_mm = oldmm; ++ } ++ /* ++ * Since the runqueue lock will be released by the next ++ * task (which is an invalid locking op but in the case ++ * of the scheduler it's an obvious special-case), so we ++ * do an early lockdep release here: ++ */ ++ spin_release(&grq.lock.dep_map, 1, _THIS_IP_); ++ ++ /* Here we just switch the register state and the stack. */ ++ switch_to(prev, next, prev); ++ barrier(); ++ ++ return finish_task_switch(prev); ++} ++ ++/* ++ * nr_running, nr_uninterruptible and nr_context_switches: ++ * ++ * externally visible scheduler statistics: current number of runnable ++ * threads, total number of context switches performed since bootup. All are ++ * measured without grabbing the grq lock but the occasional inaccurate result ++ * doesn't matter so long as it's positive. ++ */ ++unsigned long nr_running(void) ++{ ++ long nr = grq.nr_running; ++ ++ if (unlikely(nr < 0)) ++ nr = 0; ++ return (unsigned long)nr; ++} ++ ++static unsigned long nr_uninterruptible(void) ++{ ++ long nu = grq.nr_uninterruptible; ++ ++ if (unlikely(nu < 0)) ++ nu = 0; ++ return nu; ++} ++ ++/* ++ * Check if only the current task is running on the cpu. ++ * ++ * Caution: this function does not check that the caller has disabled ++ * preemption, thus the result might have a time-of-check-to-time-of-use ++ * race. The caller is responsible to use it correctly, for example: ++ * ++ * - from a non-preemptable section (of course) ++ * ++ * - from a thread that is bound to a single CPU ++ * ++ * - in a loop with very short iterations (e.g. a polling loop) ++ */ ++bool single_task_running(void) ++{ ++ if (cpu_rq(smp_processor_id())->soft_affined == 1) ++ return true; ++ else ++ return false; ++} ++EXPORT_SYMBOL(single_task_running); ++ ++unsigned long long nr_context_switches(void) ++{ ++ long long ns = grq.nr_switches; ++ ++ /* This is of course impossible */ ++ if (unlikely(ns < 0)) ++ ns = 1; ++ return (unsigned long long)ns; ++} ++ ++unsigned long nr_iowait(void) ++{ ++ unsigned long i, sum = 0; ++ ++ for_each_possible_cpu(i) ++ sum += atomic_read(&cpu_rq(i)->nr_iowait); ++ ++ return sum; ++} ++ ++unsigned long nr_iowait_cpu(int cpu) ++{ ++ struct rq *this = cpu_rq(cpu); ++ return atomic_read(&this->nr_iowait); ++} ++ ++unsigned long nr_active(void) ++{ ++ return nr_running() + nr_uninterruptible(); ++} ++ ++/* Beyond a task running on this CPU, load is equal everywhere on BFS, so we ++ * base it on the number of running or queued tasks with their ->rq pointer ++ * set to this cpu as being the CPU they're more likely to run on. 
*/ ++void get_iowait_load(unsigned long *nr_waiters, unsigned long *load) ++{ ++ struct rq *rq = this_rq(); ++ ++ *nr_waiters = atomic_read(&rq->nr_iowait); ++ *load = rq->soft_affined; ++} ++ ++/* Variables and functions for calc_load */ ++static unsigned long calc_load_update; ++unsigned long avenrun[3]; ++EXPORT_SYMBOL(avenrun); ++ ++/** ++ * get_avenrun - get the load average array ++ * @loads: pointer to dest load array ++ * @offset: offset to add ++ * @shift: shift count to shift the result left ++ * ++ * These values are estimates at best, so no need for locking. ++ */ ++void get_avenrun(unsigned long *loads, unsigned long offset, int shift) ++{ ++ loads[0] = (avenrun[0] + offset) << shift; ++ loads[1] = (avenrun[1] + offset) << shift; ++ loads[2] = (avenrun[2] + offset) << shift; ++} ++ ++static unsigned long ++calc_load(unsigned long load, unsigned long exp, unsigned long active) ++{ ++ load *= exp; ++ load += active * (FIXED_1 - exp); ++ return load >> FSHIFT; ++} ++ ++/* ++ * calc_load - update the avenrun load estimates every LOAD_FREQ seconds. ++ */ ++void calc_global_load(unsigned long ticks) ++{ ++ long active; ++ ++ if (time_before(jiffies, calc_load_update)) ++ return; ++ active = nr_active() * FIXED_1; ++ ++ avenrun[0] = calc_load(avenrun[0], EXP_1, active); ++ avenrun[1] = calc_load(avenrun[1], EXP_5, active); ++ avenrun[2] = calc_load(avenrun[2], EXP_15, active); ++ ++ calc_load_update = jiffies + LOAD_FREQ; ++} ++ ++DEFINE_PER_CPU(struct kernel_stat, kstat); ++DEFINE_PER_CPU(struct kernel_cpustat, kernel_cpustat); ++ ++EXPORT_PER_CPU_SYMBOL(kstat); ++EXPORT_PER_CPU_SYMBOL(kernel_cpustat); ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ ++/* ++ * There are no locks covering percpu hardirq/softirq time. ++ * They are only modified in account_system_vtime, on corresponding CPU ++ * with interrupts disabled. So, writes are safe. ++ * They are read and saved off onto struct rq in update_rq_clock(). ++ * This may result in other CPU reading this CPU's irq time and can ++ * race with irq/account_system_vtime on this CPU. We would either get old ++ * or new value with a side effect of accounting a slice of irq time to wrong ++ * task when irq is in progress while we read rq->clock. That is a worthy ++ * compromise in place of having locks on each irq in account_system_time. 
++ */ ++static DEFINE_PER_CPU(u64, cpu_hardirq_time); ++static DEFINE_PER_CPU(u64, cpu_softirq_time); ++ ++static DEFINE_PER_CPU(u64, irq_start_time); ++static int sched_clock_irqtime; ++ ++void enable_sched_clock_irqtime(void) ++{ ++ sched_clock_irqtime = 1; ++} ++ ++void disable_sched_clock_irqtime(void) ++{ ++ sched_clock_irqtime = 0; ++} ++ ++#ifndef CONFIG_64BIT ++static DEFINE_PER_CPU(seqcount_t, irq_time_seq); ++ ++static inline void irq_time_write_begin(void) ++{ ++ __this_cpu_inc(irq_time_seq.sequence); ++ smp_wmb(); ++} ++ ++static inline void irq_time_write_end(void) ++{ ++ smp_wmb(); ++ __this_cpu_inc(irq_time_seq.sequence); ++} ++ ++static inline u64 irq_time_read(int cpu) ++{ ++ u64 irq_time; ++ unsigned seq; ++ ++ do { ++ seq = read_seqcount_begin(&per_cpu(irq_time_seq, cpu)); ++ irq_time = per_cpu(cpu_softirq_time, cpu) + ++ per_cpu(cpu_hardirq_time, cpu); ++ } while (read_seqcount_retry(&per_cpu(irq_time_seq, cpu), seq)); ++ ++ return irq_time; ++} ++#else /* CONFIG_64BIT */ ++static inline void irq_time_write_begin(void) ++{ ++} ++ ++static inline void irq_time_write_end(void) ++{ ++} ++ ++static inline u64 irq_time_read(int cpu) ++{ ++ return per_cpu(cpu_softirq_time, cpu) + per_cpu(cpu_hardirq_time, cpu); ++} ++#endif /* CONFIG_64BIT */ ++ ++/* ++ * Called before incrementing preempt_count on {soft,}irq_enter ++ * and before decrementing preempt_count on {soft,}irq_exit. ++ */ ++void irqtime_account_irq(struct task_struct *curr) ++{ ++ unsigned long flags; ++ s64 delta; ++ int cpu; ++ ++ if (!sched_clock_irqtime) ++ return; ++ ++ local_irq_save(flags); ++ ++ cpu = smp_processor_id(); ++ delta = sched_clock_cpu(cpu) - __this_cpu_read(irq_start_time); ++ __this_cpu_add(irq_start_time, delta); ++ ++ irq_time_write_begin(); ++ /* ++ * We do not account for softirq time from ksoftirqd here. ++ * We want to continue accounting softirq time to ksoftirqd thread ++ * in that case, so as not to confuse scheduler with a special task ++ * that do not consume any time, but still wants to run. ++ */ ++ if (hardirq_count()) ++ __this_cpu_add(cpu_hardirq_time, delta); ++ else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) ++ __this_cpu_add(cpu_softirq_time, delta); ++ ++ irq_time_write_end(); ++ local_irq_restore(flags); ++} ++EXPORT_SYMBOL_GPL(irqtime_account_irq); ++ ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++#ifdef CONFIG_PARAVIRT ++static inline u64 steal_ticks(u64 steal) ++{ ++ if (unlikely(steal > NSEC_PER_SEC)) ++ return div_u64(steal, TICK_NSEC); ++ ++ return __iter_div_u64_rem(steal, TICK_NSEC, &steal); ++} ++#endif ++ ++static void update_rq_clock_task(struct rq *rq, s64 delta) ++{ ++/* ++ * In theory, the compile should just see 0 here, and optimize out the call ++ * to sched_rt_avg_update. But I don't trust it... ++ */ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ s64 irq_delta = irq_time_read(cpu_of(rq)) - rq->prev_irq_time; ++ ++ /* ++ * Since irq_time is only updated on {soft,}irq_exit, we might run into ++ * this case when a previous update_rq_clock() happened inside a ++ * {soft,}irq region. ++ * ++ * When this happens, we stop ->clock_task and only update the ++ * prev_irq_time stamp to account for the part that fit, so that a next ++ * update will consume the rest. This ensures ->clock_task is ++ * monotonic. ++ * ++ * It does however cause some slight miss-attribution of {soft,}irq ++ * time, a more accurate solution would be to update the irq_time using ++ * the current rq->clock timestamp, except that would require using ++ * atomic ops. 
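
Numerically, the clamp that follows is what keeps clock_task monotonic. A sketch with assumed values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t delta = 100000;		/* rq clock advanced 100 us this update */
	int64_t irq_delta = 130000;	/* but 130 us of irq time was recorded */

	/* only 100 us fit in this window; the remaining 30 us stay in
	 * prev_irq_time and are consumed by the next update instead */
	if (irq_delta > delta)
		irq_delta = delta;

	printf("clock_task advances by %lld ns\n",
	       (long long)(delta - irq_delta));	/* 0: task time stands still */
	return 0;
}
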
++ */ ++ if (irq_delta > delta) ++ irq_delta = delta; ++ ++ rq->prev_irq_time += irq_delta; ++ delta -= irq_delta; ++#endif ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ if (static_key_false((¶virt_steal_rq_enabled))) { ++ s64 steal = paravirt_steal_clock(cpu_of(rq)); ++ ++ steal -= rq->prev_steal_time_rq; ++ ++ if (unlikely(steal > delta)) ++ steal = delta; ++ ++ rq->prev_steal_time_rq += steal; ++ ++ delta -= steal; ++ } ++#endif ++ ++ rq->clock_task += delta; ++} ++ ++#ifndef nsecs_to_cputime ++# define nsecs_to_cputime(__nsecs) nsecs_to_jiffies(__nsecs) ++#endif ++ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++static void irqtime_account_hi_si(void) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ u64 latest_ns; ++ ++ latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_hardirq_time)); ++ if (latest_ns > cpustat[CPUTIME_IRQ]) ++ cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy; ++ ++ latest_ns = nsecs_to_cputime64(this_cpu_read(cpu_softirq_time)); ++ if (latest_ns > cpustat[CPUTIME_SOFTIRQ]) ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy; ++} ++#else /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++#define sched_clock_irqtime (0) ++ ++static inline void irqtime_account_hi_si(void) ++{ ++} ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++ ++static __always_inline bool steal_account_process_tick(void) ++{ ++#ifdef CONFIG_PARAVIRT ++ if (static_key_false(¶virt_steal_enabled)) { ++ u64 steal; ++ cputime_t steal_ct; ++ ++ steal = paravirt_steal_clock(smp_processor_id()); ++ steal -= this_rq()->prev_steal_time; ++ ++ /* ++ * cputime_t may be less precise than nsecs (eg: if it's ++ * based on jiffies). Lets cast the result to cputime ++ * granularity and account the rest on the next rounds. ++ */ ++ steal_ct = nsecs_to_cputime(steal); ++ this_rq()->prev_steal_time += cputime_to_nsecs(steal_ct); ++ ++ account_steal_time(steal_ct); ++ return steal_ct; ++ } ++#endif ++ return false; ++} ++ ++/* ++ * Accumulate raw cputime values of dead tasks (sig->[us]time) and live ++ * tasks (sum on group iteration) belonging to @tsk's group. ++ */ ++void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times) ++{ ++ struct signal_struct *sig = tsk->signal; ++ cputime_t utime, stime; ++ struct task_struct *t; ++ unsigned int seq, nextseq; ++ unsigned long flags; ++ ++ rcu_read_lock(); ++ /* Attempt a lockless read on the first round. */ ++ nextseq = 0; ++ do { ++ seq = nextseq; ++ flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq); ++ times->utime = sig->utime; ++ times->stime = sig->stime; ++ times->sum_exec_runtime = sig->sum_sched_runtime; ++ ++ for_each_thread(tsk, t) { ++ task_cputime(t, &utime, &stime); ++ times->utime += utime; ++ times->stime += stime; ++ times->sum_exec_runtime += task_sched_runtime(t); ++ } ++ /* If lockless access failed, take the lock. */ ++ nextseq = 1; ++ } while (need_seqretry(&sig->stats_lock, seq)); ++ done_seqretry_irqrestore(&sig->stats_lock, seq, flags); ++ rcu_read_unlock(); ++} ++ ++/* ++ * On each tick, see what percentage of that tick was attributed to each ++ * component and add the percentage to the _pc values. Once a _pc value has ++ * accumulated one tick's worth, account for that. This means the total ++ * percentage of load components will always be 128 (pseudo 100) per tick. 
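
The _pc accumulators defined next follow this scheme; a sketch of the carry logic, using an assumed 50/128 (about 39%) system-time share on each of three ticks:

#include <stdio.h>

int main(void)
{
	unsigned long stime_pc = 0;
	int jiffies_banked = 0;

	for (int tick = 0; tick < 3; tick++) {
		stime_pc += 50;			/* this tick's pseudo-percent share */
		if (stime_pc >= 128) {		/* a whole tick's worth accrued */
			jiffies_banked += stime_pc / 128;
			stime_pc %= 128;
		}
	}
	/* 150 -> bank 1 jiffy, carry 22/128 into the next tick */
	printf("banked %d jiffy, carry %lu/128\n", jiffies_banked, stime_pc);
	return 0;
}
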
++ */ ++static void pc_idle_time(struct rq *rq, struct task_struct *idle, unsigned long pc) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ ++ if (atomic_read(&rq->nr_iowait) > 0) { ++ rq->iowait_pc += pc; ++ if (rq->iowait_pc >= 128) { ++ cpustat[CPUTIME_IOWAIT] += (__force u64)cputime_one_jiffy * rq->iowait_pc / 128; ++ rq->iowait_pc %= 128; ++ } ++ } else { ++ rq->idle_pc += pc; ++ if (rq->idle_pc >= 128) { ++ cpustat[CPUTIME_IDLE] += (__force u64)cputime_one_jiffy * rq->idle_pc / 128; ++ rq->idle_pc %= 128; ++ } ++ } ++ acct_update_integrals(idle); ++} ++ ++static void ++pc_system_time(struct rq *rq, struct task_struct *p, int hardirq_offset, ++ unsigned long pc, unsigned long ns) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); ++ ++ p->stime_pc += pc; ++ if (p->stime_pc >= 128) { ++ int jiffs = p->stime_pc / 128; ++ ++ p->stime_pc %= 128; ++ p->stime += (__force u64)cputime_one_jiffy * jiffs; ++ p->stimescaled += one_jiffy_scaled * jiffs; ++ account_group_system_time(p, cputime_one_jiffy * jiffs); ++ } ++ p->sched_time += ns; ++ account_group_exec_runtime(p, ns); ++ ++ if (hardirq_count() - hardirq_offset) { ++ rq->irq_pc += pc; ++ if (rq->irq_pc >= 128) { ++ cpustat[CPUTIME_IRQ] += (__force u64)cputime_one_jiffy * rq->irq_pc / 128; ++ rq->irq_pc %= 128; ++ } ++ } else if (in_serving_softirq()) { ++ rq->softirq_pc += pc; ++ if (rq->softirq_pc >= 128) { ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy * rq->softirq_pc / 128; ++ rq->softirq_pc %= 128; ++ } ++ } else { ++ rq->system_pc += pc; ++ if (rq->system_pc >= 128) { ++ cpustat[CPUTIME_SYSTEM] += (__force u64)cputime_one_jiffy * rq->system_pc / 128; ++ rq->system_pc %= 128; ++ } ++ } ++ acct_update_integrals(p); ++} ++ ++static void pc_user_time(struct rq *rq, struct task_struct *p, ++ unsigned long pc, unsigned long ns) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ cputime_t one_jiffy_scaled = cputime_to_scaled(cputime_one_jiffy); ++ ++ p->utime_pc += pc; ++ if (p->utime_pc >= 128) { ++ int jiffs = p->utime_pc / 128; ++ ++ p->utime_pc %= 128; ++ p->utime += (__force u64)cputime_one_jiffy * jiffs; ++ p->utimescaled += one_jiffy_scaled * jiffs; ++ account_group_user_time(p, cputime_one_jiffy * jiffs); ++ } ++ p->sched_time += ns; ++ account_group_exec_runtime(p, ns); ++ ++ if (this_cpu_ksoftirqd() == p) { ++ /* ++ * ksoftirqd time do not get accounted in cpu_softirq_time. ++ * So, we have to handle it separately here. ++ */ ++ rq->softirq_pc += pc; ++ if (rq->softirq_pc >= 128) { ++ cpustat[CPUTIME_SOFTIRQ] += (__force u64)cputime_one_jiffy * rq->softirq_pc / 128; ++ rq->softirq_pc %= 128; ++ } ++ } ++ ++ if (task_nice(p) > 0 || idleprio_task(p)) { ++ rq->nice_pc += pc; ++ if (rq->nice_pc >= 128) { ++ cpustat[CPUTIME_NICE] += (__force u64)cputime_one_jiffy * rq->nice_pc / 128; ++ rq->nice_pc %= 128; ++ } ++ } else { ++ rq->user_pc += pc; ++ if (rq->user_pc >= 128) { ++ cpustat[CPUTIME_USER] += (__force u64)cputime_one_jiffy * rq->user_pc / 128; ++ rq->user_pc %= 128; ++ } ++ } ++ acct_update_integrals(p); ++} ++ ++/* ++ * Convert nanoseconds to pseudo percentage of one tick. Use 128 for fast ++ * shifts instead of 100 ++ */ ++#define NS_TO_PC(NS) (NS * 128 / JIFFY_NS) ++ ++/* ++ * This is called on clock ticks. ++ * Bank in p->sched_time the ns elapsed since the last tick or switch. ++ * CPU scheduler quota accounting is also performed here in microseconds. 
++ */ ++static void ++update_cpu_clock_tick(struct rq *rq, struct task_struct *p) ++{ ++ long account_ns = rq->clock_task - rq->rq_last_ran; ++ struct task_struct *idle = rq->idle; ++ unsigned long account_pc; ++ ++ if (unlikely(account_ns < 0) || steal_account_process_tick()) ++ goto ts_account; ++ ++ account_pc = NS_TO_PC(account_ns); ++ ++ /* Accurate tick timekeeping */ ++ if (user_mode(get_irq_regs())) ++ pc_user_time(rq, p, account_pc, account_ns); ++ else if (p != idle || (irq_count() != HARDIRQ_OFFSET)) ++ pc_system_time(rq, p, HARDIRQ_OFFSET, ++ account_pc, account_ns); ++ else ++ pc_idle_time(rq, idle, account_pc); ++ ++ if (sched_clock_irqtime) ++ irqtime_account_hi_si(); ++ ++ts_account: ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ if (rq->rq_policy != SCHED_FIFO && p != idle) { ++ s64 time_diff = rq->clock - rq->timekeep_clock; ++ ++ niffy_diff(&time_diff, 1); ++ rq->rq_time_slice -= NS_TO_US(time_diff); ++ } ++ ++ rq->rq_last_ran = rq->clock_task; ++ rq->timekeep_clock = rq->clock; ++} ++ ++/* ++ * This is called on context switches. ++ * Bank in p->sched_time the ns elapsed since the last tick or switch. ++ * CPU scheduler quota accounting is also performed here in microseconds. ++ */ ++static void ++update_cpu_clock_switch(struct rq *rq, struct task_struct *p) ++{ ++ long account_ns = rq->clock_task - rq->rq_last_ran; ++ struct task_struct *idle = rq->idle; ++ unsigned long account_pc; ++ ++ if (unlikely(account_ns < 0)) ++ goto ts_account; ++ ++ account_pc = NS_TO_PC(account_ns); ++ ++ /* Accurate subtick timekeeping */ ++ if (p != idle) { ++ pc_user_time(rq, p, account_pc, account_ns); ++ } ++ else ++ pc_idle_time(rq, idle, account_pc); ++ ++ts_account: ++ /* time_slice accounting is done in usecs to avoid overflow on 32bit */ ++ if (rq->rq_policy != SCHED_FIFO && p != idle) { ++ s64 time_diff = rq->clock - rq->timekeep_clock; ++ ++ niffy_diff(&time_diff, 1); ++ rq->rq_time_slice -= NS_TO_US(time_diff); ++ } ++ ++ rq->rq_last_ran = rq->clock_task; ++ rq->timekeep_clock = rq->clock; ++} ++ ++/* ++ * Return any ns on the sched_clock that have not yet been accounted in ++ * @p in case that task is currently running. ++ * ++ * Called with task_grq_lock() held. ++ */ ++static inline u64 do_task_delta_exec(struct task_struct *p, struct rq *rq) ++{ ++ u64 ns = 0; ++ ++ /* ++ * Must be ->curr _and_ ->on_rq. If dequeued, we would ++ * project cycles that may never be accounted to this ++ * thread, breaking clock_gettime(). ++ */ ++ if (p == rq->curr && p->on_rq) { ++ update_clocks(rq); ++ ns = rq->clock_task - rq->rq_last_ran; ++ if (unlikely((s64)ns < 0)) ++ ns = 0; ++ } ++ ++ return ns; ++} ++ ++/* ++ * Return accounted runtime for the task. ++ * Return separately the current's pending runtime that have not been ++ * accounted yet. ++ * ++ */ ++unsigned long long task_sched_runtime(struct task_struct *p) ++{ ++ unsigned long flags; ++ struct rq *rq; ++ u64 ns; ++ ++#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) ++ /* ++ * 64-bit doesn't need locks to atomically read a 64bit value. ++ * So we have a optimization chance when the task's delta_exec is 0. ++ * Reading ->on_cpu is racy, but this is ok. ++ * ++ * If we race with it leaving cpu, we'll take a lock. So we're correct. ++ * If we race with it entering cpu, unaccounted time is 0. This is ++ * indistinguishable from the read occurring a few cycles earlier. ++ * If we see ->on_cpu without ->on_rq, the task is leaving, and has ++ * been accounted, so we're correct here as well. 
++ */ ++ if (!p->on_cpu || !p->on_rq) ++ return tsk_seruntime(p); ++#endif ++ ++ rq = task_grq_lock(p, &flags); ++ ns = p->sched_time + do_task_delta_exec(p, rq); ++ task_grq_unlock(&flags); ++ ++ return ns; ++} ++ ++/* Compatibility crap */ ++void account_user_time(struct task_struct *p, cputime_t cputime, ++ cputime_t cputime_scaled) ++{ ++} ++ ++void account_idle_time(cputime_t cputime) ++{ ++} ++ ++/* ++ * Account guest cpu time to a process. ++ * @p: the process that the cpu time gets accounted to ++ * @cputime: the cpu time spent in virtual machine since the last update ++ * @cputime_scaled: cputime scaled by cpu frequency ++ */ ++static void account_guest_time(struct task_struct *p, cputime_t cputime, ++ cputime_t cputime_scaled) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ ++ /* Add guest time to process. */ ++ p->utime += (__force u64)cputime; ++ p->utimescaled += (__force u64)cputime_scaled; ++ account_group_user_time(p, cputime); ++ p->gtime += (__force u64)cputime; ++ ++ /* Add guest time to cpustat. */ ++ if (task_nice(p) > 0) { ++ cpustat[CPUTIME_NICE] += (__force u64)cputime; ++ cpustat[CPUTIME_GUEST_NICE] += (__force u64)cputime; ++ } else { ++ cpustat[CPUTIME_USER] += (__force u64)cputime; ++ cpustat[CPUTIME_GUEST] += (__force u64)cputime; ++ } ++} ++ ++/* ++ * Account system cpu time to a process and desired cpustat field ++ * @p: the process that the cpu time gets accounted to ++ * @cputime: the cpu time spent in kernel space since the last update ++ * @cputime_scaled: cputime scaled by cpu frequency ++ * @target_cputime64: pointer to cpustat field that has to be updated ++ */ ++static inline ++void __account_system_time(struct task_struct *p, cputime_t cputime, ++ cputime_t cputime_scaled, cputime64_t *target_cputime64) ++{ ++ /* Add system time to process. */ ++ p->stime += (__force u64)cputime; ++ p->stimescaled += (__force u64)cputime_scaled; ++ account_group_system_time(p, cputime); ++ ++ /* Add system time to cpustat. */ ++ *target_cputime64 += (__force u64)cputime; ++ ++ /* Account for system time used */ ++ acct_update_integrals(p); ++} ++ ++/* ++ * Account system cpu time to a process. ++ * @p: the process that the cpu time gets accounted to ++ * @hardirq_offset: the offset to subtract from hardirq_count() ++ * @cputime: the cpu time spent in kernel space since the last update ++ * @cputime_scaled: cputime scaled by cpu frequency ++ * This is for guest only now. ++ */ ++void account_system_time(struct task_struct *p, int hardirq_offset, ++ cputime_t cputime, cputime_t cputime_scaled) ++{ ++ ++ if ((p->flags & PF_VCPU) && (irq_count() - hardirq_offset == 0)) ++ account_guest_time(p, cputime, cputime_scaled); ++} ++ ++/* ++ * Account for involuntary wait time. ++ * @steal: the cpu time spent in involuntary wait ++ */ ++void account_steal_time(cputime_t cputime) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ ++ cpustat[CPUTIME_STEAL] += (__force u64)cputime; ++} ++ ++/* ++ * Account for idle time. ++ * @cputime: the cpu time spent in idle wait ++ */ ++static void account_idle_times(cputime_t cputime) ++{ ++ u64 *cpustat = kcpustat_this_cpu->cpustat; ++ struct rq *rq = this_rq(); ++ ++ if (atomic_read(&rq->nr_iowait) > 0) ++ cpustat[CPUTIME_IOWAIT] += (__force u64)cputime; ++ else ++ cpustat[CPUTIME_IDLE] += (__force u64)cputime; ++} ++ ++#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE ++ ++void account_process_tick(struct task_struct *p, int user_tick) ++{ ++} ++ ++/* ++ * Account multiple ticks of steal time. 
++ * @p: the process from which the cpu time has been stolen ++ * @ticks: number of stolen ticks ++ */ ++void account_steal_ticks(unsigned long ticks) ++{ ++ account_steal_time(jiffies_to_cputime(ticks)); ++} ++ ++/* ++ * Account multiple ticks of idle time. ++ * @ticks: number of stolen ticks ++ */ ++void account_idle_ticks(unsigned long ticks) ++{ ++ account_idle_times(jiffies_to_cputime(ticks)); ++} ++#endif ++ ++static inline void grq_iso_lock(void) ++ __acquires(grq.iso_lock) ++{ ++ raw_spin_lock(&grq.iso_lock); ++} ++ ++static inline void grq_iso_unlock(void) ++ __releases(grq.iso_lock) ++{ ++ raw_spin_unlock(&grq.iso_lock); ++} ++ ++/* ++ * Functions to test for when SCHED_ISO tasks have used their allocated ++ * quota as real time scheduling and convert them back to SCHED_NORMAL. ++ * Where possible, the data is tested lockless, to avoid grabbing iso_lock ++ * because the occasional inaccurate result won't matter. However the ++ * tick data is only ever modified under lock. iso_refractory is only simply ++ * set to 0 or 1 so it's not worth grabbing the lock yet again for that. ++ */ ++static bool set_iso_refractory(void) ++{ ++ grq.iso_refractory = true; ++ return grq.iso_refractory; ++} ++ ++static bool clear_iso_refractory(void) ++{ ++ grq.iso_refractory = false; ++ return grq.iso_refractory; ++} ++ ++/* ++ * Test if SCHED_ISO tasks have run longer than their alloted period as RT ++ * tasks and set the refractory flag if necessary. There is 10% hysteresis ++ * for unsetting the flag. 115/128 is ~90/100 as a fast shift instead of a ++ * slow division. ++ */ ++static bool test_ret_isorefractory(struct rq *rq) ++{ ++ if (likely(!grq.iso_refractory)) { ++ if (grq.iso_ticks > ISO_PERIOD * sched_iso_cpu) ++ return set_iso_refractory(); ++ } else { ++ if (grq.iso_ticks < ISO_PERIOD * (sched_iso_cpu * 115 / 128)) ++ return clear_iso_refractory(); ++ } ++ return grq.iso_refractory; ++} ++ ++static void iso_tick(void) ++{ ++ grq_iso_lock(); ++ grq.iso_ticks += 100; ++ grq_iso_unlock(); ++} ++ ++/* No SCHED_ISO task was running so decrease rq->iso_ticks */ ++static inline void no_iso_tick(void) ++{ ++ if (grq.iso_ticks) { ++ grq_iso_lock(); ++ grq.iso_ticks -= grq.iso_ticks / ISO_PERIOD + 1; ++ if (unlikely(grq.iso_refractory && grq.iso_ticks < ++ ISO_PERIOD * (sched_iso_cpu * 115 / 128))) ++ clear_iso_refractory(); ++ grq_iso_unlock(); ++ } ++} ++ ++/* This manages tasks that have run out of timeslice during a scheduler_tick */ ++static void task_running_tick(struct rq *rq) ++{ ++ struct task_struct *p; ++ ++ /* ++ * If a SCHED_ISO task is running we increment the iso_ticks. In ++ * order to prevent SCHED_ISO tasks from causing starvation in the ++ * presence of true RT tasks we account those as iso_ticks as well. ++ */ ++ if ((rt_queue(rq) || (iso_queue(rq) && !grq.iso_refractory))) { ++ if (grq.iso_ticks <= (ISO_PERIOD * 128) - 128) ++ iso_tick(); ++ } else ++ no_iso_tick(); ++ ++ if (iso_queue(rq)) { ++ if (unlikely(test_ret_isorefractory(rq))) { ++ if (rq_running_iso(rq)) { ++ /* ++ * SCHED_ISO task is running as RT and limit ++ * has been hit. Force it to reschedule as ++ * SCHED_NORMAL by zeroing its time_slice ++ */ ++ rq->rq_time_slice = 0; ++ } ++ } ++ } ++ ++ /* SCHED_FIFO tasks never run out of timeslice. */ ++ if (rq->rq_policy == SCHED_FIFO) ++ return; ++ /* ++ * Tasks that were scheduled in the first half of a tick are not ++ * allowed to run into the 2nd half of the next tick if they will ++ * run out of time slice in the interim. 
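
Plugging assumed defaults into the refractory test earlier in this hunk (a sched_iso_cpu of 70 and a placeholder ISO_PERIOD; the patch derives the real value from HZ) shows the roughly 10% hysteresis band:

#include <stdio.h>

#define ISO_PERIOD	500	/* assumed scaled value, for illustration only */

int main(void)
{
	int sched_iso_cpu = 70;	/* assumed default ISO CPU percentage */
	long trip  = ISO_PERIOD * sched_iso_cpu;		/* demote above this */
	long reset = ISO_PERIOD * (sched_iso_cpu * 115 / 128);	/* re-allow below this */

	/* 115/128 ~= 0.898, so iso_ticks must decay ~10% below the trip point */
	printf("trip %ld, reset %ld (%.1f%% of trip)\n",
	       trip, reset, 100.0 * reset / trip);
	return 0;
}
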
Otherwise, if they have ++ * less than RESCHED_US μs of time slice left they will be rescheduled. ++ */ ++ if (rq->dither) { ++ if (rq->rq_time_slice > HALF_JIFFY_US) ++ return; ++ else ++ rq->rq_time_slice = 0; ++ } else if (rq->rq_time_slice >= RESCHED_US) ++ return; ++ ++ /* p->time_slice < RESCHED_US. We only modify task_struct under grq lock */ ++ p = rq->curr; ++ ++ grq_lock(); ++ requeue_task(p); ++ __set_tsk_resched(p); ++ grq_unlock(); ++} ++ ++/* ++ * This function gets called by the timer code, with HZ frequency. ++ * We call it with interrupts disabled. The data modified is all ++ * local to struct rq so we don't need to grab grq lock. ++ */ ++void scheduler_tick(void) ++{ ++ int cpu __maybe_unused = smp_processor_id(); ++ struct rq *rq = cpu_rq(cpu); ++ ++ sched_clock_tick(); ++ /* grq lock not grabbed, so only update rq clock */ ++ update_rq_clock(rq); ++ update_cpu_clock_tick(rq, rq->curr); ++ if (!rq_idle(rq)) ++ task_running_tick(rq); ++ else ++ no_iso_tick(); ++ rq->last_tick = rq->clock; ++ perf_event_task_tick(); ++} ++ ++notrace unsigned long get_parent_ip(unsigned long addr) ++{ ++ if (in_lock_functions(addr)) { ++ addr = CALLER_ADDR2; ++ if (in_lock_functions(addr)) ++ addr = CALLER_ADDR3; ++ } ++ return addr; ++} ++ ++#if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ ++ defined(CONFIG_PREEMPT_TRACER)) ++void preempt_count_add(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0))) ++ return; ++#endif ++ __preempt_count_add(val); ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Spinlock count overflowing soon? ++ */ ++ DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= ++ PREEMPT_MASK - 10); ++#endif ++ if (preempt_count() == val) { ++ unsigned long ip = get_parent_ip(CALLER_ADDR1); ++#ifdef CONFIG_DEBUG_PREEMPT ++ current->preempt_disable_ip = ip; ++#endif ++ trace_preempt_off(CALLER_ADDR0, ip); ++ } ++} ++EXPORT_SYMBOL(preempt_count_add); ++NOKPROBE_SYMBOL(preempt_count_add); ++ ++void preempt_count_sub(int val) ++{ ++#ifdef CONFIG_DEBUG_PREEMPT ++ /* ++ * Underflow? ++ */ ++ if (DEBUG_LOCKS_WARN_ON(val > preempt_count())) ++ return; ++ /* ++ * Is the spinlock portion underflowing? ++ */ ++ if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) && ++ !(preempt_count() & PREEMPT_MASK))) ++ return; ++#endif ++ ++ if (preempt_count() == val) ++ trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); ++ __preempt_count_sub(val); ++} ++EXPORT_SYMBOL(preempt_count_sub); ++NOKPROBE_SYMBOL(preempt_count_sub); ++#endif ++ ++/* ++ * Deadline is "now" in niffies + (offset by priority). Setting the deadline ++ * is the key to everything. It distributes cpu fairly amongst tasks of the ++ * same nice value, it proportions cpu according to nice level, it means the ++ * task that last woke up the longest ago has the earliest deadline, thus ++ * ensuring that interactive tasks get low latency on wake up. The CPU ++ * proportion works out to the square of the virtual deadline difference, so ++ * this equation will give nice 19 3% CPU compared to nice 0. 
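
A rough feel for the numbers in this equation, assuming the BFS conventions that prio_ratios[] starts at 128 and grows about 10% per nice level, and that rr_interval defaults to 6 ms (both assumptions, not shown in this hunk):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long prio_ratios[40];
	int rr_interval = 6;	/* assumed default, in ms */

	prio_ratios[0] = 128;
	for (int i = 1; i < 40; i++)
		prio_ratios[i] = prio_ratios[i - 1] * 11 / 10;

	/* prio_deadline_diff(): ratio * rr_interval * (1000000 / 128) ns */
	uint64_t nice0  = (uint64_t)prio_ratios[20] * rr_interval * (1000000 / 128);
	uint64_t nice19 = (uint64_t)prio_ratios[39] * rr_interval * (1000000 / 128);

	/* roughly 39 ms vs 238 ms: nice 19 waits ~6x longer for its deadline */
	printf("offset nice 0: ~%llu ms, nice 19: ~%llu ms\n",
	       (unsigned long long)(nice0 / 1000000),
	       (unsigned long long)(nice19 / 1000000));
	return 0;
}
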
++ */
++static inline u64 prio_deadline_diff(int user_prio)
++{
++	return (prio_ratios[user_prio] * rr_interval * (MS_TO_NS(1) / 128));
++}
++
++static inline u64 task_deadline_diff(struct task_struct *p)
++{
++	return prio_deadline_diff(TASK_USER_PRIO(p));
++}
++
++static inline u64 static_deadline_diff(int static_prio)
++{
++	return prio_deadline_diff(USER_PRIO(static_prio));
++}
++
++static inline int longest_deadline_diff(void)
++{
++	return prio_deadline_diff(39);
++}
++
++static inline int ms_longest_deadline_diff(void)
++{
++	return NS_TO_MS(longest_deadline_diff());
++}
++
++/*
++ * The time_slice is only refilled when it is empty and that is when we set a
++ * new deadline.
++ */
++static void time_slice_expired(struct task_struct *p)
++{
++	p->time_slice = timeslice();
++	p->deadline = grq.niffies + task_deadline_diff(p);
++#ifdef CONFIG_SMT_NICE
++	if (!p->mm)
++		p->smt_bias = 0;
++	else if (rt_task(p))
++		p->smt_bias = 1 << 30;
++	else if (task_running_iso(p))
++		p->smt_bias = 1 << 29;
++	else if (idleprio_task(p)) {
++		if (task_running_idle(p))
++			p->smt_bias = 0;
++		else
++			p->smt_bias = 1;
++	} else if (--p->smt_bias < 1)
++		p->smt_bias = MAX_PRIO - p->static_prio;
++#endif
++}
++
++/*
++ * Timeslices below RESCHED_US are considered as good as expired as there's no
++ * point rescheduling when there's so little time left. SCHED_BATCH tasks
++ * have been flagged as not latency sensitive and likely to be fully CPU
++ * bound so every time they're rescheduled they have their time_slice
++ * refilled, but get a new later deadline to have little effect on
++ * SCHED_NORMAL tasks.
++ */
++static inline void check_deadline(struct task_struct *p)
++{
++	if (p->time_slice < RESCHED_US || batch_task(p))
++		time_slice_expired(p);
++}
++
++#define BITOP_WORD(nr)		((nr) / BITS_PER_LONG)
++
++/*
++ * Scheduler queue bitmap specific find next bit.
++ */
++static inline unsigned long
++next_sched_bit(const unsigned long *addr, unsigned long offset)
++{
++	const unsigned long *p;
++	unsigned long result;
++	unsigned long size;
++	unsigned long tmp;
++
++	size = PRIO_LIMIT;
++	if (offset >= size)
++		return size;
++
++	p = addr + BITOP_WORD(offset);
++	result = offset & ~(BITS_PER_LONG-1);
++	size -= result;
++	offset %= BITS_PER_LONG;
++	if (offset) {
++		tmp = *(p++);
++		tmp &= (~0UL << offset);
++		if (size < BITS_PER_LONG)
++			goto found_first;
++		if (tmp)
++			goto found_middle;
++		size -= BITS_PER_LONG;
++		result += BITS_PER_LONG;
++	}
++	while (size & ~(BITS_PER_LONG-1)) {
++		if ((tmp = *(p++)))
++			goto found_middle;
++		result += BITS_PER_LONG;
++		size -= BITS_PER_LONG;
++	}
++	if (!size)
++		return result;
++	tmp = *p;
++
++found_first:
++	tmp &= (~0UL >> (BITS_PER_LONG - size));
++	if (tmp == 0UL)		/* Are any bits set? */
++		return result + size;	/* Nope. */
++found_middle:
++	return result + __ffs(tmp);
++}
++
++/*
++ * O(n) lookup of all tasks in the global runqueue. The real brainfuck
++ * of lock contention and O(n). It's not really O(n) as only the queued,
++ * but not running tasks are scanned, and is O(n) queued in the worst case
++ * scenario only because the right task can be found before scanning all of
++ * them.
++ * Tasks are selected in this order:
++ * Real time tasks are selected purely by their static priority and in the
++ * order they were queued, so the lowest value idx, and the first queued task
++ * of that priority value is chosen.
++ * If no real time tasks are found, the SCHED_ISO priority is checked, and ++ * all SCHED_ISO tasks have the same priority value, so they're selected by ++ * the earliest deadline value. ++ * If no SCHED_ISO tasks are found, SCHED_NORMAL tasks are selected by the ++ * earliest deadline. ++ * Finally if no SCHED_NORMAL tasks are found, SCHED_IDLEPRIO tasks are ++ * selected by the earliest deadline. ++ */ ++static inline struct ++task_struct *earliest_deadline_task(struct rq *rq, int cpu, struct task_struct *idle) ++{ ++ struct task_struct *edt = NULL; ++ unsigned long idx = -1; ++ ++ do { ++ struct list_head *queue; ++ struct task_struct *p; ++ u64 earliest_deadline; ++ ++ idx = next_sched_bit(grq.prio_bitmap, ++idx); ++ if (idx >= PRIO_LIMIT) ++ return idle; ++ queue = grq.queue + idx; ++ ++ if (idx < MAX_RT_PRIO) { ++ /* We found an rt task */ ++ list_for_each_entry(p, queue, run_list) { ++ /* Make sure cpu affinity is ok */ ++ if (needs_other_cpu(p, cpu)) ++ continue; ++ edt = p; ++ goto out_take; ++ } ++ /* ++ * None of the RT tasks at this priority can run on ++ * this cpu ++ */ ++ continue; ++ } ++ ++ /* ++ * No rt tasks. Find the earliest deadline task. Now we're in ++ * O(n) territory. ++ */ ++ earliest_deadline = ~0ULL; ++ list_for_each_entry(p, queue, run_list) { ++ u64 dl; ++ ++ /* Make sure cpu affinity is ok */ ++ if (needs_other_cpu(p, cpu)) ++ continue; ++ ++#ifdef CONFIG_SMT_NICE ++ if (!smt_should_schedule(p, cpu)) ++ continue; ++#endif ++ /* ++ * Soft affinity happens here by not scheduling a task ++ * with its sticky flag set that ran on a different CPU ++ * last when the CPU is scaling, or by greatly biasing ++ * against its deadline when not, based on cpu cache ++ * locality. ++ */ ++ if (sched_interactive) ++ dl = p->deadline; ++ else { ++ int tcpu = task_cpu(p); ++ ++ if (tcpu != cpu && task_sticky(p) && scaling_rq(rq)) ++ continue; ++ dl = p->deadline << locality_diff(tcpu, rq); ++ } ++ ++ if (deadline_before(dl, earliest_deadline)) { ++ earliest_deadline = dl; ++ edt = p; ++ } ++ } ++ } while (!edt); ++ ++out_take: ++ take_task(cpu, edt); ++ return edt; ++} ++ ++ ++/* ++ * Print scheduling while atomic bug: ++ */ ++static noinline void __schedule_bug(struct task_struct *prev) ++{ ++ if (oops_in_progress) ++ return; ++ ++ printk(KERN_ERR "BUG: scheduling while atomic: %s/%d/0x%08x\n", ++ prev->comm, prev->pid, preempt_count()); ++ ++ debug_show_held_locks(prev); ++ print_modules(); ++ if (irqs_disabled()) ++ print_irqtrace_events(prev); ++#ifdef CONFIG_DEBUG_PREEMPT ++ if (in_atomic_preempt_off()) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(current->preempt_disable_ip); ++ pr_cont("\n"); ++ } ++#endif ++ dump_stack(); ++ add_taint(TAINT_WARN, LOCKDEP_STILL_OK); ++} ++ ++/* ++ * Various schedule()-time debugging checks and statistics: ++ */ ++static inline void schedule_debug(struct task_struct *prev) ++{ ++#ifdef CONFIG_SCHED_STACK_END_CHECK ++ BUG_ON(task_stack_end_corrupted(prev)); ++#endif ++ ++ if (unlikely(in_atomic_preempt_off())) { ++ __schedule_bug(prev); ++ preempt_count_set(PREEMPT_DISABLED); ++ } ++ rcu_sleep_check(); ++ ++ profile_hit(SCHED_PROFILING, __builtin_return_address(0)); ++ ++ schedstat_inc(this_rq(), sched_count); ++} ++ ++/* ++ * The currently running task's information is all stored in rq local data ++ * which is only modified by the local CPU, thereby allowing the data to be ++ * changed without grabbing the grq lock. 
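
The locality bias in earliest_deadline_task() above is a plain left shift, so every step of cache distance doubles the effective deadline of a remote task. The locality_diff() levels used here are assumed for illustration (0 shared cache, 1 same package, 2 remote node):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t deadline = 1000000;	/* queued task's virtual deadline */

	for (int locality = 0; locality <= 2; locality++)
		/* shared cache x1, same package x2, remote node x4 */
		printf("locality %d: effective deadline %llu\n",
		       locality, (unsigned long long)(deadline << locality));
	return 0;
}
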
++ */
++static inline void set_rq_task(struct rq *rq, struct task_struct *p)
++{
++	rq->rq_time_slice = p->time_slice;
++	rq->rq_deadline = p->deadline;
++	rq->rq_last_ran = p->last_ran = rq->clock_task;
++	rq->rq_policy = p->policy;
++	rq->rq_prio = p->prio;
++#ifdef CONFIG_SMT_NICE
++	rq->rq_mm = p->mm;
++	rq->rq_smt_bias = p->smt_bias;
++#endif
++	if (p != rq->idle)
++		rq->rq_running = true;
++	else
++		rq->rq_running = false;
++}
++
++static void reset_rq_task(struct rq *rq, struct task_struct *p)
++{
++	rq->rq_policy = p->policy;
++	rq->rq_prio = p->prio;
++#ifdef CONFIG_SMT_NICE
++	rq->rq_smt_bias = p->smt_bias;
++#endif
++}
++
++#ifdef CONFIG_SMT_NICE
++/* Iterate over smt siblings when we've scheduled a process on cpu and decide
++ * whether they should continue running or be descheduled. */
++static void check_smt_siblings(int cpu)
++{
++	int other_cpu;
++
++	for_each_cpu(other_cpu, thread_cpumask(cpu)) {
++		struct task_struct *p;
++		struct rq *rq;
++
++		if (other_cpu == cpu)
++			continue;
++		rq = cpu_rq(other_cpu);
++		if (rq_idle(rq))
++			continue;
++		if (!rq->online)
++			continue;
++		p = rq->curr;
++		if (!smt_should_schedule(p, cpu)) {
++			set_tsk_need_resched(p);
++			smp_send_reschedule(other_cpu);
++		}
++	}
++}
++
++static void wake_smt_siblings(int cpu)
++{
++	int other_cpu;
++
++	if (!queued_notrunning())
++		return;
++
++	for_each_cpu(other_cpu, thread_cpumask(cpu)) {
++		struct rq *rq;
++
++		if (other_cpu == cpu)
++			continue;
++		rq = cpu_rq(other_cpu);
++		if (rq_idle(rq)) {
++			struct task_struct *p = rq->curr;
++
++			set_tsk_need_resched(p);
++			smp_send_reschedule(other_cpu);
++		}
++	}
++}
++#else
++static void check_smt_siblings(int __maybe_unused cpu) {}
++static void wake_smt_siblings(int __maybe_unused cpu) {}
++#endif
++
++/*
++ * schedule() is the main scheduler function.
++ *
++ * The main means of driving the scheduler and thus entering this function are:
++ *
++ *   1. Explicit blocking: mutex, semaphore, waitqueue, etc.
++ *
++ *   2. TIF_NEED_RESCHED flag is checked on interrupt and userspace return
++ *      paths. For example, see arch/x86/entry_64.S.
++ *
++ *      To drive preemption between tasks, the scheduler sets the flag in timer
++ *      interrupt handler scheduler_tick().
++ *
++ *   3. Wakeups don't really cause entry into schedule(). They add a
++ *      task to the run-queue and that's it.
++ *
++ *      Now, if the new task added to the run-queue preempts the current
++ *      task, then the wakeup sets TIF_NEED_RESCHED and schedule() gets
++ *      called on the nearest possible occasion:
++ *
++ *       - If the kernel is preemptible (CONFIG_PREEMPT=y):
++ *
++ *         - in syscall or exception context, at the next outermost
++ *           preempt_enable(). (this might be as soon as the wake_up()'s
++ *           spin_unlock()!)
++ *
++ *         - in IRQ context, return from interrupt-handler to
++ *           preemptible context
++ *
++ *       - If the kernel is not preemptible (CONFIG_PREEMPT is not set)
++ *         then at the next:
++ *
++ *          - cond_resched() call
++ *          - explicit schedule() call
++ *          - return from syscall or exception to user-space
++ *          - return from interrupt-handler to user-space
++ *
++ * WARNING: must be called with preemption disabled!
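++ *
++ * A minimal call site therefore looks like this (cf. schedule() below):
++ *
++ *	preempt_disable();
++ *	__schedule(false);
++ *	sched_preempt_enable_no_resched();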
++ */ ++static void __sched notrace __schedule(bool preempt) ++{ ++ struct task_struct *prev, *next, *idle; ++ unsigned long *switch_count; ++ bool deactivate = false; ++ struct rq *rq; ++ int cpu; ++ ++ cpu = smp_processor_id(); ++ rq = cpu_rq(cpu); ++ rcu_note_context_switch(); ++ prev = rq->curr; ++ ++ /* ++ * do_exit() calls schedule() with preemption disabled as an exception; ++ * however we must fix that up, otherwise the next task will see an ++ * inconsistent (higher) preempt count. ++ * ++ * It also avoids the below schedule_debug() test from complaining ++ * about this. ++ */ ++ if (unlikely(prev->state == TASK_DEAD)) ++ preempt_enable_no_resched_notrace(); ++ ++ schedule_debug(prev); ++ ++ /* ++ * Make sure that signal_pending_state()->signal_pending() below ++ * can't be reordered with __set_current_state(TASK_INTERRUPTIBLE) ++ * done by the caller to avoid the race with signal_wake_up(). ++ */ ++ smp_mb__before_spinlock(); ++ grq_lock_irq(); ++ ++ switch_count = &prev->nivcsw; ++ if (!preempt && prev->state) { ++ if (unlikely(signal_pending_state(prev->state, prev))) { ++ prev->state = TASK_RUNNING; ++ } else { ++ deactivate = true; ++ prev->on_rq = 0; ++ ++ /* ++ * If a worker is going to sleep, notify and ++ * ask workqueue whether it wants to wake up a ++ * task to maintain concurrency. If so, wake ++ * up the task. ++ */ ++ if (prev->flags & PF_WQ_WORKER) { ++ struct task_struct *to_wakeup; ++ ++ to_wakeup = wq_worker_sleeping(prev, cpu); ++ if (to_wakeup) { ++ /* This shouldn't happen, but does */ ++ if (unlikely(to_wakeup == prev)) ++ deactivate = false; ++ else ++ try_to_wake_up_local(to_wakeup); ++ } ++ } ++ } ++ switch_count = &prev->nvcsw; ++ } ++ ++ update_clocks(rq); ++ update_cpu_clock_switch(rq, prev); ++ if (rq->clock - rq->last_tick > HALF_JIFFY_NS) ++ rq->dither = false; ++ else ++ rq->dither = true; ++ ++ clear_tsk_need_resched(prev); ++ clear_preempt_need_resched(); ++ ++ idle = rq->idle; ++ if (idle != prev) { ++ /* Update all the information stored on struct rq */ ++ prev->time_slice = rq->rq_time_slice; ++ prev->deadline = rq->rq_deadline; ++ check_deadline(prev); ++ prev->last_ran = rq->clock_task; ++ ++ /* Task changed affinity off this CPU */ ++ if (likely(!needs_other_cpu(prev, cpu))) { ++ if (!deactivate) { ++ if (!queued_notrunning()) { ++ /* ++ * We now know prev is the only thing that is ++ * awaiting CPU so we can bypass rechecking for ++ * the earliest deadline task and just run it ++ * again. ++ */ ++ set_rq_task(rq, prev); ++ check_smt_siblings(cpu); ++ grq_unlock_irq(); ++ goto rerun_prev_unlocked; ++ } else ++ swap_sticky(rq, cpu, prev); ++ } ++ } ++ return_task(prev, rq, deactivate); ++ } ++ ++ if (unlikely(!queued_notrunning())) { ++ /* ++ * This CPU is now truly idle as opposed to when idle is ++ * scheduled as a high priority task in its own right. ++ */ ++ next = idle; ++ schedstat_inc(rq, sched_goidle); ++ set_cpuidle_map(cpu); ++ } else { ++ next = earliest_deadline_task(rq, cpu, idle); ++ if (likely(next->prio != PRIO_LIMIT)) ++ clear_cpuidle_map(cpu); ++ else ++ set_cpuidle_map(cpu); ++ } ++ ++ if (likely(prev != next)) { ++ /* ++ * Don't reschedule an idle task or deactivated tasks ++ */ ++ if (prev != idle && !deactivate) ++ resched_suitable_idle(prev); ++ /* ++ * Don't stick tasks when a real time task is going to run as ++ * they may literally get stuck. 
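++		 * (A sticky task is biased back onto the CPU it last ran
++		 * on; with an RT task about to monopolise this CPU that
++		 * bias could leave it waiting indefinitely, so drop it.)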
++ */ ++ if (rt_task(next)) ++ unstick_task(rq, prev); ++ set_rq_task(rq, next); ++ if (next != idle) ++ check_smt_siblings(cpu); ++ else ++ wake_smt_siblings(cpu); ++ grq.nr_switches++; ++ prev->on_cpu = false; ++ next->on_cpu = true; ++ rq->curr = next; ++ ++*switch_count; ++ ++ trace_sched_switch(preempt, prev, next); ++ rq = context_switch(rq, prev, next); /* unlocks the grq */ ++ cpu = cpu_of(rq); ++ idle = rq->idle; ++ } else { ++ check_smt_siblings(cpu); ++ grq_unlock_irq(); ++ } ++ ++rerun_prev_unlocked: ++ return; ++} ++ ++static inline void sched_submit_work(struct task_struct *tsk) ++{ ++ if (!tsk->state || tsk_is_pi_blocked(tsk) || ++ preempt_count() || ++ signal_pending_state(tsk->state, tsk)) ++ return; ++ ++ /* ++ * If we are going to sleep and we have plugged IO queued, ++ * make sure to submit it to avoid deadlocks. ++ */ ++ if (blk_needs_flush_plug(tsk)) ++ blk_schedule_flush_plug(tsk); ++} ++ ++asmlinkage __visible void __sched schedule(void) ++{ ++ struct task_struct *tsk = current; ++ ++ sched_submit_work(tsk); ++ do { ++ preempt_disable(); ++ __schedule(false); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++} ++ ++EXPORT_SYMBOL(schedule); ++ ++#ifdef CONFIG_CONTEXT_TRACKING ++asmlinkage __visible void __sched schedule_user(void) ++{ ++ /* ++ * If we come here after a random call to set_need_resched(), ++ * or we have been woken up remotely but the IPI has not yet arrived, ++ * we haven't yet exited the RCU idle mode. Do it here manually until ++ * we find a better solution. ++ * ++ * NB: There are buggy callers of this function. Ideally we ++ * should warn if prev_state != IN_USER, but that will trigger ++ * too frequently to make sense yet. ++ */ ++ enum ctx_state prev_state = exception_enter(); ++ schedule(); ++ exception_exit(prev_state); ++} ++#endif ++ ++/** ++ * schedule_preempt_disabled - called with preemption disabled ++ * ++ * Returns with preemption disabled. Note: preempt_count must be 1 ++ */ ++void __sched schedule_preempt_disabled(void) ++{ ++ sched_preempt_enable_no_resched(); ++ schedule(); ++ preempt_disable(); ++} ++ ++static void __sched notrace preempt_schedule_common(void) ++{ ++ do { ++ preempt_disable_notrace(); ++ __schedule(true); ++ preempt_enable_no_resched_notrace(); ++ ++ /* ++ * Check again in case we missed a preemption opportunity ++ * between schedule and now. ++ */ ++ } while (need_resched()); ++} ++ ++#ifdef CONFIG_PREEMPT ++/* ++ * this is the entry point to schedule() from in-kernel preemption ++ * off of preempt_enable. Kernel preemptions off return from interrupt ++ * occur there and call schedule directly. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule(void) ++{ ++ /* ++ * If there is a non-zero preempt_count or interrupts are disabled, ++ * we do not want to preempt the current task. Just return.. ++ */ ++ if (likely(!preemptible())) ++ return; ++ ++ preempt_schedule_common(); ++} ++NOKPROBE_SYMBOL(preempt_schedule); ++EXPORT_SYMBOL(preempt_schedule); ++ ++/** ++ * preempt_schedule_notrace - preempt_schedule called by tracing ++ * ++ * The tracing infrastructure uses preempt_enable_notrace to prevent ++ * recursion and tracing preempt enabling caused by the tracing ++ * infrastructure itself. But as tracing can happen in areas coming ++ * from userspace or just about to enter userspace, a preempt enable ++ * can occur before user_exit() is called. This will cause the scheduler ++ * to be called when the system is still in usermode. 
++ * ++ * To prevent this, the preempt_enable_notrace will use this function ++ * instead of preempt_schedule() to exit user context if needed before ++ * calling the scheduler. ++ */ ++asmlinkage __visible void __sched notrace preempt_schedule_notrace(void) ++{ ++ enum ctx_state prev_ctx; ++ ++ if (likely(!preemptible())) ++ return; ++ ++ do { ++ preempt_disable_notrace(); ++ /* ++ * Needs preempt disabled in case user_exit() is traced ++ * and the tracer calls preempt_enable_notrace() causing ++ * an infinite recursion. ++ */ ++ prev_ctx = exception_enter(); ++ __schedule(true); ++ exception_exit(prev_ctx); ++ ++ preempt_enable_no_resched_notrace(); ++ } while (need_resched()); ++} ++EXPORT_SYMBOL_GPL(preempt_schedule_notrace); ++ ++#endif /* CONFIG_PREEMPT */ ++ ++/* ++ * this is the entry point to schedule() from kernel preemption ++ * off of irq context. ++ * Note, that this is called and return with irqs disabled. This will ++ * protect us against recursive calling from irq. ++ */ ++asmlinkage __visible void __sched preempt_schedule_irq(void) ++{ ++ enum ctx_state prev_state; ++ ++ /* Catch callers which need to be fixed */ ++ BUG_ON(preempt_count() || !irqs_disabled()); ++ ++ prev_state = exception_enter(); ++ ++ do { ++ preempt_disable(); ++ local_irq_enable(); ++ __schedule(true); ++ local_irq_disable(); ++ sched_preempt_enable_no_resched(); ++ } while (need_resched()); ++ ++ exception_exit(prev_state); ++} ++ ++int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, ++ void *key) ++{ ++ return try_to_wake_up(curr->private, mode, wake_flags); ++} ++EXPORT_SYMBOL(default_wake_function); ++ ++#ifdef CONFIG_RT_MUTEXES ++ ++/* ++ * rt_mutex_setprio - set the current priority of a task ++ * @p: task ++ * @prio: prio value (kernel-internal form) ++ * ++ * This function changes the 'effective' priority of a task. It does ++ * not touch ->normal_prio like __setscheduler(). ++ * ++ * Used by the rt_mutex code to implement priority inheritance ++ * logic. Call site only calls if the priority of the task changed. ++ */ ++void rt_mutex_setprio(struct task_struct *p, int prio) ++{ ++ unsigned long flags; ++ int queued, oldprio; ++ struct rq *rq; ++ ++ BUG_ON(prio < 0 || prio > MAX_PRIO); ++ ++ rq = task_grq_lock(p, &flags); ++ ++ /* ++ * Idle task boosting is a nono in general. There is one ++ * exception, when PREEMPT_RT and NOHZ is active: ++ * ++ * The idle task calls get_next_timer_interrupt() and holds ++ * the timer wheel base->lock on the CPU and another CPU wants ++ * to access the timer (probably to cancel it). We can safely ++ * ignore the boosting request, as the idle CPU runs this code ++ * with interrupts disabled and will complete the lock ++ * protected section without being interrupted. So there is no ++ * real need to boost. ++ */ ++ if (unlikely(p == rq->idle)) { ++ WARN_ON(p != rq->curr); ++ WARN_ON(p->pi_blocked_on); ++ goto out_unlock; ++ } ++ ++ trace_sched_pi_setprio(p, prio); ++ oldprio = p->prio; ++ queued = task_queued(p); ++ if (queued) ++ dequeue_task(p); ++ p->prio = prio; ++ if (task_running(p) && prio > oldprio) ++ resched_task(p); ++ if (queued) { ++ enqueue_task(p, rq); ++ try_preempt(p, rq); ++ } ++ ++out_unlock: ++ task_grq_unlock(&flags); ++} ++ ++#endif ++ ++/* ++ * Adjust the deadline for when the priority is to change, before it's ++ * changed. 
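++ * (Only the priority-dependent offset is swapped: the new deadline is
++ * the old one minus task_deadline_diff(p) plus static_deadline_diff() of
++ * the new priority, so the time already waited carries over the renice.)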
++ */
++static inline void adjust_deadline(struct task_struct *p, int new_prio)
++{
++	p->deadline += static_deadline_diff(new_prio) - task_deadline_diff(p);
++}
++
++void set_user_nice(struct task_struct *p, long nice)
++{
++	int queued, new_static, old_static;
++	unsigned long flags;
++	struct rq *rq;
++
++	if (task_nice(p) == nice || nice < MIN_NICE || nice > MAX_NICE)
++		return;
++	new_static = NICE_TO_PRIO(nice);
++	/*
++	 * We have to be careful, if called from sys_setpriority(),
++	 * the task might be in the middle of scheduling on another CPU.
++	 */
++	rq = time_task_grq_lock(p, &flags);
++	/*
++	 * The RT priorities are set via sched_setscheduler(), but we still
++	 * allow the 'normal' nice value to be set - but as expected
++	 * it won't have any effect on scheduling until the task returns
++	 * to SCHED_NORMAL/SCHED_BATCH:
++	 */
++	if (has_rt_policy(p)) {
++		p->static_prio = new_static;
++		goto out_unlock;
++	}
++	queued = task_queued(p);
++	if (queued)
++		dequeue_task(p);
++
++	adjust_deadline(p, new_static);
++	old_static = p->static_prio;
++	p->static_prio = new_static;
++	p->prio = effective_prio(p);
++
++	if (queued) {
++		enqueue_task(p, rq);
++		if (new_static < old_static)
++			try_preempt(p, rq);
++	} else if (task_running(p)) {
++		reset_rq_task(rq, p);
++		if (old_static < new_static)
++			resched_task(p);
++	}
++out_unlock:
++	task_grq_unlock(&flags);
++}
++EXPORT_SYMBOL(set_user_nice);
++
++/*
++ * can_nice - check if a task can reduce its nice value
++ * @p: task
++ * @nice: nice value
++ */
++int can_nice(const struct task_struct *p, const int nice)
++{
++	/* convert nice value [19,-20] to rlimit style value [1,40] */
++	int nice_rlim = nice_to_rlimit(nice);
++
++	return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
++		capable(CAP_SYS_NICE));
++}
++
++#ifdef __ARCH_WANT_SYS_NICE
++
++/*
++ * sys_nice - change the priority of the current process.
++ * @increment: priority increment
++ *
++ * sys_setpriority is a more generic, but much slower function that
++ * does similar things.
++ */
++SYSCALL_DEFINE1(nice, int, increment)
++{
++	long nice, retval;
++
++	/*
++	 * Setpriority might change our priority at the same moment.
++	 * We don't have to worry. Conceptually one call occurs first
++	 * and we have a single winner.
++	 */
++
++	increment = clamp(increment, -NICE_WIDTH, NICE_WIDTH);
++	nice = task_nice(current) + increment;
++
++	nice = clamp_val(nice, MIN_NICE, MAX_NICE);
++	if (increment < 0 && !can_nice(current, nice))
++		return -EPERM;
++
++	retval = security_task_setnice(current, nice);
++	if (retval)
++		return retval;
++
++	set_user_nice(current, nice);
++	return 0;
++}
++
++#endif
++
++/**
++ * task_prio - return the priority value of a given task.
++ * @p: the task in question.
++ *
++ * Return: The priority value as seen by users in /proc.
++ * RT tasks are offset by -100. Normal tasks are centered around 1, value goes
++ * from 0 (SCHED_ISO) up to 82 (nice +19 SCHED_IDLEPRIO).
++ */
++int task_prio(const struct task_struct *p)
++{
++	int delta, prio = p->prio - MAX_RT_PRIO;
++
++	/* rt tasks and iso tasks */
++	if (prio <= 0)
++		goto out;
++
++	/* Convert to ms to avoid overflows */
++	delta = NS_TO_MS(p->deadline - grq.niffies);
++	delta = delta * 40 / ms_longest_deadline_diff();
++	if (delta > 0 && delta <= 80)
++		prio += delta;
++	if (idleprio_task(p))
++		prio += 40;
++out:
++	return prio;
++}
++
++/**
++ * idle_cpu - is a given cpu idle currently?
++ * @cpu: the processor in question.
++ *
++ * Return: 1 if the CPU is currently idle. 0 otherwise.
++ */ ++int idle_cpu(int cpu) ++{ ++ return cpu_curr(cpu) == cpu_rq(cpu)->idle; ++} ++ ++/** ++ * idle_task - return the idle task for a given cpu. ++ * @cpu: the processor in question. ++ * ++ * Return: The idle task for the cpu @cpu. ++ */ ++struct task_struct *idle_task(int cpu) ++{ ++ return cpu_rq(cpu)->idle; ++} ++ ++/** ++ * find_process_by_pid - find a process with a matching PID value. ++ * @pid: the pid in question. ++ * ++ * The task of @pid, if found. %NULL otherwise. ++ */ ++static inline struct task_struct *find_process_by_pid(pid_t pid) ++{ ++ return pid ? find_task_by_vpid(pid) : current; ++} ++ ++/* Actually do priority change: must hold grq lock. */ ++static void __setscheduler(struct task_struct *p, struct rq *rq, int policy, ++ int prio, bool keep_boost) ++{ ++ int oldrtprio, oldprio; ++ ++ p->policy = policy; ++ oldrtprio = p->rt_priority; ++ p->rt_priority = prio; ++ p->normal_prio = normal_prio(p); ++ oldprio = p->prio; ++ /* ++ * Keep a potential priority boosting if called from ++ * sched_setscheduler(). ++ */ ++ if (keep_boost) { ++ /* ++ * Take priority boosted tasks into account. If the new ++ * effective priority is unchanged, we just store the new ++ * normal parameters and do not touch the scheduler class and ++ * the runqueue. This will be done when the task deboost ++ * itself. ++ */ ++ p->prio = rt_mutex_get_effective_prio(p, p->normal_prio); ++ } else ++ p->prio = p->normal_prio; ++ if (task_running(p)) { ++ reset_rq_task(rq, p); ++ /* Resched only if we might now be preempted */ ++ if (p->prio > oldprio || p->rt_priority > oldrtprio) ++ resched_task(p); ++ } ++} ++ ++/* ++ * check the target process has a UID that matches the current process's ++ */ ++static bool check_same_owner(struct task_struct *p) ++{ ++ const struct cred *cred = current_cred(), *pcred; ++ bool match; ++ ++ rcu_read_lock(); ++ pcred = __task_cred(p); ++ match = (uid_eq(cred->euid, pcred->euid) || ++ uid_eq(cred->euid, pcred->uid)); ++ rcu_read_unlock(); ++ return match; ++} ++ ++static int ++__sched_setscheduler(struct task_struct *p, int policy, ++ const struct sched_param *param, bool user, bool pi) ++{ ++ struct sched_param zero_param = { .sched_priority = 0 }; ++ int queued, retval, oldpolicy = -1; ++ unsigned long flags, rlim_rtprio = 0; ++ int reset_on_fork; ++ struct rq *rq; ++ ++ /* may grab non-irq protected spin_locks */ ++ BUG_ON(in_interrupt()); ++ ++ if (is_rt_policy(policy) && !capable(CAP_SYS_NICE)) { ++ unsigned long lflags; ++ ++ if (!lock_task_sighand(p, &lflags)) ++ return -ESRCH; ++ rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); ++ unlock_task_sighand(p, &lflags); ++ if (rlim_rtprio) ++ goto recheck; ++ /* ++ * If the caller requested an RT policy without having the ++ * necessary rights, we downgrade the policy to SCHED_ISO. ++ * We also set the parameter to zero to pass the checks. ++ */ ++ policy = SCHED_ISO; ++ param = &zero_param; ++ } ++recheck: ++ /* double check policy once rq lock held */ ++ if (policy < 0) { ++ reset_on_fork = p->sched_reset_on_fork; ++ policy = oldpolicy = p->policy; ++ } else { ++ reset_on_fork = !!(policy & SCHED_RESET_ON_FORK); ++ policy &= ~SCHED_RESET_ON_FORK; ++ ++ if (!SCHED_RANGE(policy)) ++ return -EINVAL; ++ } ++ ++ /* ++ * Valid priorities for SCHED_FIFO and SCHED_RR are ++ * 1..MAX_USER_RT_PRIO-1, valid priority for SCHED_NORMAL and ++ * SCHED_BATCH is 0. 
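++	 * E.g. SCHED_FIFO with sched_priority 0, or SCHED_NORMAL with a
++	 * non-zero sched_priority, fails the consistency check below with
++	 * -EINVAL.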
++	 */
++	if (param->sched_priority < 0 ||
++	    (p->mm && param->sched_priority > MAX_USER_RT_PRIO - 1) ||
++	    (!p->mm && param->sched_priority > MAX_RT_PRIO - 1))
++		return -EINVAL;
++	if (is_rt_policy(policy) != (param->sched_priority != 0))
++		return -EINVAL;
++
++	/*
++	 * Allow unprivileged RT tasks to decrease priority:
++	 */
++	if (user && !capable(CAP_SYS_NICE)) {
++		if (is_rt_policy(policy)) {
++			unsigned long rlim_rtprio =
++				task_rlimit(p, RLIMIT_RTPRIO);
++
++			/* can't set/change the rt policy */
++			if (policy != p->policy && !rlim_rtprio)
++				return -EPERM;
++
++			/* can't increase priority */
++			if (param->sched_priority > p->rt_priority &&
++			    param->sched_priority > rlim_rtprio)
++				return -EPERM;
++		} else {
++			switch (p->policy) {
++			/*
++			 * Can only downgrade policies but not back to
++			 * SCHED_NORMAL
++			 */
++			case SCHED_ISO:
++				if (policy == SCHED_ISO)
++					goto out;
++				if (policy == SCHED_NORMAL)
++					return -EPERM;
++				break;
++			case SCHED_BATCH:
++				if (policy == SCHED_BATCH)
++					goto out;
++				if (policy != SCHED_IDLEPRIO)
++					return -EPERM;
++				break;
++			case SCHED_IDLEPRIO:
++				if (policy == SCHED_IDLEPRIO)
++					goto out;
++				return -EPERM;
++			default:
++				break;
++			}
++		}
++
++		/* can't change other user's priorities */
++		if (!check_same_owner(p))
++			return -EPERM;
++
++		/* Normal users shall not reset the sched_reset_on_fork flag */
++		if (p->sched_reset_on_fork && !reset_on_fork)
++			return -EPERM;
++	}
++
++	if (user) {
++		retval = security_task_setscheduler(p);
++		if (retval)
++			return retval;
++	}
++
++	/*
++	 * make sure no PI-waiters arrive (or leave) while we are
++	 * changing the priority of the task:
++	 */
++	raw_spin_lock_irqsave(&p->pi_lock, flags);
++	/*
++	 * To be able to change p->policy safely, the grunqueue lock must be
++	 * held.
++	 */
++	rq = __task_grq_lock(p);
++
++	/*
++	 * Changing the policy of the stop threads is a very bad idea
++	 */
++	if (p == rq->stop) {
++		__task_grq_unlock();
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		return -EINVAL;
++	}
++
++	/*
++	 * If not changing anything there's no need to proceed further:
++	 */
++	if (unlikely(policy == p->policy && (!is_rt_policy(policy) ||
++	    param->sched_priority == p->rt_priority))) {
++
++		__task_grq_unlock();
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		return 0;
++	}
++
++	/* recheck policy now with rq lock held */
++	if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
++		policy = oldpolicy = -1;
++		__task_grq_unlock();
++		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++		goto recheck;
++	}
++	update_clocks(rq);
++	p->sched_reset_on_fork = reset_on_fork;
++
++	queued = task_queued(p);
++	if (queued)
++		dequeue_task(p);
++	__setscheduler(p, rq, policy, param->sched_priority, pi);
++	if (queued) {
++		enqueue_task(p, rq);
++		try_preempt(p, rq);
++	}
++	__task_grq_unlock();
++	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
++
++	if (pi)
++		rt_mutex_adjust_pi(p);
++out:
++	return 0;
++}
++
++/**
++ * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ *
++ * NOTE that the task may be already dead.
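++ *
++ * An illustrative in-kernel call, raising a worker to RT (the thread
++ * name here is made up):
++ *
++ *	struct sched_param sp = { .sched_priority = 50 };
++ *	sched_setscheduler(my_kthread, SCHED_FIFO, &sp);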
++ */
++int sched_setscheduler(struct task_struct *p, int policy,
++		       const struct sched_param *param)
++{
++	return __sched_setscheduler(p, policy, param, true, true);
++}
++
++EXPORT_SYMBOL_GPL(sched_setscheduler);
++
++int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
++{
++	const struct sched_param param = { .sched_priority = attr->sched_priority };
++	int policy = attr->sched_policy;
++
++	return __sched_setscheduler(p, policy, &param, true, true);
++}
++EXPORT_SYMBOL_GPL(sched_setattr);
++
++/**
++ * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace.
++ * @p: the task in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Just like sched_setscheduler, only don't bother checking if the
++ * current context has permission. For example, this is needed in
++ * stop_machine(): we create temporary high priority worker threads,
++ * but our caller might not have that capability.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++int sched_setscheduler_nocheck(struct task_struct *p, int policy,
++			       const struct sched_param *param)
++{
++	return __sched_setscheduler(p, policy, param, false, true);
++}
++EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
++
++static int
++do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param)
++{
++	struct sched_param lparam;
++	struct task_struct *p;
++	int retval;
++
++	if (!param || pid < 0)
++		return -EINVAL;
++	if (copy_from_user(&lparam, param, sizeof(struct sched_param)))
++		return -EFAULT;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (p != NULL)
++		retval = sched_setscheduler(p, policy, &lparam);
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/*
++ * Mimics kernel/events/core.c perf_copy_attr().
++ */
++static int sched_copy_attr(struct sched_attr __user *uattr,
++			   struct sched_attr *attr)
++{
++	u32 size;
++	int ret;
++
++	if (!access_ok(VERIFY_WRITE, uattr, SCHED_ATTR_SIZE_VER0))
++		return -EFAULT;
++
++	/*
++	 * zero the full structure, so that a short copy will be nice.
++	 */
++	memset(attr, 0, sizeof(*attr));
++
++	ret = get_user(size, &uattr->size);
++	if (ret)
++		return ret;
++
++	if (size > PAGE_SIZE)	/* silly large */
++		goto err_size;
++
++	if (!size)		/* abi compat */
++		size = SCHED_ATTR_SIZE_VER0;
++
++	if (size < SCHED_ATTR_SIZE_VER0)
++		goto err_size;
++
++	/*
++	 * If we're handed a bigger struct than we know of,
++	 * ensure all the unknown bits are 0 - i.e. new
++	 * user-space does not rely on any kernel feature
++	 * extensions we don't know about yet.
++	 */
++	if (size > sizeof(*attr)) {
++		unsigned char __user *addr;
++		unsigned char __user *end;
++		unsigned char val;
++
++		addr = (void __user *)uattr + sizeof(*attr);
++		end  = (void __user *)uattr + size;
++
++		for (; addr < end; addr++) {
++			ret = get_user(val, addr);
++			if (ret)
++				return ret;
++			if (val)
++				goto err_size;
++		}
++		size = sizeof(*attr);
++	}
++
++	ret = copy_from_user(attr, uattr, size);
++	if (ret)
++		return -EFAULT;
++
++	/*
++	 * XXX: do we want to be lenient like existing syscalls; or do we want
++	 * to be strict and return an error on out-of-bounds values?
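++	 * (As written we are lenient: sched_nice is simply clamped into
++	 * the valid [-20, 19] range below.)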
++	 */
++	attr->sched_nice = clamp(attr->sched_nice, -20, 19);
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return 0;
++
++err_size:
++	put_user(sizeof(*attr), &uattr->size);
++	return -E2BIG;
++}
++
++/**
++ * sys_sched_setscheduler - set/change the scheduler policy and RT priority
++ * @pid: the pid in question.
++ * @policy: new policy.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
++				       struct sched_param __user *param)
++{
++	/* negative values for policy are not valid */
++	if (policy < 0)
++		return -EINVAL;
++
++	return do_sched_setscheduler(pid, policy, param);
++}
++
++/*
++ * sched_setparam() passes in -1 for its policy, to let the functions
++ * it calls know not to change it.
++ */
++#define SETPARAM_POLICY -1
++
++/**
++ * sys_sched_setparam - set/change the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the new RT priority.
++ *
++ * Return: 0 on success. An error code otherwise.
++ */
++SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
++{
++	return do_sched_setscheduler(pid, SETPARAM_POLICY, param);
++}
++
++/**
++ * sys_sched_setattr - same as above, but with extended sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ */
++SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
++			       unsigned int, flags)
++{
++	struct sched_attr attr;
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || flags)
++		return -EINVAL;
++
++	retval = sched_copy_attr(uattr, &attr);
++	if (retval)
++		return retval;
++
++	if ((int)attr.sched_policy < 0)
++		return -EINVAL;
++
++	rcu_read_lock();
++	retval = -ESRCH;
++	p = find_process_by_pid(pid);
++	if (p != NULL)
++		retval = sched_setattr(p, &attr);
++	rcu_read_unlock();
++
++	return retval;
++}
++
++/**
++ * sys_sched_getscheduler - get the policy (scheduling class) of a thread
++ * @pid: the pid in question.
++ *
++ * Return: On success, the policy of the thread. Otherwise, a negative error
++ * code.
++ */
++SYSCALL_DEFINE1(sched_getscheduler, pid_t, pid)
++{
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (pid < 0)
++		goto out_nounlock;
++
++	retval = -ESRCH;
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	if (p) {
++		retval = security_task_getscheduler(p);
++		if (!retval)
++			retval = p->policy;
++	}
++	rcu_read_unlock();
++
++out_nounlock:
++	return retval;
++}
++
++/**
++ * sys_sched_getparam - get the RT priority of a thread
++ * @pid: the pid in question.
++ * @param: structure containing the RT priority.
++ *
++ * Return: On success, 0 and the RT priority is in @param. Otherwise, an error
++ * code.
++ */
++SYSCALL_DEFINE2(sched_getparam, pid_t, pid, struct sched_param __user *, param)
++{
++	struct sched_param lp = { .sched_priority = 0 };
++	struct task_struct *p;
++	int retval = -EINVAL;
++
++	if (!param || pid < 0)
++		goto out_nounlock;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	if (has_rt_policy(p))
++		lp.sched_priority = p->rt_priority;
++	rcu_read_unlock();
++
++	/*
++	 * This one might sleep, we cannot do it with a spinlock held ...
++	 */
++	retval = copy_to_user(param, &lp, sizeof(*param)) ? -EFAULT : 0;
++
++out_nounlock:
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++static int sched_read_attr(struct sched_attr __user *uattr,
++			   struct sched_attr *attr,
++			   unsigned int usize)
++{
++	int ret;
++
++	if (!access_ok(VERIFY_WRITE, uattr, usize))
++		return -EFAULT;
++
++	/*
++	 * If we're handed a smaller struct than we know of,
++	 * ensure all the unknown bits are 0 - i.e. old
++	 * user-space does not get incomplete information.
++	 */
++	if (usize < sizeof(*attr)) {
++		unsigned char *addr;
++		unsigned char *end;
++
++		addr = (void *)attr + usize;
++		end = (void *)attr + sizeof(*attr);
++
++		for (; addr < end; addr++) {
++			if (*addr)
++				return -EFBIG;
++		}
++
++		attr->size = usize;
++	}
++
++	ret = copy_to_user(uattr, attr, attr->size);
++	if (ret)
++		return -EFAULT;
++
++	/* sched/core.c uses zero here but we already know ret is zero */
++	return ret;
++}
++
++/**
++ * sys_sched_getattr - similar to sched_getparam, but with sched_attr
++ * @pid: the pid in question.
++ * @uattr: structure containing the extended parameters.
++ * @size: sizeof(attr) for fwd/bwd comp.
++ * @flags: for future extension.
++ */
++SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
++		unsigned int, size, unsigned int, flags)
++{
++	struct sched_attr attr = {
++		.size = sizeof(struct sched_attr),
++	};
++	struct task_struct *p;
++	int retval;
++
++	if (!uattr || pid < 0 || size > PAGE_SIZE ||
++	    size < SCHED_ATTR_SIZE_VER0 || flags)
++		return -EINVAL;
++
++	rcu_read_lock();
++	p = find_process_by_pid(pid);
++	retval = -ESRCH;
++	if (!p)
++		goto out_unlock;
++
++	retval = security_task_getscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	attr.sched_policy = p->policy;
++	if (rt_task(p))
++		attr.sched_priority = p->rt_priority;
++	else
++		attr.sched_nice = task_nice(p);
++
++	rcu_read_unlock();
++
++	retval = sched_read_attr(uattr, &attr, size);
++	return retval;
++
++out_unlock:
++	rcu_read_unlock();
++	return retval;
++}
++
++long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
++{
++	cpumask_var_t cpus_allowed, new_mask;
++	struct task_struct *p;
++	int retval;
++
++	get_online_cpus();
++	rcu_read_lock();
++
++	p = find_process_by_pid(pid);
++	if (!p) {
++		rcu_read_unlock();
++		put_online_cpus();
++		return -ESRCH;
++	}
++
++	/* Prevent p going away */
++	get_task_struct(p);
++	rcu_read_unlock();
++
++	if (p->flags & PF_NO_SETAFFINITY) {
++		retval = -EINVAL;
++		goto out_put_task;
++	}
++	if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_put_task;
++	}
++	if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
++		retval = -ENOMEM;
++		goto out_free_cpus_allowed;
++	}
++	retval = -EPERM;
++	if (!check_same_owner(p)) {
++		rcu_read_lock();
++		if (!ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) {
++			rcu_read_unlock();
++			goto out_unlock;
++		}
++		rcu_read_unlock();
++	}
++
++	retval = security_task_setscheduler(p);
++	if (retval)
++		goto out_unlock;
++
++	cpuset_cpus_allowed(p, cpus_allowed);
++	cpumask_and(new_mask, in_mask, cpus_allowed);
++again:
++	retval = __set_cpus_allowed_ptr(p, new_mask, true);
++
++	if (!retval) {
++		cpuset_cpus_allowed(p, cpus_allowed);
++		if (!cpumask_subset(new_mask, cpus_allowed)) {
++			/*
++			 * We must have raced with a concurrent cpuset
++			 * update.
Just reset the cpus_allowed to the ++ * cpuset's cpus_allowed ++ */ ++ cpumask_copy(new_mask, cpus_allowed); ++ goto again; ++ } ++ } ++out_unlock: ++ free_cpumask_var(new_mask); ++out_free_cpus_allowed: ++ free_cpumask_var(cpus_allowed); ++out_put_task: ++ put_task_struct(p); ++ put_online_cpus(); ++ return retval; ++} ++ ++static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len, ++ cpumask_t *new_mask) ++{ ++ if (len < sizeof(cpumask_t)) { ++ memset(new_mask, 0, sizeof(cpumask_t)); ++ } else if (len > sizeof(cpumask_t)) { ++ len = sizeof(cpumask_t); ++ } ++ return copy_from_user(new_mask, user_mask_ptr, len) ? -EFAULT : 0; ++} ++ ++ ++/** ++ * sys_sched_setaffinity - set the cpu affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to the new cpu mask ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_setaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ cpumask_var_t new_mask; ++ int retval; ++ ++ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ retval = get_user_cpu_mask(user_mask_ptr, len, new_mask); ++ if (retval == 0) ++ retval = sched_setaffinity(pid, new_mask); ++ free_cpumask_var(new_mask); ++ return retval; ++} ++ ++long sched_getaffinity(pid_t pid, cpumask_t *mask) ++{ ++ struct task_struct *p; ++ unsigned long flags; ++ int retval; ++ ++ get_online_cpus(); ++ rcu_read_lock(); ++ ++ retval = -ESRCH; ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ grq_lock_irqsave(&flags); ++ cpumask_and(mask, tsk_cpus_allowed(p), cpu_active_mask); ++ grq_unlock_irqrestore(&flags); ++ ++out_unlock: ++ rcu_read_unlock(); ++ put_online_cpus(); ++ ++ return retval; ++} ++ ++/** ++ * sys_sched_getaffinity - get the cpu affinity of a process ++ * @pid: pid of the process ++ * @len: length in bytes of the bitmask pointed to by user_mask_ptr ++ * @user_mask_ptr: user-space pointer to hold the current cpu mask ++ * ++ * Return: 0 on success. An error code otherwise. ++ */ ++SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, ++ unsigned long __user *, user_mask_ptr) ++{ ++ int ret; ++ cpumask_var_t mask; ++ ++ if ((len * BITS_PER_BYTE) < nr_cpu_ids) ++ return -EINVAL; ++ if (len & (sizeof(unsigned long)-1)) ++ return -EINVAL; ++ ++ if (!alloc_cpumask_var(&mask, GFP_KERNEL)) ++ return -ENOMEM; ++ ++ ret = sched_getaffinity(pid, mask); ++ if (ret == 0) { ++ size_t retlen = min_t(size_t, len, cpumask_size()); ++ ++ if (copy_to_user(user_mask_ptr, mask, retlen)) ++ ret = -EFAULT; ++ else ++ ret = retlen; ++ } ++ free_cpumask_var(mask); ++ ++ return ret; ++} ++ ++/** ++ * sys_sched_yield - yield the current processor to other threads. ++ * ++ * This function yields the current CPU to other tasks. It does this by ++ * scheduling away the current task. If it still has the earliest deadline ++ * it will be scheduled again as the next task. ++ * ++ * Return: 0. 
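++ *
++ * (Userspace reaches this through the sched_yield(2) wrapper; glibc's
++ * pthread_yield() is typically a thin alias for it.)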
++ */
++SYSCALL_DEFINE0(sched_yield)
++{
++	struct task_struct *p;
++
++	p = current;
++	grq_lock_irq();
++	schedstat_inc(task_rq(p), yld_count);
++	requeue_task(p);
++
++	/*
++	 * Since we are going to call schedule() anyway, there's
++	 * no need to preempt or enable interrupts:
++	 */
++	__release(grq.lock);
++	spin_release(&grq.lock.dep_map, 1, _THIS_IP_);
++	do_raw_spin_unlock(&grq.lock);
++	sched_preempt_enable_no_resched();
++
++	schedule();
++
++	return 0;
++}
++
++int __sched _cond_resched(void)
++{
++	if (should_resched(0)) {
++		preempt_schedule_common();
++		return 1;
++	}
++	return 0;
++}
++EXPORT_SYMBOL(_cond_resched);
++
++/*
++ * __cond_resched_lock() - if a reschedule is pending, drop the given lock,
++ * call schedule, and on return reacquire the lock.
++ *
++ * This works OK both with and without CONFIG_PREEMPT. We do strange low-level
++ * operations here to prevent schedule() from being called twice (once via
++ * spin_unlock(), once by hand).
++ */
++int __cond_resched_lock(spinlock_t *lock)
++{
++	int resched = should_resched(PREEMPT_LOCK_OFFSET);
++	int ret = 0;
++
++	lockdep_assert_held(lock);
++
++	if (spin_needbreak(lock) || resched) {
++		spin_unlock(lock);
++		if (resched)
++			preempt_schedule_common();
++		else
++			cpu_relax();
++		ret = 1;
++		spin_lock(lock);
++	}
++	return ret;
++}
++EXPORT_SYMBOL(__cond_resched_lock);
++
++int __sched __cond_resched_softirq(void)
++{
++	BUG_ON(!in_softirq());
++
++	if (should_resched(SOFTIRQ_DISABLE_OFFSET)) {
++		local_bh_enable();
++		preempt_schedule_common();
++		local_bh_disable();
++		return 1;
++	}
++	return 0;
++}
++EXPORT_SYMBOL(__cond_resched_softirq);
++
++/**
++ * yield - yield the current processor to other threads.
++ *
++ * Do not ever use this function, there's a 99% chance you're doing it wrong.
++ *
++ * The scheduler is at all times free to pick the calling task as the most
++ * eligible task to run, if removing the yield() call from your code breaks
++ * it, it's already broken.
++ *
++ * Typical broken usage is:
++ *
++ * while (!event)
++ *	yield();
++ *
++ * where one assumes that yield() will let 'the other' process run that will
++ * make event true. If the current task is a SCHED_FIFO task that will never
++ * happen. Never use yield() as a progress guarantee!!
++ *
++ * If you want to use yield() to wait for something, use wait_event().
++ * If you want to use yield() to be 'nice' for others, use cond_resched().
++ * If you still want to use yield(), do not!
++ */
++void __sched yield(void)
++{
++	set_current_state(TASK_RUNNING);
++	sys_sched_yield();
++}
++EXPORT_SYMBOL(yield);
++
++/**
++ * yield_to - yield the current processor to another thread in
++ * your thread group, or accelerate that thread toward the
++ * processor it's on.
++ * @p: target task
++ * @preempt: whether task preemption is allowed or not
++ *
++ * It's the caller's job to ensure that the target task struct
++ * can't go away on us before we can do any checks.
++ *
++ * Return:
++ *	true (>0) if we indeed boosted the target task.
++ *	false (0) if we failed to boost the target.
++ *	-ESRCH if there's no task to yield to.
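++ * (Its main user is paravirtualised spinning, e.g. kvm_vcpu_on_spin(),
++ * which donates the spinner's remaining slice to a lock holder.)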
++ */ ++int __sched yield_to(struct task_struct *p, bool preempt) ++{ ++ struct rq *rq, *p_rq; ++ unsigned long flags; ++ int yielded = 0; ++ ++ rq = this_rq(); ++ grq_lock_irqsave(&flags); ++ if (task_running(p) || p->state) { ++ yielded = -ESRCH; ++ goto out_unlock; ++ } ++ ++ p_rq = task_rq(p); ++ yielded = 1; ++ if (p->deadline > rq->rq_deadline) ++ p->deadline = rq->rq_deadline; ++ p->time_slice += rq->rq_time_slice; ++ rq->rq_time_slice = 0; ++ if (p->time_slice > timeslice()) ++ p->time_slice = timeslice(); ++ if (preempt && rq != p_rq) ++ resched_curr(p_rq); ++out_unlock: ++ grq_unlock_irqrestore(&flags); ++ ++ if (yielded > 0) ++ schedule(); ++ return yielded; ++} ++EXPORT_SYMBOL_GPL(yield_to); ++ ++/* ++ * This task is about to go to sleep on IO. Increment rq->nr_iowait so ++ * that process accounting knows that this is a task in IO wait state. ++ * ++ * But don't do that if it is a deliberate, throttling IO wait (this task ++ * has set its backing_dev_info: the queue against which it should throttle) ++ */ ++ ++long __sched io_schedule_timeout(long timeout) ++{ ++ int old_iowait = current->in_iowait; ++ struct rq *rq; ++ long ret; ++ ++ current->in_iowait = 1; ++ blk_schedule_flush_plug(current); ++ ++ delayacct_blkio_start(); ++ rq = raw_rq(); ++ atomic_inc(&rq->nr_iowait); ++ ret = schedule_timeout(timeout); ++ current->in_iowait = old_iowait; ++ atomic_dec(&rq->nr_iowait); ++ delayacct_blkio_end(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(io_schedule_timeout); ++ ++/** ++ * sys_sched_get_priority_max - return maximum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the maximum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_max, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = MAX_USER_RT_PRIO-1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLEPRIO: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_get_priority_min - return minimum RT priority. ++ * @policy: scheduling class. ++ * ++ * Return: On success, this syscall returns the minimum ++ * rt_priority that can be used by a given scheduling class. ++ * On failure, a negative error code is returned. ++ */ ++SYSCALL_DEFINE1(sched_get_priority_min, int, policy) ++{ ++ int ret = -EINVAL; ++ ++ switch (policy) { ++ case SCHED_FIFO: ++ case SCHED_RR: ++ ret = 1; ++ break; ++ case SCHED_NORMAL: ++ case SCHED_BATCH: ++ case SCHED_ISO: ++ case SCHED_IDLEPRIO: ++ ret = 0; ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * sys_sched_rr_get_interval - return the default timeslice of a process. ++ * @pid: pid of the process. ++ * @interval: userspace pointer to the timeslice value. ++ * ++ * ++ * Return: On success, 0 and the timeslice is in @interval. Otherwise, ++ * an error code. ++ */ ++SYSCALL_DEFINE2(sched_rr_get_interval, pid_t, pid, ++ struct timespec __user *, interval) ++{ ++ struct task_struct *p; ++ unsigned int time_slice; ++ unsigned long flags; ++ int retval; ++ struct timespec t; ++ ++ if (pid < 0) ++ return -EINVAL; ++ ++ retval = -ESRCH; ++ rcu_read_lock(); ++ p = find_process_by_pid(pid); ++ if (!p) ++ goto out_unlock; ++ ++ retval = security_task_getscheduler(p); ++ if (retval) ++ goto out_unlock; ++ ++ grq_lock_irqsave(&flags); ++ time_slice = p->policy == SCHED_FIFO ? 
0 : MS_TO_NS(task_timeslice(p)); ++ grq_unlock_irqrestore(&flags); ++ ++ rcu_read_unlock(); ++ t = ns_to_timespec(time_slice); ++ retval = copy_to_user(interval, &t, sizeof(t)) ? -EFAULT : 0; ++ return retval; ++ ++out_unlock: ++ rcu_read_unlock(); ++ return retval; ++} ++ ++static const char stat_nam[] = TASK_STATE_TO_CHAR_STR; ++ ++void sched_show_task(struct task_struct *p) ++{ ++ unsigned long free = 0; ++ int ppid; ++ unsigned long state = p->state; ++ ++ if (state) ++ state = __ffs(state) + 1; ++ printk(KERN_INFO "%-15.15s %c", p->comm, ++ state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); ++#if BITS_PER_LONG == 32 ++ if (state == TASK_RUNNING) ++ printk(KERN_CONT " running "); ++ else ++ printk(KERN_CONT " %08lx ", thread_saved_pc(p)); ++#else ++ if (state == TASK_RUNNING) ++ printk(KERN_CONT " running task "); ++ else ++ printk(KERN_CONT " %016lx ", thread_saved_pc(p)); ++#endif ++#ifdef CONFIG_DEBUG_STACK_USAGE ++ free = stack_not_used(p); ++#endif ++ ppid = 0; ++ rcu_read_lock(); ++ if (pid_alive(p)) ++ ppid = task_pid_nr(rcu_dereference(p->real_parent)); ++ rcu_read_unlock(); ++ printk(KERN_CONT "%5lu %5d %6d 0x%08lx\n", free, ++ task_pid_nr(p), ppid, ++ (unsigned long)task_thread_info(p)->flags); ++ ++ print_worker_info(KERN_INFO, p); ++ show_stack(p, NULL); ++} ++ ++void show_state_filter(unsigned long state_filter) ++{ ++ struct task_struct *g, *p; ++ ++#if BITS_PER_LONG == 32 ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#else ++ printk(KERN_INFO ++ " task PC stack pid father\n"); ++#endif ++ rcu_read_lock(); ++ for_each_process_thread(g, p) { ++ /* ++ * reset the NMI-timeout, listing all files on a slow ++ * console might take a lot of time: ++ */ ++ touch_nmi_watchdog(); ++ if (!state_filter || (p->state & state_filter)) ++ sched_show_task(p); ++ } ++ ++ touch_all_softlockup_watchdogs(); ++ ++ rcu_read_unlock(); ++ /* ++ * Only show locks if all tasks are dumped: ++ */ ++ if (!state_filter) ++ debug_show_all_locks(); ++} ++ ++void dump_cpu_task(int cpu) ++{ ++ pr_info("Task dump for CPU %d:\n", cpu); ++ sched_show_task(cpu_curr(cpu)); ++} ++ ++#ifdef CONFIG_SMP ++void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ cpumask_copy(&p->cpus_allowed, new_mask); ++ p->nr_cpus_allowed = cpumask_weight(new_mask); ++} ++ ++void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) ++{ ++ cpumask_copy(tsk_cpus_allowed(p), new_mask); ++} ++#endif ++ ++/** ++ * init_idle - set up an idle thread for a given CPU ++ * @idle: task in question ++ * @cpu: cpu the idle task belongs to ++ * ++ * NOTE: this function does not set the idle thread's NEED_RESCHED ++ * flag, to make booting more robust. ++ */ ++void init_idle(struct task_struct *idle, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&idle->pi_lock, flags); ++ time_lock_grq(rq); ++ idle->last_ran = rq->clock_task; ++ idle->state = TASK_RUNNING; ++ /* Setting prio to illegal value shouldn't matter when never queued */ ++ idle->prio = PRIO_LIMIT; ++#ifdef CONFIG_SMT_NICE ++ idle->smt_bias = 0; ++#endif ++ set_rq_task(rq, idle); ++ do_set_cpus_allowed(idle, get_cpu_mask(cpu)); ++ /* Silence PROVE_RCU */ ++ rcu_read_lock(); ++ set_task_cpu(idle, cpu); ++ rcu_read_unlock(); ++ rq->curr = rq->idle = idle; ++ idle->on_cpu = 1; ++ grq_unlock(); ++ raw_spin_unlock_irqrestore(&idle->pi_lock, flags); ++ ++ /* Set the preempt count _outside_ the spinlocks! 
*/ ++ init_idle_preempt_count(idle, cpu); ++ ++ ftrace_graph_init_idle_task(idle, cpu); ++#ifdef CONFIG_SMP ++ sprintf(idle->comm, "%s/%d", INIT_TASK_COMM, cpu); ++#endif ++} ++ ++int cpuset_cpumask_can_shrink(const struct cpumask __maybe_unused *cur, ++ const struct cpumask __maybe_unused *trial) ++{ ++ return 1; ++} ++ ++int task_can_attach(struct task_struct *p, ++ const struct cpumask *cs_cpus_allowed) ++{ ++ int ret = 0; ++ ++ /* ++ * Kthreads which disallow setaffinity shouldn't be moved ++ * to a new cpuset; we don't want to change their cpu ++ * affinity and isolating such threads by their set of ++ * allowed nodes is unnecessary. Thus, cpusets are not ++ * applicable for such threads. This prevents checking for ++ * success of set_cpus_allowed_ptr() on all attached tasks ++ * before cpus_allowed may be changed. ++ */ ++ if (p->flags & PF_NO_SETAFFINITY) ++ ret = -EINVAL; ++ ++ return ret; ++} ++ ++void wake_q_add(struct wake_q_head *head, struct task_struct *task) ++{ ++ struct wake_q_node *node = &task->wake_q; ++ ++ /* ++ * Atomically grab the task, if ->wake_q is !nil already it means ++ * its already queued (either by us or someone else) and will get the ++ * wakeup due to that. ++ * ++ * This cmpxchg() implies a full barrier, which pairs with the write ++ * barrier implied by the wakeup in wake_up_list(). ++ */ ++ if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL)) ++ return; ++ ++ get_task_struct(task); ++ ++ /* ++ * The head is context local, there can be no concurrency. ++ */ ++ *head->lastp = node; ++ head->lastp = &node->next; ++} ++ ++void wake_up_q(struct wake_q_head *head) ++{ ++ struct wake_q_node *node = head->first; ++ ++ while (node != WAKE_Q_TAIL) { ++ struct task_struct *task; ++ ++ task = container_of(node, struct task_struct, wake_q); ++ BUG_ON(!task); ++ /* task can safely be re-inserted now */ ++ node = node->next; ++ task->wake_q.next = NULL; ++ ++ /* ++ * wake_up_process() implies a wmb() to pair with the queueing ++ * in wake_q_add() so as not to miss wakeups. ++ */ ++ wake_up_process(task); ++ put_task_struct(task); ++ } ++} ++ ++void resched_cpu(int cpu) ++{ ++ unsigned long flags; ++ ++ grq_lock_irqsave(&flags); ++ resched_task(cpu_curr(cpu)); ++ grq_unlock_irqrestore(&flags); ++} ++ ++#ifdef CONFIG_SMP ++#ifdef CONFIG_NO_HZ_COMMON ++void nohz_balance_enter_idle(int cpu) ++{ ++} ++ ++void select_nohz_load_balancer(int stop_tick) ++{ ++} ++ ++void set_cpu_sd_state_idle(void) {} ++#if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT) ++/** ++ * lowest_flag_domain - Return lowest sched_domain containing flag. ++ * @cpu: The cpu whose lowest level of sched domain is to ++ * be returned. ++ * @flag: The flag to check for the lowest sched_domain ++ * for the given cpu. ++ * ++ * Returns the lowest sched_domain of a cpu which contains the given flag. ++ */ ++static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) ++{ ++ struct sched_domain *sd; ++ ++ for_each_domain(cpu, sd) ++ if (sd && (sd->flags & flag)) ++ break; ++ ++ return sd; ++} ++ ++/** ++ * for_each_flag_domain - Iterates over sched_domains containing the flag. ++ * @cpu: The cpu whose domains we're iterating over. ++ * @sd: variable holding the value of the power_savings_sd ++ * for cpu. ++ * @flag: The flag to filter the sched_domains to be iterated. ++ * ++ * Iterates over all the scheduler domains for a given cpu that has the 'flag' ++ * set, starting from the lowest sched_domain to the highest. 
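++ *
++ * Usage sketch (illustrative only):
++ *
++ *	for_each_flag_domain(cpu, sd, SD_WAKE_AFFINE)
++ *		pr_info("domain %s has SD_WAKE_AFFINE\n", sd->name);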
++ */
++#define for_each_flag_domain(cpu, sd, flag) \
++	for (sd = lowest_flag_domain(cpu, flag); \
++	     (sd && (sd->flags & flag)); sd = sd->parent)
++
++#endif /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
++
++/*
++ * In the semi idle case, use the nearest busy cpu for migrating timers
++ * from an idle cpu. This is good for power-savings.
++ *
++ * We don't do similar optimization for completely idle system, as
++ * selecting an idle cpu will add more delays to the timers than intended
++ * (as that cpu's timer base may not be up to date wrt jiffies etc).
++ */
++int get_nohz_timer_target(void)
++{
++	int i, cpu = smp_processor_id();
++	struct sched_domain *sd;
++
++	if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu))
++		return cpu;
++
++	rcu_read_lock();
++	for_each_domain(cpu, sd) {
++		for_each_cpu(i, sched_domain_span(sd)) {
++			if (!idle_cpu(i) && is_housekeeping_cpu(i)) {
++				cpu = i;
++				goto unlock;
++			}
++		}
++	}
++
++	if (!is_housekeeping_cpu(cpu))
++		cpu = housekeeping_any_cpu();
++unlock:
++	rcu_read_unlock();
++	return cpu;
++}
++
++/*
++ * When add_timer_on() enqueues a timer into the timer wheel of an
++ * idle CPU then this timer might expire before the next timer event
++ * which is scheduled to wake up that CPU. In case of a completely
++ * idle system the next event might even be infinite time into the
++ * future. wake_up_idle_cpu() ensures that the CPU is woken up and
++ * leaves the inner idle loop so the newly added timer is taken into
++ * account when the CPU goes back to idle and evaluates the timer
++ * wheel for the next timer event.
++ */
++void wake_up_idle_cpu(int cpu)
++{
++	if (cpu == smp_processor_id())
++		return;
++
++	set_tsk_need_resched(cpu_rq(cpu)->idle);
++	smp_send_reschedule(cpu);
++}
++
++void wake_up_nohz_cpu(int cpu)
++{
++	wake_up_idle_cpu(cpu);
++}
++#endif /* CONFIG_NO_HZ_COMMON */
++
++/*
++ * Change a given task's CPU affinity. Migrate the thread to a
++ * proper CPU and schedule it away if the CPU it's executing on
++ * is removed from the allowed bitmask.
++ *
++ * NOTE: the caller must have a valid reference to the task, the
++ * task must not exit() & deallocate itself prematurely. The
++ * call is not atomic; no spinlocks may be held.
++ */
++static int __set_cpus_allowed_ptr(struct task_struct *p,
++				  const struct cpumask *new_mask, bool check)
++{
++	bool running_wrong = false;
++	bool queued = false;
++	unsigned long flags;
++	struct rq *rq;
++	int ret = 0;
++
++	rq = task_grq_lock(p, &flags);
++
++	/*
++	 * Must re-check here, to close a race against __kthread_bind(),
++	 * sched_setaffinity() is not guaranteed to observe the flag.
++	 */
++	if (check && (p->flags & PF_NO_SETAFFINITY)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	if (cpumask_equal(tsk_cpus_allowed(p), new_mask))
++		goto out;
++
++	if (!cpumask_intersects(new_mask, cpu_active_mask)) {
++		ret = -EINVAL;
++		goto out;
++	}
++
++	queued = task_queued(p);
++
++	do_set_cpus_allowed(p, new_mask);
++
++	/* Can the task run on the task's current CPU? If so, we're done */
++	if (cpumask_test_cpu(task_cpu(p), new_mask))
++		goto out;
++
++	if (task_running(p)) {
++		/* Task is running on the wrong cpu now, reschedule it. */
++		if (rq == this_rq()) {
++			set_tsk_need_resched(p);
++			running_wrong = true;
++		} else
++			resched_task(p);
++	} else
++		set_task_cpu(p, cpumask_any_and(cpu_active_mask, new_mask));
++
++out:
++	if (queued)
++		try_preempt(p, rq);
++	task_grq_unlock(&flags);
++
++	if (running_wrong)
++		preempt_schedule_common();
++
++	return ret;
++}
++
++int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
++{
++	return __set_cpus_allowed_ptr(p, new_mask, false);
++}
++EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);
++
++#ifdef CONFIG_HOTPLUG_CPU
++/* Run through task list and find tasks affined to the dead cpu, then remove
++ * that cpu from the list, enable cpu0 and set the zerobound flag. */
++static void bind_zero(int src_cpu)
++{
++	struct task_struct *p, *t;
++	int bound = 0;
++
++	if (src_cpu == 0)
++		return;
++
++	do_each_thread(t, p) {
++		if (cpumask_test_cpu(src_cpu, tsk_cpus_allowed(p))) {
++			cpumask_clear_cpu(src_cpu, tsk_cpus_allowed(p));
++			cpumask_set_cpu(0, tsk_cpus_allowed(p));
++			p->zerobound = true;
++			bound++;
++		}
++		clear_sticky(p);
++	} while_each_thread(t, p);
++
++	if (bound) {
++		printk(KERN_INFO "Removed affinity for %d processes to cpu %d\n",
++		       bound, src_cpu);
++	}
++}
++
++/* Find processes with the zerobound flag and reenable their affinity for the
++ * CPU coming alive. */
++static void unbind_zero(int src_cpu)
++{
++	int unbound = 0, zerobound = 0;
++	struct task_struct *p, *t;
++
++	if (src_cpu == 0)
++		return;
++
++	do_each_thread(t, p) {
++		if (!p->mm)
++			p->zerobound = false;
++		if (p->zerobound) {
++			unbound++;
++			cpumask_set_cpu(src_cpu, tsk_cpus_allowed(p));
++			/* Once every CPU affinity has been re-enabled, remove
++			 * the zerobound flag */
++			if (cpumask_subset(cpu_possible_mask, tsk_cpus_allowed(p))) {
++				p->zerobound = false;
++				zerobound++;
++			}
++		}
++	} while_each_thread(t, p);
++
++	if (unbound) {
++		printk(KERN_INFO "Added affinity for %d processes to cpu %d\n",
++		       unbound, src_cpu);
++	}
++	if (zerobound) {
++		printk(KERN_INFO "Released forced binding to cpu0 for %d processes\n",
++		       zerobound);
++	}
++}
++
++/*
++ * Ensures that the idle task is using init_mm right before its cpu goes
++ * offline.
++ */
++void idle_task_exit(void)
++{
++	struct mm_struct *mm = current->active_mm;
++
++	BUG_ON(cpu_online(smp_processor_id()));
++
++	if (mm != &init_mm) {
++		switch_mm(mm, &init_mm, current);
++		finish_arch_post_lock_switch();
++	}
++	mmdrop(mm);
++}
++#else /* CONFIG_HOTPLUG_CPU */
++static void unbind_zero(int src_cpu) {}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++void sched_set_stop_task(int cpu, struct task_struct *stop)
++{
++	struct sched_param stop_param = { .sched_priority = STOP_PRIO };
++	struct sched_param start_param = { .sched_priority = 0 };
++	struct task_struct *old_stop = cpu_rq(cpu)->stop;
++
++	if (stop) {
++		/*
++		 * Make it appear like a SCHED_FIFO task, it's something
++		 * userspace knows about and won't get confused about.
++		 *
++		 * Also, it will make PI more or less work without too
++		 * much confusion -- but then, stop work should not
++		 * rely on PI working anyway.
++		 */
++		sched_setscheduler_nocheck(stop, SCHED_FIFO, &stop_param);
++	}
++
++	cpu_rq(cpu)->stop = stop;
++
++	if (old_stop) {
++		/*
++		 * Reset it back to a normal scheduling policy so that
++		 * it can die in pieces.
++ */ ++ sched_setscheduler_nocheck(old_stop, SCHED_NORMAL, &start_param); ++ } ++} ++ ++ ++#if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) ++ ++static struct ctl_table sd_ctl_dir[] = { ++ { ++ .procname = "sched_domain", ++ .mode = 0555, ++ }, ++ {} ++}; ++ ++static struct ctl_table sd_ctl_root[] = { ++ { ++ .procname = "kernel", ++ .mode = 0555, ++ .child = sd_ctl_dir, ++ }, ++ {} ++}; ++ ++static struct ctl_table *sd_alloc_ctl_entry(int n) ++{ ++ struct ctl_table *entry = ++ kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL); ++ ++ return entry; ++} ++ ++static void sd_free_ctl_entry(struct ctl_table **tablep) ++{ ++ struct ctl_table *entry; ++ ++ /* ++ * In the intermediate directories, both the child directory and ++ * procname are dynamically allocated and could fail but the mode ++ * will always be set. In the lowest directory the names are ++ * static strings and all have proc handlers. ++ */ ++ for (entry = *tablep; entry->mode; entry++) { ++ if (entry->child) ++ sd_free_ctl_entry(&entry->child); ++ if (entry->proc_handler == NULL) ++ kfree(entry->procname); ++ } ++ ++ kfree(*tablep); ++ *tablep = NULL; ++} ++ ++static void ++set_table_entry(struct ctl_table *entry, ++ const char *procname, void *data, int maxlen, ++ mode_t mode, proc_handler *proc_handler) ++{ ++ entry->procname = procname; ++ entry->data = data; ++ entry->maxlen = maxlen; ++ entry->mode = mode; ++ entry->proc_handler = proc_handler; ++} ++ ++static struct ctl_table * ++sd_alloc_ctl_domain_table(struct sched_domain *sd) ++{ ++ struct ctl_table *table = sd_alloc_ctl_entry(14); ++ ++ if (table == NULL) ++ return NULL; ++ ++ set_table_entry(&table[0], "min_interval", &sd->min_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax); ++ set_table_entry(&table[1], "max_interval", &sd->max_interval, ++ sizeof(long), 0644, proc_doulongvec_minmax); ++ set_table_entry(&table[2], "busy_idx", &sd->busy_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[3], "idle_idx", &sd->idle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[4], "newidle_idx", &sd->newidle_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[5], "wake_idx", &sd->wake_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[6], "forkexec_idx", &sd->forkexec_idx, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[7], "busy_factor", &sd->busy_factor, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[8], "imbalance_pct", &sd->imbalance_pct, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[9], "cache_nice_tries", ++ &sd->cache_nice_tries, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[10], "flags", &sd->flags, ++ sizeof(int), 0644, proc_dointvec_minmax); ++ set_table_entry(&table[11], "max_newidle_lb_cost", ++ &sd->max_newidle_lb_cost, ++ sizeof(long), 0644, proc_doulongvec_minmax); ++ set_table_entry(&table[12], "name", sd->name, ++ CORENAME_MAX_SIZE, 0444, proc_dostring); ++ /* &table[13] is terminator */ ++ ++ return table; ++} ++ ++static struct ctl_table *sd_alloc_ctl_cpu_table(int cpu) ++{ ++ struct ctl_table *entry, *table; ++ struct sched_domain *sd; ++ int domain_num = 0, i; ++ char buf[32]; ++ ++ for_each_domain(cpu, sd) ++ domain_num++; ++ entry = table = sd_alloc_ctl_entry(domain_num + 1); ++ if (table == NULL) ++ return NULL; ++ ++ i = 0; ++ for_each_domain(cpu, sd) { ++ snprintf(buf, 32, "domain%d", i); ++ entry->procname = kstrdup(buf, GFP_KERNEL); ++ 
entry->mode = 0555; ++ entry->child = sd_alloc_ctl_domain_table(sd); ++ entry++; ++ i++; ++ } ++ return table; ++} ++ ++static struct ctl_table_header *sd_sysctl_header; ++static void register_sched_domain_sysctl(void) ++{ ++ int i, cpu_num = num_possible_cpus(); ++ struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1); ++ char buf[32]; ++ ++ WARN_ON(sd_ctl_dir[0].child); ++ sd_ctl_dir[0].child = entry; ++ ++ if (entry == NULL) ++ return; ++ ++ for_each_possible_cpu(i) { ++ snprintf(buf, 32, "cpu%d", i); ++ entry->procname = kstrdup(buf, GFP_KERNEL); ++ entry->mode = 0555; ++ entry->child = sd_alloc_ctl_cpu_table(i); ++ entry++; ++ } ++ ++ WARN_ON(sd_sysctl_header); ++ sd_sysctl_header = register_sysctl_table(sd_ctl_root); ++} ++ ++/* may be called multiple times per register */ ++static void unregister_sched_domain_sysctl(void) ++{ ++ unregister_sysctl_table(sd_sysctl_header); ++ sd_sysctl_header = NULL; ++ if (sd_ctl_dir[0].child) ++ sd_free_ctl_entry(&sd_ctl_dir[0].child); ++} ++#else /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */ ++static void register_sched_domain_sysctl(void) ++{ ++} ++static void unregister_sched_domain_sysctl(void) ++{ ++} ++#endif /* CONFIG_SCHED_DEBUG && CONFIG_SYSCTL */ ++ ++static void set_rq_online(struct rq *rq) ++{ ++ if (!rq->online) { ++ cpumask_set_cpu(cpu_of(rq), rq->rd->online); ++ rq->online = true; ++ } ++} ++ ++static void set_rq_offline(struct rq *rq) ++{ ++ if (rq->online) { ++ cpumask_clear_cpu(cpu_of(rq), rq->rd->online); ++ rq->online = false; ++ } ++} ++ ++/* ++ * migration_call - callback that gets triggered when a CPU is added. ++ */ ++static int ++migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ unsigned long flags; ++ struct rq *rq = cpu_rq(cpu); ++#ifdef CONFIG_HOTPLUG_CPU ++ struct task_struct *idle = rq->idle; ++#endif ++ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_STARTING: ++ return NOTIFY_OK; ++ case CPU_UP_PREPARE: ++ break; ++ ++ case CPU_ONLINE: ++ /* Update our root-domain */ ++ grq_lock_irqsave(&flags); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ ++ set_rq_online(rq); ++ } ++ unbind_zero(cpu); ++ grq.noc = num_online_cpus(); ++ grq_unlock_irqrestore(&flags); ++ break; ++ ++#ifdef CONFIG_HOTPLUG_CPU ++ case CPU_DEAD: ++ grq_lock_irq(); ++ set_rq_task(rq, idle); ++ update_clocks(rq); ++ grq_unlock_irq(); ++ break; ++ ++ case CPU_DYING: ++ /* Update our root-domain */ ++ grq_lock_irqsave(&flags); ++ if (rq->rd) { ++ BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span)); ++ set_rq_offline(rq); ++ } ++ bind_zero(cpu); ++ grq.noc = num_online_cpus(); ++ grq_unlock_irqrestore(&flags); ++ break; ++#endif ++ } ++ return NOTIFY_OK; ++} ++ ++/* ++ * Register at high priority so that task migration (migrate_all_tasks) ++ * happens before everything else. This has to be lower priority than ++ * the notifier in the perf_counter subsystem, though. ++ */ ++static struct notifier_block migration_notifier = { ++ .notifier_call = migration_call, ++ .priority = CPU_PRI_MIGRATION, ++}; ++ ++static int sched_cpu_active(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_STARTING: ++ return NOTIFY_OK; ++ case CPU_ONLINE: ++ /* ++ * At this point a starting CPU has marked itself as online via ++ * set_cpu_online(). But it might not yet have marked itself ++ * as active, which is essential from here on. 
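++ * (Scheduler placement elsewhere in this file keys off
++ * cpu_active_mask -- see __set_cpus_allowed_ptr() -- so setting
++ * the active bit here is what re-opens the CPU for migration.)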
++ */ ++ set_cpu_active(cpu, true); ++ stop_machine_unpark(cpu); ++ return NOTIFY_OK; ++ ++ case CPU_DOWN_FAILED: ++ set_cpu_active(cpu, true); ++ return NOTIFY_OK; ++ default: ++ return NOTIFY_DONE; ++ } ++} ++ ++static int sched_cpu_inactive(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_DOWN_PREPARE: ++ set_cpu_active((long)hcpu, false); ++ return NOTIFY_OK; ++ default: ++ return NOTIFY_DONE; ++ } ++} ++ ++int __init migration_init(void) ++{ ++ void *cpu = (void *)(long)smp_processor_id(); ++ int err; ++ ++ /* Initialise migration for the boot CPU */ ++ err = migration_call(&migration_notifier, CPU_UP_PREPARE, cpu); ++ BUG_ON(err == NOTIFY_BAD); ++ migration_call(&migration_notifier, CPU_ONLINE, cpu); ++ register_cpu_notifier(&migration_notifier); ++ ++ /* Register cpu active notifiers */ ++ cpu_notifier(sched_cpu_active, CPU_PRI_SCHED_ACTIVE); ++ cpu_notifier(sched_cpu_inactive, CPU_PRI_SCHED_INACTIVE); ++ ++ return 0; ++} ++early_initcall(migration_init); ++ ++static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */ ++ ++#ifdef CONFIG_SCHED_DEBUG ++ ++static __read_mostly int sched_debug_enabled; ++ ++static int __init sched_debug_setup(char *str) ++{ ++ sched_debug_enabled = 1; ++ ++ return 0; ++} ++early_param("sched_debug", sched_debug_setup); ++ ++static inline bool sched_debug(void) ++{ ++ return sched_debug_enabled; ++} ++ ++static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, ++ struct cpumask *groupmask) ++{ ++ cpumask_clear(groupmask); ++ ++ printk(KERN_DEBUG "%*s domain %d: ", level, "", level); ++ ++ if (!(sd->flags & SD_LOAD_BALANCE)) { ++ printk("does not load-balance\n"); ++ if (sd->parent) ++ printk(KERN_ERR "ERROR: !SD_LOAD_BALANCE domain" ++ " has parent"); ++ return -1; ++ } ++ ++ printk(KERN_CONT "span %*pbl level %s\n", ++ cpumask_pr_args(sched_domain_span(sd)), sd->name); ++ ++ if (!cpumask_test_cpu(cpu, sched_domain_span(sd))) { ++ printk(KERN_ERR "ERROR: domain->span does not contain " ++ "CPU%d\n", cpu); ++ } ++ ++ printk(KERN_CONT "\n"); ++ ++ if (!cpumask_equal(sched_domain_span(sd), groupmask)) ++ printk(KERN_ERR "ERROR: groups don't span domain->span\n"); ++ ++ if (sd->parent && ++ !cpumask_subset(groupmask, sched_domain_span(sd->parent))) ++ printk(KERN_ERR "ERROR: parent span is not a superset " ++ "of domain->span\n"); ++ return 0; ++} ++ ++static void sched_domain_debug(struct sched_domain *sd, int cpu) ++{ ++ int level = 0; ++ ++ if (!sched_debug_enabled) ++ return; ++ ++ if (!sd) { ++ printk(KERN_DEBUG "CPU%d attaching NULL sched-domain.\n", cpu); ++ return; ++ } ++ ++ printk(KERN_DEBUG "CPU%d attaching sched-domain:\n", cpu); ++ ++ for (;;) { ++ if (sched_domain_debug_one(sd, cpu, level, sched_domains_tmpmask)) ++ break; ++ level++; ++ sd = sd->parent; ++ if (!sd) ++ break; ++ } ++} ++#else /* !CONFIG_SCHED_DEBUG */ ++# define sched_domain_debug(sd, cpu) do { } while (0) ++static inline bool sched_debug(void) ++{ ++ return false; ++} ++#endif /* CONFIG_SCHED_DEBUG */ ++ ++static int sd_degenerate(struct sched_domain *sd) ++{ ++ if (cpumask_weight(sched_domain_span(sd)) == 1) ++ return 1; ++ ++ /* Following flags don't use groups */ ++ if (sd->flags & (SD_WAKE_AFFINE)) ++ return 0; ++ ++ return 1; ++} ++ ++static int ++sd_parent_degenerate(struct sched_domain *sd, struct sched_domain *parent) ++{ ++ unsigned long cflags = sd->flags, pflags = parent->flags; ++ ++ if (sd_degenerate(parent)) ++ return 1; ++ ++ if 
(!cpumask_equal(sched_domain_span(sd), sched_domain_span(parent))) ++ return 0; ++ ++ if (~cflags & pflags) ++ return 0; ++ ++ return 1; ++} ++ ++static void free_rootdomain(struct rcu_head *rcu) ++{ ++ struct root_domain *rd = container_of(rcu, struct root_domain, rcu); ++ ++ cpupri_cleanup(&rd->cpupri); ++ free_cpumask_var(rd->rto_mask); ++ free_cpumask_var(rd->online); ++ free_cpumask_var(rd->span); ++ kfree(rd); ++} ++ ++static void rq_attach_root(struct rq *rq, struct root_domain *rd) ++{ ++ struct root_domain *old_rd = NULL; ++ unsigned long flags; ++ ++ grq_lock_irqsave(&flags); ++ ++ if (rq->rd) { ++ old_rd = rq->rd; ++ ++ if (cpumask_test_cpu(rq->cpu, old_rd->online)) ++ set_rq_offline(rq); ++ ++ cpumask_clear_cpu(rq->cpu, old_rd->span); ++ ++ /* ++ * If we dont want to free the old_rd yet then ++ * set old_rd to NULL to skip the freeing later ++ * in this function: ++ */ ++ if (!atomic_dec_and_test(&old_rd->refcount)) ++ old_rd = NULL; ++ } ++ ++ atomic_inc(&rd->refcount); ++ rq->rd = rd; ++ ++ cpumask_set_cpu(rq->cpu, rd->span); ++ if (cpumask_test_cpu(rq->cpu, cpu_active_mask)) ++ set_rq_online(rq); ++ ++ grq_unlock_irqrestore(&flags); ++ ++ if (old_rd) ++ call_rcu_sched(&old_rd->rcu, free_rootdomain); ++} ++ ++static int init_rootdomain(struct root_domain *rd) ++{ ++ memset(rd, 0, sizeof(*rd)); ++ ++ if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL)) ++ goto out; ++ if (!zalloc_cpumask_var(&rd->online, GFP_KERNEL)) ++ goto free_span; ++ if (!zalloc_cpumask_var(&rd->rto_mask, GFP_KERNEL)) ++ goto free_online; ++ ++ if (cpupri_init(&rd->cpupri) != 0) ++ goto free_rto_mask; ++ return 0; ++ ++free_rto_mask: ++ free_cpumask_var(rd->rto_mask); ++free_online: ++ free_cpumask_var(rd->online); ++free_span: ++ free_cpumask_var(rd->span); ++out: ++ return -ENOMEM; ++} ++ ++static void init_defrootdomain(void) ++{ ++ init_rootdomain(&def_root_domain); ++ ++ atomic_set(&def_root_domain.refcount, 1); ++} ++ ++static struct root_domain *alloc_rootdomain(void) ++{ ++ struct root_domain *rd; ++ ++ rd = kmalloc(sizeof(*rd), GFP_KERNEL); ++ if (!rd) ++ return NULL; ++ ++ if (init_rootdomain(rd) != 0) { ++ kfree(rd); ++ return NULL; ++ } ++ ++ return rd; ++} ++ ++static void free_sched_domain(struct rcu_head *rcu) ++{ ++ struct sched_domain *sd = container_of(rcu, struct sched_domain, rcu); ++ ++ kfree(sd); ++} ++ ++static void destroy_sched_domain(struct sched_domain *sd, int cpu) ++{ ++ call_rcu(&sd->rcu, free_sched_domain); ++} ++ ++static void destroy_sched_domains(struct sched_domain *sd, int cpu) ++{ ++ for (; sd; sd = sd->parent) ++ destroy_sched_domain(sd, cpu); ++} ++ ++/* ++ * Attach the domain 'sd' to 'cpu' as its base domain. Callers must ++ * hold the hotplug lock. ++ */ ++static void ++cpu_attach_domain(struct sched_domain *sd, struct root_domain *rd, int cpu) ++{ ++ struct rq *rq = cpu_rq(cpu); ++ struct sched_domain *tmp; ++ ++ /* Remove the sched domains which do not contribute to scheduling. */ ++ for (tmp = sd; tmp; ) { ++ struct sched_domain *parent = tmp->parent; ++ if (!parent) ++ break; ++ ++ if (sd_parent_degenerate(tmp, parent)) { ++ tmp->parent = parent->parent; ++ if (parent->parent) ++ parent->parent->child = tmp; ++ /* ++ * Transfer SD_PREFER_SIBLING down in case of a ++ * degenerate parent; the spans match for this ++ * so the property transfers. 
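++ * e.g. on a single-socket machine whose DIE span equals the
++ * MC span, DIE is collapsed away but its SD_PREFER_SIBLING
++ * intent survives on the remaining MC domain.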
++ */ ++ if (parent->flags & SD_PREFER_SIBLING) ++ tmp->flags |= SD_PREFER_SIBLING; ++ destroy_sched_domain(parent, cpu); ++ } else ++ tmp = tmp->parent; ++ } ++ ++ if (sd && sd_degenerate(sd)) { ++ tmp = sd; ++ sd = sd->parent; ++ destroy_sched_domain(tmp, cpu); ++ if (sd) ++ sd->child = NULL; ++ } ++ ++ sched_domain_debug(sd, cpu); ++ ++ rq_attach_root(rq, rd); ++ tmp = rq->sd; ++ rcu_assign_pointer(rq->sd, sd); ++ destroy_sched_domains(tmp, cpu); ++} ++ ++/* Setup the mask of cpus configured for isolated domains */ ++static int __init isolated_cpu_setup(char *str) ++{ ++ alloc_bootmem_cpumask_var(&cpu_isolated_map); ++ cpulist_parse(str, cpu_isolated_map); ++ return 1; ++} ++ ++__setup("isolcpus=", isolated_cpu_setup); ++ ++struct s_data { ++ struct sched_domain ** __percpu sd; ++ struct root_domain *rd; ++}; ++ ++enum s_alloc { ++ sa_rootdomain, ++ sa_sd, ++ sa_sd_storage, ++ sa_none, ++}; ++ ++/* ++ * Initializers for schedule domains ++ * Non-inlined to reduce accumulated stack pressure in build_sched_domains() ++ */ ++ ++static int default_relax_domain_level = -1; ++int sched_domain_level_max; ++ ++static int __init setup_relax_domain_level(char *str) ++{ ++ if (kstrtoint(str, 0, &default_relax_domain_level)) ++ pr_warn("Unable to set relax_domain_level\n"); ++ ++ return 1; ++} ++__setup("relax_domain_level=", setup_relax_domain_level); ++ ++static void set_domain_attribute(struct sched_domain *sd, ++ struct sched_domain_attr *attr) ++{ ++ int request; ++ ++ if (!attr || attr->relax_domain_level < 0) { ++ if (default_relax_domain_level < 0) ++ return; ++ else ++ request = default_relax_domain_level; ++ } else ++ request = attr->relax_domain_level; ++ if (request < sd->level) { ++ /* turn off idle balance on this domain */ ++ sd->flags &= ~(SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); ++ } else { ++ /* turn on idle balance on this domain */ ++ sd->flags |= (SD_BALANCE_WAKE|SD_BALANCE_NEWIDLE); ++ } ++} ++ ++static void __sdt_free(const struct cpumask *cpu_map); ++static int __sdt_alloc(const struct cpumask *cpu_map); ++ ++static void __free_domain_allocs(struct s_data *d, enum s_alloc what, ++ const struct cpumask *cpu_map) ++{ ++ switch (what) { ++ case sa_rootdomain: ++ if (!atomic_read(&d->rd->refcount)) ++ free_rootdomain(&d->rd->rcu); /* fall through */ ++ case sa_sd: ++ free_percpu(d->sd); /* fall through */ ++ case sa_sd_storage: ++ __sdt_free(cpu_map); /* fall through */ ++ case sa_none: ++ break; ++ } ++} ++ ++static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, ++ const struct cpumask *cpu_map) ++{ ++ memset(d, 0, sizeof(*d)); ++ ++ if (__sdt_alloc(cpu_map)) ++ return sa_sd_storage; ++ d->sd = alloc_percpu(struct sched_domain *); ++ if (!d->sd) ++ return sa_sd_storage; ++ d->rd = alloc_rootdomain(); ++ if (!d->rd) ++ return sa_sd; ++ return sa_rootdomain; ++} ++ ++/* ++ * NULL the sd_data elements we've used to build the sched_domain ++ * structure so that the subsequent __free_domain_allocs() ++ * will not free the data we're using. ++ */ ++static void claim_allocations(int cpu, struct sched_domain *sd) ++{ ++ struct sd_data *sdd = sd->private; ++ ++ WARN_ON_ONCE(*per_cpu_ptr(sdd->sd, cpu) != sd); ++ *per_cpu_ptr(sdd->sd, cpu) = NULL; ++} ++ ++#ifdef CONFIG_NUMA ++static int sched_domains_numa_levels; ++static int *sched_domains_numa_distance; ++static struct cpumask ***sched_domains_numa_masks; ++static int sched_domains_curr_level; ++#endif ++ ++/* ++ * SD_flags allowed in topology descriptions. 
++ * ++ * SD_SHARE_CPUCAPACITY - describes SMT topologies ++ * SD_SHARE_PKG_RESOURCES - describes shared caches ++ * SD_NUMA - describes NUMA topologies ++ * SD_SHARE_POWERDOMAIN - describes shared power domain ++ * ++ * Odd one out: ++ * SD_ASYM_PACKING - describes SMT quirks ++ */ ++#define TOPOLOGY_SD_FLAGS \ ++ (SD_SHARE_CPUCAPACITY | \ ++ SD_SHARE_PKG_RESOURCES | \ ++ SD_NUMA | \ ++ SD_ASYM_PACKING | \ ++ SD_SHARE_POWERDOMAIN) ++ ++static struct sched_domain * ++sd_init(struct sched_domain_topology_level *tl, int cpu) ++{ ++ struct sched_domain *sd = *per_cpu_ptr(tl->data.sd, cpu); ++ int sd_weight, sd_flags = 0; ++ ++#ifdef CONFIG_NUMA ++ /* ++ * Ugly hack to pass state to sd_numa_mask()... ++ */ ++ sched_domains_curr_level = tl->numa_level; ++#endif ++ ++ sd_weight = cpumask_weight(tl->mask(cpu)); ++ ++ if (tl->sd_flags) ++ sd_flags = (*tl->sd_flags)(); ++ if (WARN_ONCE(sd_flags & ~TOPOLOGY_SD_FLAGS, ++ "wrong sd_flags in topology description\n")) ++ sd_flags &= ~TOPOLOGY_SD_FLAGS; ++ ++ *sd = (struct sched_domain){ ++ .min_interval = sd_weight, ++ .max_interval = 2*sd_weight, ++ .busy_factor = 32, ++ .imbalance_pct = 125, ++ ++ .cache_nice_tries = 0, ++ .busy_idx = 0, ++ .idle_idx = 0, ++ .newidle_idx = 0, ++ .wake_idx = 0, ++ .forkexec_idx = 0, ++ ++ .flags = 1*SD_LOAD_BALANCE ++ | 1*SD_BALANCE_NEWIDLE ++ | 1*SD_BALANCE_EXEC ++ | 1*SD_BALANCE_FORK ++ | 0*SD_BALANCE_WAKE ++ | 1*SD_WAKE_AFFINE ++ | 0*SD_SHARE_CPUCAPACITY ++ | 0*SD_SHARE_PKG_RESOURCES ++ | 0*SD_SERIALIZE ++ | 0*SD_PREFER_SIBLING ++ | 0*SD_NUMA ++ | sd_flags ++ , ++ ++ .last_balance = jiffies, ++ .balance_interval = sd_weight, ++ .smt_gain = 0, ++ .max_newidle_lb_cost = 0, ++ .next_decay_max_lb_cost = jiffies, ++#ifdef CONFIG_SCHED_DEBUG ++ .name = tl->name, ++#endif ++ }; ++ ++ /* ++ * Convert topological properties into behaviour. ++ */ ++ ++ if (sd->flags & SD_SHARE_CPUCAPACITY) { ++ sd->flags |= SD_PREFER_SIBLING; ++ sd->imbalance_pct = 110; ++ sd->smt_gain = 1178; /* ~15% */ ++ ++ } else if (sd->flags & SD_SHARE_PKG_RESOURCES) { ++ sd->imbalance_pct = 117; ++ sd->cache_nice_tries = 1; ++ sd->busy_idx = 2; ++ ++#ifdef CONFIG_NUMA ++ } else if (sd->flags & SD_NUMA) { ++ sd->cache_nice_tries = 2; ++ sd->busy_idx = 3; ++ sd->idle_idx = 2; ++ ++ sd->flags |= SD_SERIALIZE; ++ if (sched_domains_numa_distance[tl->numa_level] > RECLAIM_DISTANCE) { ++ sd->flags &= ~(SD_BALANCE_EXEC | ++ SD_BALANCE_FORK | ++ SD_WAKE_AFFINE); ++ } ++ ++#endif ++ } else { ++ sd->flags |= SD_PREFER_SIBLING; ++ sd->cache_nice_tries = 1; ++ sd->busy_idx = 2; ++ sd->idle_idx = 1; ++ } ++ ++ sd->private = &tl->data; ++ ++ return sd; ++} ++ ++/* ++ * Topology list, bottom-up. 
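++ *
++ * e.g. on a hyperthreaded multi-core box the resulting domains
++ * nest SMT -> MC -> DIE, each level spanning a superset of the
++ * level below it.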
++ */ ++static struct sched_domain_topology_level default_topology[] = { ++#ifdef CONFIG_SCHED_SMT ++ { cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) }, ++#endif ++#ifdef CONFIG_SCHED_MC ++ { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) }, ++#endif ++ { cpu_cpu_mask, SD_INIT_NAME(DIE) }, ++ { NULL, }, ++}; ++ ++static struct sched_domain_topology_level *sched_domain_topology = ++ default_topology; ++ ++#define for_each_sd_topology(tl) \ ++ for (tl = sched_domain_topology; tl->mask; tl++) ++ ++void set_sched_topology(struct sched_domain_topology_level *tl) ++{ ++ sched_domain_topology = tl; ++} ++ ++#ifdef CONFIG_NUMA ++ ++static const struct cpumask *sd_numa_mask(int cpu) ++{ ++ return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; ++} ++ ++static void sched_numa_warn(const char *str) ++{ ++ static int done = false; ++ int i,j; ++ ++ if (done) ++ return; ++ ++ done = true; ++ ++ printk(KERN_WARNING "ERROR: %s\n\n", str); ++ ++ for (i = 0; i < nr_node_ids; i++) { ++ printk(KERN_WARNING " "); ++ for (j = 0; j < nr_node_ids; j++) ++ printk(KERN_CONT "%02d ", node_distance(i,j)); ++ printk(KERN_CONT "\n"); ++ } ++ printk(KERN_WARNING "\n"); ++} ++ ++static bool find_numa_distance(int distance) ++{ ++ int i; ++ ++ if (distance == node_distance(0, 0)) ++ return true; ++ ++ for (i = 0; i < sched_domains_numa_levels; i++) { ++ if (sched_domains_numa_distance[i] == distance) ++ return true; ++ } ++ ++ return false; ++} ++ ++static void sched_init_numa(void) ++{ ++ int next_distance, curr_distance = node_distance(0, 0); ++ struct sched_domain_topology_level *tl; ++ int level = 0; ++ int i, j, k; ++ ++ sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); ++ if (!sched_domains_numa_distance) ++ return; ++ ++ /* ++ * O(nr_nodes^2) deduplicating selection sort -- in order to find the ++ * unique distances in the node_distance() table. ++ * ++ * Assumes node_distance(0,j) includes all distances in ++ * node_distance(i,j) in order to avoid cubic time. ++ */ ++ next_distance = curr_distance; ++ for (i = 0; i < nr_node_ids; i++) { ++ for (j = 0; j < nr_node_ids; j++) { ++ for (k = 0; k < nr_node_ids; k++) { ++ int distance = node_distance(i, k); ++ ++ if (distance > curr_distance && ++ (distance < next_distance || ++ next_distance == curr_distance)) ++ next_distance = distance; ++ ++ /* ++ * While not a strong assumption it would be nice to know ++ * about cases where if node A is connected to B, B is not ++ * equally connected to A. ++ */ ++ if (sched_debug() && node_distance(k, i) != distance) ++ sched_numa_warn("Node-distance not symmetric"); ++ ++ if (sched_debug() && i && !find_numa_distance(distance)) ++ sched_numa_warn("Node-0 not representative"); ++ } ++ if (next_distance != curr_distance) { ++ sched_domains_numa_distance[level++] = next_distance; ++ sched_domains_numa_levels = level; ++ curr_distance = next_distance; ++ } else break; ++ } ++ ++ /* ++ * In case of sched_debug() we verify the above assumption. ++ */ ++ if (!sched_debug()) ++ break; ++ } ++ /* ++ * 'level' contains the number of unique distances, excluding the ++ * identity distance node_distance(i,i). ++ * ++ * The sched_domains_numa_distance[] array includes the actual distance ++ * numbers. ++ */ ++ ++ /* ++ * Here, we should temporarily reset sched_domains_numa_levels to 0. ++ * If it fails to allocate memory for array sched_domains_numa_masks[][], ++ * the array will contain less then 'level' members. 
This could be ++ * dangerous when we use it to iterate array sched_domains_numa_masks[][] ++ * in other functions. ++ * ++ * We reset it to 'level' at the end of this function. ++ */ ++ sched_domains_numa_levels = 0; ++ ++ sched_domains_numa_masks = kzalloc(sizeof(void *) * level, GFP_KERNEL); ++ if (!sched_domains_numa_masks) ++ return; ++ ++ /* ++ * Now for each level, construct a mask per node which contains all ++ * cpus of nodes that are that many hops away from us. ++ */ ++ for (i = 0; i < level; i++) { ++ sched_domains_numa_masks[i] = ++ kzalloc(nr_node_ids * sizeof(void *), GFP_KERNEL); ++ if (!sched_domains_numa_masks[i]) ++ return; ++ ++ for (j = 0; j < nr_node_ids; j++) { ++ struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL); ++ if (!mask) ++ return; ++ ++ sched_domains_numa_masks[i][j] = mask; ++ ++ for (k = 0; k < nr_node_ids; k++) { ++ if (node_distance(j, k) > sched_domains_numa_distance[i]) ++ continue; ++ ++ cpumask_or(mask, mask, cpumask_of_node(k)); ++ } ++ } ++ } ++ ++ /* Compute default topology size */ ++ for (i = 0; sched_domain_topology[i].mask; i++); ++ ++ tl = kzalloc((i + level + 1) * ++ sizeof(struct sched_domain_topology_level), GFP_KERNEL); ++ if (!tl) ++ return; ++ ++ /* ++ * Copy the default topology bits.. ++ */ ++ for (i = 0; sched_domain_topology[i].mask; i++) ++ tl[i] = sched_domain_topology[i]; ++ ++ /* ++ * .. and append 'j' levels of NUMA goodness. ++ */ ++ for (j = 0; j < level; i++, j++) { ++ tl[i] = (struct sched_domain_topology_level){ ++ .mask = sd_numa_mask, ++ .sd_flags = cpu_numa_flags, ++ .flags = SDTL_OVERLAP, ++ .numa_level = j, ++ SD_INIT_NAME(NUMA) ++ }; ++ } ++ ++ sched_domain_topology = tl; ++ ++ sched_domains_numa_levels = level; ++} ++ ++static void sched_domains_numa_masks_set(int cpu) ++{ ++ int i, j; ++ int node = cpu_to_node(cpu); ++ ++ for (i = 0; i < sched_domains_numa_levels; i++) { ++ for (j = 0; j < nr_node_ids; j++) { ++ if (node_distance(j, node) <= sched_domains_numa_distance[i]) ++ cpumask_set_cpu(cpu, sched_domains_numa_masks[i][j]); ++ } ++ } ++} ++ ++static void sched_domains_numa_masks_clear(int cpu) ++{ ++ int i, j; ++ for (i = 0; i < sched_domains_numa_levels; i++) { ++ for (j = 0; j < nr_node_ids; j++) ++ cpumask_clear_cpu(cpu, sched_domains_numa_masks[i][j]); ++ } ++} ++ ++/* ++ * Update sched_domains_numa_masks[level][node] array when new cpus ++ * are onlined. 
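++ *
++ * masks[level][node] collects the CPUs of every node within
++ * sched_domains_numa_distance[level] of that node, so a CPU
++ * coming online must be set in (and one going away cleared
++ * from) each mask that reaches its node.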
++ */ ++static int sched_domains_numa_masks_update(struct notifier_block *nfb, ++ unsigned long action, ++ void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_ONLINE: ++ sched_domains_numa_masks_set(cpu); ++ break; ++ ++ case CPU_DEAD: ++ sched_domains_numa_masks_clear(cpu); ++ break; ++ ++ default: ++ return NOTIFY_DONE; ++ } ++ ++ return NOTIFY_OK; ++} ++#else ++static inline void sched_init_numa(void) ++{ ++} ++ ++static int sched_domains_numa_masks_update(struct notifier_block *nfb, ++ unsigned long action, ++ void *hcpu) ++{ ++ return 0; ++} ++#endif /* CONFIG_NUMA */ ++ ++static int __sdt_alloc(const struct cpumask *cpu_map) ++{ ++ struct sched_domain_topology_level *tl; ++ int j; ++ ++ for_each_sd_topology(tl) { ++ struct sd_data *sdd = &tl->data; ++ ++ sdd->sd = alloc_percpu(struct sched_domain *); ++ if (!sdd->sd) ++ return -ENOMEM; ++ ++ for_each_cpu(j, cpu_map) { ++ struct sched_domain *sd; ++ ++ sd = kzalloc_node(sizeof(struct sched_domain) + cpumask_size(), ++ GFP_KERNEL, cpu_to_node(j)); ++ if (!sd) ++ return -ENOMEM; ++ ++ *per_cpu_ptr(sdd->sd, j) = sd; ++ } ++ } ++ ++ return 0; ++} ++ ++static void __sdt_free(const struct cpumask *cpu_map) ++{ ++ struct sched_domain_topology_level *tl; ++ int j; ++ ++ for_each_sd_topology(tl) { ++ struct sd_data *sdd = &tl->data; ++ ++ for_each_cpu(j, cpu_map) { ++ struct sched_domain *sd; ++ ++ if (sdd->sd) { ++ sd = *per_cpu_ptr(sdd->sd, j); ++ kfree(*per_cpu_ptr(sdd->sd, j)); ++ } ++ } ++ free_percpu(sdd->sd); ++ sdd->sd = NULL; ++ } ++} ++ ++struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl, ++ const struct cpumask *cpu_map, struct sched_domain_attr *attr, ++ struct sched_domain *child, int cpu) ++{ ++ struct sched_domain *sd = sd_init(tl, cpu); ++ if (!sd) ++ return child; ++ ++ cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); ++ if (child) { ++ sd->level = child->level + 1; ++ sched_domain_level_max = max(sched_domain_level_max, sd->level); ++ child->parent = sd; ++ sd->child = child; ++ ++ if (!cpumask_subset(sched_domain_span(child), ++ sched_domain_span(sd))) { ++ pr_err("BUG: arch topology borken\n"); ++#ifdef CONFIG_SCHED_DEBUG ++ pr_err(" the %s domain not a subset of the %s domain\n", ++ child->name, sd->name); ++#endif ++ /* Fixup, ensure @sd has at least @child cpus. */ ++ cpumask_or(sched_domain_span(sd), ++ sched_domain_span(sd), ++ sched_domain_span(child)); ++ } ++ ++ } ++ set_domain_attribute(sd, attr); ++ ++ return sd; ++} ++ ++/* ++ * Build sched domains for a given set of cpus and attach the sched domains ++ * to the individual cpus ++ */ ++static int build_sched_domains(const struct cpumask *cpu_map, ++ struct sched_domain_attr *attr) ++{ ++ enum s_alloc alloc_state; ++ struct sched_domain *sd; ++ struct s_data d; ++ int i, ret = -ENOMEM; ++ ++ alloc_state = __visit_domain_allocation_hell(&d, cpu_map); ++ if (alloc_state != sa_rootdomain) ++ goto error; ++ ++ /* Set up domains for cpus specified by the cpu_map. 
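++ * Each CPU walks the topology list bottom-up, chaining one
++ * sched_domain per level and stopping once a level already
++ * spans the whole cpu_map.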
*/ ++ for_each_cpu(i, cpu_map) { ++ struct sched_domain_topology_level *tl; ++ ++ sd = NULL; ++ for_each_sd_topology(tl) { ++ sd = build_sched_domain(tl, cpu_map, attr, sd, i); ++ if (tl == sched_domain_topology) ++ *per_cpu_ptr(d.sd, i) = sd; ++ if (tl->flags & SDTL_OVERLAP) ++ sd->flags |= SD_OVERLAP; ++ if (cpumask_equal(cpu_map, sched_domain_span(sd))) ++ break; ++ } ++ } ++ ++ /* Calculate CPU capacity for physical packages and nodes */ ++ for (i = nr_cpumask_bits-1; i >= 0; i--) { ++ if (!cpumask_test_cpu(i, cpu_map)) ++ continue; ++ ++ for (sd = *per_cpu_ptr(d.sd, i); sd; sd = sd->parent) { ++ claim_allocations(i, sd); ++ } ++ } ++ ++ /* Attach the domains */ ++ rcu_read_lock(); ++ for_each_cpu(i, cpu_map) { ++ sd = *per_cpu_ptr(d.sd, i); ++ cpu_attach_domain(sd, d.rd, i); ++ } ++ rcu_read_unlock(); ++ ++ ret = 0; ++error: ++ __free_domain_allocs(&d, alloc_state, cpu_map); ++ return ret; ++} ++ ++static cpumask_var_t *doms_cur; /* current sched domains */ ++static int ndoms_cur; /* number of sched domains in 'doms_cur' */ ++static struct sched_domain_attr *dattr_cur; ++ /* attribues of custom domains in 'doms_cur' */ ++ ++/* ++ * Special case: If a kmalloc of a doms_cur partition (array of ++ * cpumask) fails, then fallback to a single sched domain, ++ * as determined by the single cpumask fallback_doms. ++ */ ++static cpumask_var_t fallback_doms; ++ ++/* ++ * arch_update_cpu_topology lets virtualized architectures update the ++ * cpu core maps. It is supposed to return 1 if the topology changed ++ * or 0 if it stayed the same. ++ */ ++int __weak arch_update_cpu_topology(void) ++{ ++ return 0; ++} ++ ++cpumask_var_t *alloc_sched_domains(unsigned int ndoms) ++{ ++ int i; ++ cpumask_var_t *doms; ++ ++ doms = kmalloc(sizeof(*doms) * ndoms, GFP_KERNEL); ++ if (!doms) ++ return NULL; ++ for (i = 0; i < ndoms; i++) { ++ if (!alloc_cpumask_var(&doms[i], GFP_KERNEL)) { ++ free_sched_domains(doms, i); ++ return NULL; ++ } ++ } ++ return doms; ++} ++ ++void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms) ++{ ++ unsigned int i; ++ for (i = 0; i < ndoms; i++) ++ free_cpumask_var(doms[i]); ++ kfree(doms); ++} ++ ++/* ++ * Set up scheduler domains and groups. Callers must hold the hotplug lock. ++ * For now this just excludes isolated cpus, but could be used to ++ * exclude other special cases in the future. ++ */ ++static int init_sched_domains(const struct cpumask *cpu_map) ++{ ++ int err; ++ ++ arch_update_cpu_topology(); ++ ndoms_cur = 1; ++ doms_cur = alloc_sched_domains(ndoms_cur); ++ if (!doms_cur) ++ doms_cur = &fallback_doms; ++ cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); ++ err = build_sched_domains(doms_cur[0], NULL); ++ register_sched_domain_sysctl(); ++ ++ return err; ++} ++ ++/* ++ * Detach sched domains from a group of cpus specified in cpu_map ++ * These cpus will now be attached to the NULL domain ++ */ ++static void detach_destroy_domains(const struct cpumask *cpu_map) ++{ ++ int i; ++ ++ rcu_read_lock(); ++ for_each_cpu(i, cpu_map) ++ cpu_attach_domain(NULL, &def_root_domain, i); ++ rcu_read_unlock(); ++} ++ ++/* handle null as "default" */ ++static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur, ++ struct sched_domain_attr *new, int idx_new) ++{ ++ struct sched_domain_attr tmp; ++ ++ /* fast path */ ++ if (!new && !cur) ++ return 1; ++ ++ tmp = SD_ATTR_INIT; ++ return !memcmp(cur ? (cur + idx_cur) : &tmp, ++ new ? 
(new + idx_new) : &tmp, ++ sizeof(struct sched_domain_attr)); ++} ++ ++/* ++ * Partition sched domains as specified by the 'ndoms_new' ++ * cpumasks in the array doms_new[] of cpumasks. This compares ++ * doms_new[] to the current sched domain partitioning, doms_cur[]. ++ * It destroys each deleted domain and builds each new domain. ++ * ++ * 'doms_new' is an array of cpumask_var_t's of length 'ndoms_new'. ++ * The masks don't intersect (don't overlap.) We should setup one ++ * sched domain for each mask. CPUs not in any of the cpumasks will ++ * not be load balanced. If the same cpumask appears both in the ++ * current 'doms_cur' domains and in the new 'doms_new', we can leave ++ * it as it is. ++ * ++ * The passed in 'doms_new' should be allocated using ++ * alloc_sched_domains. This routine takes ownership of it and will ++ * free_sched_domains it when done with it. If the caller failed the ++ * alloc call, then it can pass in doms_new == NULL && ndoms_new == 1, ++ * and partition_sched_domains() will fallback to the single partition ++ * 'fallback_doms', it also forces the domains to be rebuilt. ++ * ++ * If doms_new == NULL it will be replaced with cpu_online_mask. ++ * ndoms_new == 0 is a special case for destroying existing domains, ++ * and it will not create the default domain. ++ * ++ * Call with hotplug lock held ++ */ ++void partition_sched_domains(int ndoms_new, cpumask_var_t doms_new[], ++ struct sched_domain_attr *dattr_new) ++{ ++ int i, j, n; ++ int new_topology; ++ ++ mutex_lock(&sched_domains_mutex); ++ ++ /* always unregister in case we don't destroy any domains */ ++ unregister_sched_domain_sysctl(); ++ ++ /* Let architecture update cpu core mappings. */ ++ new_topology = arch_update_cpu_topology(); ++ ++ n = doms_new ? ndoms_new : 0; ++ ++ /* Destroy deleted domains */ ++ for (i = 0; i < ndoms_cur; i++) { ++ for (j = 0; j < n && !new_topology; j++) { ++ if (cpumask_equal(doms_cur[i], doms_new[j]) ++ && dattrs_equal(dattr_cur, i, dattr_new, j)) ++ goto match1; ++ } ++ /* no match - a current sched domain not in new doms_new[] */ ++ detach_destroy_domains(doms_cur[i]); ++match1: ++ ; ++ } ++ ++ n = ndoms_cur; ++ if (doms_new == NULL) { ++ n = 0; ++ doms_new = &fallback_doms; ++ cpumask_andnot(doms_new[0], cpu_active_mask, cpu_isolated_map); ++ WARN_ON_ONCE(dattr_new); ++ } ++ ++ /* Build new domains */ ++ for (i = 0; i < ndoms_new; i++) { ++ for (j = 0; j < n && !new_topology; j++) { ++ if (cpumask_equal(doms_new[i], doms_cur[j]) ++ && dattrs_equal(dattr_new, i, dattr_cur, j)) ++ goto match2; ++ } ++ /* no match - add a new doms_new */ ++ build_sched_domains(doms_new[i], dattr_new ? dattr_new + i : NULL); ++match2: ++ ; ++ } ++ ++ /* Remember the new sched domains */ ++ if (doms_cur != &fallback_doms) ++ free_sched_domains(doms_cur, ndoms_cur); ++ kfree(dattr_cur); /* kfree(NULL) is safe */ ++ doms_cur = doms_new; ++ dattr_cur = dattr_new; ++ ndoms_cur = ndoms_new; ++ ++ register_sched_domain_sysctl(); ++ ++ mutex_unlock(&sched_domains_mutex); ++} ++ ++static int num_cpus_frozen; /* used to mark begin/end of suspend/resume */ ++ ++/* ++ * Update cpusets according to cpu_active mask. If cpusets are ++ * disabled, cpuset_update_active_cpus() becomes a simple wrapper ++ * around partition_sched_domains(). ++ * ++ * If we come here as part of a suspend/resume, don't touch cpusets because we ++ * want to restore it back to its original state upon resume anyway. 
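++ *
++ * num_cpus_frozen counts CPUs taken down with the _FROZEN
++ * modifier; only when it drains back to zero is the full
++ * cpuset-aware domain layout rebuilt.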
++ */ ++static int cpuset_cpu_active(struct notifier_block *nfb, unsigned long action, ++ void *hcpu) ++{ ++ switch (action) { ++ case CPU_ONLINE_FROZEN: ++ case CPU_DOWN_FAILED_FROZEN: ++ ++ /* ++ * num_cpus_frozen tracks how many CPUs are involved in suspend ++ * resume sequence. As long as this is not the last online ++ * operation in the resume sequence, just build a single sched ++ * domain, ignoring cpusets. ++ */ ++ num_cpus_frozen--; ++ if (likely(num_cpus_frozen)) { ++ partition_sched_domains(1, NULL, NULL); ++ break; ++ } ++ ++ /* ++ * This is the last CPU online operation. So fall through and ++ * restore the original sched domains by considering the ++ * cpuset configurations. ++ */ ++ ++ case CPU_ONLINE: ++ cpuset_update_active_cpus(true); ++ break; ++ default: ++ return NOTIFY_DONE; ++ } ++ return NOTIFY_OK; ++} ++ ++static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action, ++ void *hcpu) ++{ ++ switch (action) { ++ case CPU_DOWN_PREPARE: ++ cpuset_update_active_cpus(false); ++ break; ++ case CPU_DOWN_PREPARE_FROZEN: ++ num_cpus_frozen++; ++ partition_sched_domains(1, NULL, NULL); ++ break; ++ default: ++ return NOTIFY_DONE; ++ } ++ return NOTIFY_OK; ++} ++ ++#if defined(CONFIG_SCHED_SMT) || defined(CONFIG_SCHED_MC) ++/* ++ * Cheaper version of the below functions in case support for SMT and MC is ++ * compiled in but CPUs have no siblings. ++ */ ++static bool sole_cpu_idle(int cpu) ++{ ++ return rq_idle(cpu_rq(cpu)); ++} ++#endif ++#ifdef CONFIG_SCHED_SMT ++static const cpumask_t *thread_cpumask(int cpu) ++{ ++ return topology_sibling_cpumask(cpu); ++} ++/* All this CPU's SMT siblings are idle */ ++static bool siblings_cpu_idle(int cpu) ++{ ++ return cpumask_subset(thread_cpumask(cpu), &grq.cpu_idle_map); ++} ++#endif ++#ifdef CONFIG_SCHED_MC ++static const cpumask_t *core_cpumask(int cpu) ++{ ++ return topology_core_cpumask(cpu); ++} ++/* All this CPU's shared cache siblings are idle */ ++static bool cache_cpu_idle(int cpu) ++{ ++ return cpumask_subset(core_cpumask(cpu), &grq.cpu_idle_map); ++} ++#endif ++ ++enum sched_domain_level { ++ SD_LV_NONE = 0, ++ SD_LV_SIBLING, ++ SD_LV_MC, ++ SD_LV_BOOK, ++ SD_LV_CPU, ++ SD_LV_NODE, ++ SD_LV_ALLNODES, ++ SD_LV_MAX ++}; ++ ++void __init sched_init_smp(void) ++{ ++ struct sched_domain *sd; ++ int cpu, other_cpu; ++ ++ cpumask_var_t non_isolated_cpus; ++ ++ alloc_cpumask_var(&non_isolated_cpus, GFP_KERNEL); ++ alloc_cpumask_var(&fallback_doms, GFP_KERNEL); ++ ++ sched_init_numa(); ++ ++ /* ++ * There's no userspace yet to cause hotplug operations; hence all the ++ * cpu masks are stable and all blatant races in the below code cannot ++ * happen. ++ */ ++ mutex_lock(&sched_domains_mutex); ++ init_sched_domains(cpu_active_mask); ++ cpumask_andnot(non_isolated_cpus, cpu_possible_mask, cpu_isolated_map); ++ if (cpumask_empty(non_isolated_cpus)) ++ cpumask_set_cpu(smp_processor_id(), non_isolated_cpus); ++ mutex_unlock(&sched_domains_mutex); ++ ++ hotcpu_notifier(sched_domains_numa_masks_update, CPU_PRI_SCHED_ACTIVE); ++ hotcpu_notifier(cpuset_cpu_active, CPU_PRI_CPUSET_ACTIVE); ++ hotcpu_notifier(cpuset_cpu_inactive, CPU_PRI_CPUSET_INACTIVE); ++ ++ /* Move init over to a non-isolated CPU */ ++ if (set_cpus_allowed_ptr(current, non_isolated_cpus) < 0) ++ BUG(); ++ free_cpumask_var(non_isolated_cpus); ++ ++ mutex_lock(&sched_domains_mutex); ++ grq_lock_irq(); ++ /* ++ * Set up the relative cache distance of each online cpu from each ++ * other in a simple array for quick lookup. 
Locality is determined ++ * by the closest sched_domain that CPUs are separated by. CPUs with ++ * shared cache in SMT and MC are treated as local. Separate CPUs ++ * (within the same package or physically) within the same node are ++ * treated as not local. CPUs not even in the same domain (different ++ * nodes) are treated as very distant. ++ */ ++ for_each_online_cpu(cpu) { ++ struct rq *rq = cpu_rq(cpu); ++ ++ /* First check if this cpu is in the same node */ ++ for_each_domain(cpu, sd) { ++ if (sd->level > SD_LV_NODE) ++ continue; ++ /* Set locality to local node if not already found lower */ ++ for_each_cpu(other_cpu, sched_domain_span(sd)) { ++ if (rq->cpu_locality[other_cpu] > 3) ++ rq->cpu_locality[other_cpu] = 3; ++ } ++ } ++ ++ /* ++ * Each runqueue has its own function in case it doesn't have ++ * siblings of its own allowing mixed topologies. ++ */ ++#ifdef CONFIG_SCHED_MC ++ for_each_cpu(other_cpu, core_cpumask(cpu)) { ++ if (rq->cpu_locality[other_cpu] > 2) ++ rq->cpu_locality[other_cpu] = 2; ++ } ++ if (cpumask_weight(core_cpumask(cpu)) > 1) ++ rq->cache_idle = cache_cpu_idle; ++#endif ++#ifdef CONFIG_SCHED_SMT ++ for_each_cpu(other_cpu, thread_cpumask(cpu)) ++ rq->cpu_locality[other_cpu] = 1; ++ if (cpumask_weight(thread_cpumask(cpu)) > 1) ++ rq->siblings_idle = siblings_cpu_idle; ++#endif ++ } ++ grq_unlock_irq(); ++ mutex_unlock(&sched_domains_mutex); ++ ++ for_each_online_cpu(cpu) { ++ struct rq *rq = cpu_rq(cpu); ++ for_each_online_cpu(other_cpu) { ++ if (other_cpu <= cpu) ++ continue; ++ printk(KERN_DEBUG "BFS LOCALITY CPU %d to %d: %d\n", cpu, other_cpu, rq->cpu_locality[other_cpu]); ++ } ++ } ++} ++#else ++void __init sched_init_smp(void) ++{ ++} ++#endif /* CONFIG_SMP */ ++ ++int in_sched_functions(unsigned long addr) ++{ ++ return in_lock_functions(addr) || ++ (addr >= (unsigned long)__sched_text_start ++ && addr < (unsigned long)__sched_text_end); ++} ++ ++void __init sched_init(void) ++{ ++#ifdef CONFIG_SMP ++ int cpu_ids; ++#endif ++ int i; ++ struct rq *rq; ++ ++ prio_ratios[0] = 128; ++ for (i = 1 ; i < NICE_WIDTH ; i++) ++ prio_ratios[i] = prio_ratios[i - 1] * 11 / 10; ++ ++ raw_spin_lock_init(&grq.lock); ++ grq.nr_running = grq.nr_uninterruptible = grq.nr_switches = 0; ++ grq.niffies = 0; ++ grq.last_jiffy = jiffies; ++ raw_spin_lock_init(&grq.iso_lock); ++ grq.iso_ticks = 0; ++ grq.iso_refractory = false; ++ grq.noc = 1; ++#ifdef CONFIG_SMP ++ init_defrootdomain(); ++ grq.qnr = grq.idle_cpus = 0; ++ cpumask_clear(&grq.cpu_idle_map); ++#else ++ uprq = &per_cpu(runqueues, 0); ++#endif ++ for_each_possible_cpu(i) { ++ rq = cpu_rq(i); ++ rq->grq_lock = &grq.lock; ++ rq->user_pc = rq->nice_pc = rq->softirq_pc = rq->system_pc = ++ rq->iowait_pc = rq->idle_pc = 0; ++ rq->dither = false; ++#ifdef CONFIG_SMP ++ rq->sticky_task = NULL; ++ rq->last_niffy = 0; ++ rq->sd = NULL; ++ rq->rd = NULL; ++ rq->online = false; ++ rq->cpu = i; ++ rq_attach_root(rq, &def_root_domain); ++#endif ++ atomic_set(&rq->nr_iowait, 0); ++ } ++ ++#ifdef CONFIG_SMP ++ cpu_ids = i; ++ /* ++ * Set the base locality for cpu cache distance calculation to ++ * "distant" (3). Make sure the distance from a CPU to itself is 0. 
++ */ ++ for_each_possible_cpu(i) { ++ int j; ++ ++ rq = cpu_rq(i); ++#ifdef CONFIG_SCHED_SMT ++ rq->siblings_idle = sole_cpu_idle; ++#endif ++#ifdef CONFIG_SCHED_MC ++ rq->cache_idle = sole_cpu_idle; ++#endif ++ rq->cpu_locality = kmalloc(cpu_ids * sizeof(int *), GFP_ATOMIC); ++ for_each_possible_cpu(j) { ++ if (i == j) ++ rq->cpu_locality[j] = 0; ++ else ++ rq->cpu_locality[j] = 4; ++ } ++ } ++#endif ++ ++ for (i = 0; i < PRIO_LIMIT; i++) ++ INIT_LIST_HEAD(grq.queue + i); ++ /* delimiter for bitsearch */ ++ __set_bit(PRIO_LIMIT, grq.prio_bitmap); ++ ++#ifdef CONFIG_PREEMPT_NOTIFIERS ++ INIT_HLIST_HEAD(&init_task.preempt_notifiers); ++#endif ++ ++ /* ++ * The boot idle thread does lazy MMU switching as well: ++ */ ++ atomic_inc(&init_mm.mm_count); ++ enter_lazy_tlb(&init_mm, current); ++ ++ /* ++ * Make us the idle thread. Technically, schedule() should not be ++ * called from this thread, however somewhere below it might be, ++ * but because we are the idle thread, we just pick up running again ++ * when this runqueue becomes "idle". ++ */ ++ init_idle(current, smp_processor_id()); ++ ++#ifdef CONFIG_SMP ++ zalloc_cpumask_var(&sched_domains_tmpmask, GFP_NOWAIT); ++ /* May be allocated at isolcpus cmdline parse time */ ++ if (cpu_isolated_map == NULL) ++ zalloc_cpumask_var(&cpu_isolated_map, GFP_NOWAIT); ++ idle_thread_set_boot_cpu(); ++#endif /* SMP */ ++} ++ ++#ifdef CONFIG_DEBUG_ATOMIC_SLEEP ++static inline int preempt_count_equals(int preempt_offset) ++{ ++ int nested = preempt_count() + rcu_preempt_depth(); ++ ++ return (nested == preempt_offset); ++} ++ ++void __might_sleep(const char *file, int line, int preempt_offset) ++{ ++ /* ++ * Blocking primitives will set (and therefore destroy) current->state, ++ * since we will exit with TASK_RUNNING make sure we enter with it, ++ * otherwise we will destroy state. ++ */ ++ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change, ++ "do not call blocking ops when !TASK_RUNNING; " ++ "state=%lx set at [<%p>] %pS\n", ++ current->state, ++ (void *)current->task_state_change, ++ (void *)current->task_state_change); ++ ++ ___might_sleep(file, line, preempt_offset); ++} ++EXPORT_SYMBOL(__might_sleep); ++ ++void ___might_sleep(const char *file, int line, int preempt_offset) ++{ ++ static unsigned long prev_jiffy; /* ratelimiting */ ++ ++ rcu_sleep_check(); /* WARN_ON_ONCE() by default, no rate limit reqd. 
*/ ++ if ((preempt_count_equals(preempt_offset) && !irqs_disabled() && ++ !is_idle_task(current)) || ++ system_state != SYSTEM_RUNNING || oops_in_progress) ++ return; ++ if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) ++ return; ++ prev_jiffy = jiffies; ++ ++ printk(KERN_ERR ++ "BUG: sleeping function called from invalid context at %s:%d\n", ++ file, line); ++ printk(KERN_ERR ++ "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n", ++ in_atomic(), irqs_disabled(), ++ current->pid, current->comm); ++ ++ if (task_stack_end_corrupted(current)) ++ printk(KERN_EMERG "Thread overran stack, or stack corrupted\n"); ++ ++ debug_show_held_locks(current); ++ if (irqs_disabled()) ++ print_irqtrace_events(current); ++#ifdef CONFIG_DEBUG_PREEMPT ++ if (!preempt_count_equals(preempt_offset)) { ++ pr_err("Preemption disabled at:"); ++ print_ip_sym(current->preempt_disable_ip); ++ pr_cont("\n"); ++ } ++#endif ++ dump_stack(); ++} ++EXPORT_SYMBOL(___might_sleep); ++#endif ++ ++#ifdef CONFIG_MAGIC_SYSRQ ++static inline void normalise_rt_tasks(void) ++{ ++ struct task_struct *g, *p; ++ unsigned long flags; ++ struct rq *rq; ++ int queued; ++ ++ read_lock(&tasklist_lock); ++ for_each_process_thread(g, p) { ++ /* ++ * Only normalize user tasks: ++ */ ++ if (p->flags & PF_KTHREAD) ++ continue; ++ ++ if (!rt_task(p) && !iso_task(p)) ++ continue; ++ ++ rq = task_grq_lock(p, &flags); ++ queued = task_queued(p); ++ if (queued) ++ dequeue_task(p); ++ __setscheduler(p, rq, SCHED_NORMAL, 0, false); ++ if (queued) { ++ enqueue_task(p, rq); ++ try_preempt(p, rq); ++ } ++ ++ task_grq_unlock(&flags); ++ } ++ read_unlock(&tasklist_lock); ++} ++ ++void normalize_rt_tasks(void) ++{ ++ normalise_rt_tasks(); ++} ++#endif /* CONFIG_MAGIC_SYSRQ */ ++ ++#if defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) ++/* ++ * These functions are only useful for the IA64 MCA handling, or kdb. ++ * ++ * They can only be called when the whole system has been ++ * stopped - every CPU needs to be quiescent, and no scheduling ++ * activity can take place. Using them for anything else would ++ * be a serious bug, and as a result, they aren't even visible ++ * under any other configuration. ++ */ ++ ++/** ++ * curr_task - return the current task for a given cpu. ++ * @cpu: the processor in question. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! ++ * ++ * Return: The current task for @cpu. ++ */ ++struct task_struct *curr_task(int cpu) ++{ ++ return cpu_curr(cpu); ++} ++ ++#endif /* defined(CONFIG_IA64) || defined(CONFIG_KGDB_KDB) */ ++ ++#ifdef CONFIG_IA64 ++/** ++ * set_curr_task - set the current task for a given cpu. ++ * @cpu: the processor in question. ++ * @p: the task pointer to set. ++ * ++ * Description: This function must only be used when non-maskable interrupts ++ * are serviced on a separate stack. It allows the architecture to switch the ++ * notion of the current task on a cpu in a non-blocking manner. This function ++ * must be called with all CPU's synchronised, and interrupts disabled, the ++ * and caller must save the original value of the current task (see ++ * curr_task() above) and restore that value before reenabling interrupts and ++ * re-starting the system. ++ * ++ * ONLY VALID WHEN THE WHOLE SYSTEM IS STOPPED! 
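++ *
++ * Illustrative call pattern (sketch only, hypothetical caller):
++ *	saved = curr_task(cpu);
++ *	set_curr_task(cpu, p);
++ *	... work on the separate stack ...
++ *	set_curr_task(cpu, saved);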
++ */ ++void set_curr_task(int cpu, struct task_struct *p) ++{ ++ cpu_curr(cpu) = p; ++} ++ ++#endif ++ ++/* ++ * Use precise platform statistics if available: ++ */ ++#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE ++void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) ++{ ++ *ut = p->utime; ++ *st = p->stime; ++} ++EXPORT_SYMBOL_GPL(task_cputime_adjusted); ++ ++void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) ++{ ++ struct task_cputime cputime; ++ ++ thread_group_cputime(p, &cputime); ++ ++ *ut = cputime.utime; ++ *st = cputime.stime; ++} ++ ++void vtime_account_system_irqsafe(struct task_struct *tsk) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ vtime_account_system(tsk); ++ local_irq_restore(flags); ++} ++EXPORT_SYMBOL_GPL(vtime_account_system_irqsafe); ++ ++#ifndef __ARCH_HAS_VTIME_TASK_SWITCH ++void vtime_task_switch(struct task_struct *prev) ++{ ++ if (is_idle_task(prev)) ++ vtime_account_idle(prev); ++ else ++ vtime_account_system(prev); ++ ++ vtime_account_user(prev); ++ arch_vtime_task_switch(prev); ++} ++#endif ++ ++#else ++/* ++ * Perform (stime * rtime) / total, but avoid multiplication overflow by ++ * losing precision when the numbers are big. ++ */ ++static cputime_t scale_stime(u64 stime, u64 rtime, u64 total) ++{ ++ u64 scaled; ++ ++ for (;;) { ++ /* Make sure "rtime" is the bigger of stime/rtime */ ++ if (stime > rtime) { ++ u64 tmp = rtime; rtime = stime; stime = tmp; ++ } ++ ++ /* Make sure 'total' fits in 32 bits */ ++ if (total >> 32) ++ goto drop_precision; ++ ++ /* Does rtime (and thus stime) fit in 32 bits? */ ++ if (!(rtime >> 32)) ++ break; ++ ++ /* Can we just balance rtime/stime rather than dropping bits? */ ++ if (stime >> 31) ++ goto drop_precision; ++ ++ /* We can grow stime and shrink rtime and try to make them both fit */ ++ stime <<= 1; ++ rtime >>= 1; ++ continue; ++ ++drop_precision: ++ /* We drop from rtime, it has more bits than stime */ ++ rtime >>= 1; ++ total >>= 1; ++ } ++ ++ /* ++ * Make sure gcc understands that this is a 32x32->64 multiply, ++ * followed by a 64/32->64 divide. ++ */ ++ scaled = div_u64((u64) (u32) stime * (u64) (u32) rtime, (u32)total); ++ return (__force cputime_t) scaled; ++} ++ ++/* ++ * Adjust tick based cputime random precision against scheduler ++ * runtime accounting. ++ */ ++static void cputime_adjust(struct task_cputime *curr, ++ struct prev_cputime *prev, ++ cputime_t *ut, cputime_t *st) ++{ ++ cputime_t rtime, stime, utime, total; ++ ++ stime = curr->stime; ++ total = stime + curr->utime; ++ ++ /* ++ * Tick based cputime accounting depend on random scheduling ++ * timeslices of a task to be interrupted or not by the timer. ++ * Depending on these circumstances, the number of these interrupts ++ * may be over or under-optimistic, matching the real user and system ++ * cputime with a variable precision. ++ * ++ * Fix this by scaling these tick based values against the total ++ * runtime accounted by the CFS scheduler. ++ */ ++ rtime = nsecs_to_cputime(curr->sum_exec_runtime); ++ ++ /* ++ * Update userspace visible utime/stime values only if actual execution ++ * time is bigger than already exported. Note that can happen, that we ++ * provided bigger values due to scaling inaccuracy on big numbers. 
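++ *
++ * e.g. stime = utime = 2 ticks but rtime = 6: scale_stime()
++ * gives stime = 2 * 6 / 4 = 3 and utime = rtime - stime = 3.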
++ */ ++ if (prev->stime + prev->utime >= rtime) ++ goto out; ++ ++ if (total) { ++ stime = scale_stime((__force u64)stime, ++ (__force u64)rtime, (__force u64)total); ++ utime = rtime - stime; ++ } else { ++ stime = rtime; ++ utime = 0; ++ } ++ ++ /* ++ * If the tick based count grows faster than the scheduler one, ++ * the result of the scaling may go backward. ++ * Let's enforce monotonicity. ++ */ ++ prev->stime = max(prev->stime, stime); ++ prev->utime = max(prev->utime, utime); ++ ++out: ++ *ut = prev->utime; ++ *st = prev->stime; ++} ++ ++void task_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) ++{ ++ struct task_cputime cputime = { ++ .sum_exec_runtime = tsk_seruntime(p), ++ }; ++ ++ task_cputime(p, &cputime.utime, &cputime.stime); ++ cputime_adjust(&cputime, &p->prev_cputime, ut, st); ++} ++EXPORT_SYMBOL_GPL(task_cputime_adjusted); ++ ++/* ++ * Must be called with siglock held. ++ */ ++void thread_group_cputime_adjusted(struct task_struct *p, cputime_t *ut, cputime_t *st) ++{ ++ struct task_cputime cputime; ++ ++ thread_group_cputime(p, &cputime); ++ cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st); ++} ++#endif ++ ++void init_idle_bootup_task(struct task_struct *idle) ++{} ++ ++#ifdef CONFIG_SCHED_DEBUG ++void proc_sched_show_task(struct task_struct *p, struct seq_file *m) ++{} ++ ++void proc_sched_set_task(struct task_struct *p) ++{} ++#endif ++ ++#ifdef CONFIG_SMP ++#define SCHED_LOAD_SHIFT (10) ++#define SCHED_LOAD_SCALE (1L << SCHED_LOAD_SHIFT) ++ ++unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu) ++{ ++ return SCHED_LOAD_SCALE; ++} ++ ++unsigned long default_scale_smt_power(struct sched_domain *sd, int cpu) ++{ ++ unsigned long weight = cpumask_weight(sched_domain_span(sd)); ++ unsigned long smt_gain = sd->smt_gain; ++ ++ smt_gain /= weight; ++ ++ return smt_gain; ++} ++#endif +Index: linux-4.4-ck1/include/uapi/linux/sched.h +=================================================================== +--- linux-4.4-ck1.orig/include/uapi/linux/sched.h 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/include/uapi/linux/sched.h 2016-03-25 16:03:45.294350506 +1100 +@@ -37,9 +37,16 @@ + #define SCHED_FIFO 1 + #define SCHED_RR 2 + #define SCHED_BATCH 3 +-/* SCHED_ISO: reserved but not implemented yet */ ++/* SCHED_ISO: Implemented on BFS only */ + #define SCHED_IDLE 5 ++#ifdef CONFIG_SCHED_BFS ++#define SCHED_ISO 4 ++#define SCHED_IDLEPRIO SCHED_IDLE ++#define SCHED_MAX (SCHED_IDLEPRIO) ++#define SCHED_RANGE(policy) ((policy) <= SCHED_MAX) ++#else /* CONFIG_SCHED_BFS */ + #define SCHED_DEADLINE 6 ++#endif /* CONFIG_SCHED_BFS */ + + /* Can be ORed in to make sure the process is reverted back to SCHED_NORMAL on fork */ + #define SCHED_RESET_ON_FORK 0x40000000 +Index: linux-4.4-ck1/drivers/cpufreq/cpufreq_conservative.c +=================================================================== +--- linux-4.4-ck1.orig/drivers/cpufreq/cpufreq_conservative.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/drivers/cpufreq/cpufreq_conservative.c 2016-03-25 16:03:45.294350506 +1100 +@@ -15,8 +15,8 @@ + #include "cpufreq_governor.h" + + /* Conservative governor macros */ +-#define DEF_FREQUENCY_UP_THRESHOLD (80) +-#define DEF_FREQUENCY_DOWN_THRESHOLD (20) ++#define DEF_FREQUENCY_UP_THRESHOLD (63) ++#define DEF_FREQUENCY_DOWN_THRESHOLD (26) + #define DEF_FREQUENCY_STEP (5) + #define DEF_SAMPLING_DOWN_FACTOR (1) + #define MAX_SAMPLING_DOWN_FACTOR (10) +Index: linux-4.4-ck1/kernel/time/Kconfig 
+=================================================================== +--- linux-4.4-ck1.orig/kernel/time/Kconfig 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/kernel/time/Kconfig 2016-03-25 16:03:45.294350506 +1100 +@@ -89,7 +89,7 @@ config NO_HZ_IDLE + config NO_HZ_FULL + bool "Full dynticks system (tickless)" + # NO_HZ_COMMON dependency +- depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS ++ depends on !ARCH_USES_GETTIMEOFFSET && GENERIC_CLOCKEVENTS && !SCHED_BFS + # We need at least one periodic CPU for timekeeping + depends on SMP + depends on HAVE_CONTEXT_TRACKING +Index: linux-4.4-ck1/kernel/sched/Makefile +=================================================================== +--- linux-4.4-ck1.orig/kernel/sched/Makefile 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/kernel/sched/Makefile 2016-03-25 16:03:45.294350506 +1100 +@@ -11,11 +11,17 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER + CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer + endif + ++ifdef CONFIG_SCHED_BFS ++obj-y += bfs.o clock.o ++else + obj-y += core.o loadavg.o clock.o cputime.o + obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o + obj-y += wait.o completion.o idle.o +-obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o ++obj-$(CONFIG_SMP) += cpudeadline.o + obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o +-obj-$(CONFIG_SCHEDSTATS) += stats.o + obj-$(CONFIG_SCHED_DEBUG) += debug.o + obj-$(CONFIG_CGROUP_CPUACCT) += cpuacct.o ++endif ++obj-y += wait.o completion.o idle.o ++obj-$(CONFIG_SMP) += cpupri.o ++obj-$(CONFIG_SCHEDSTATS) += stats.o +Index: linux-4.4-ck1/kernel/sched/bfs_sched.h +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-4.4-ck1/kernel/sched/bfs_sched.h 2016-03-25 16:03:45.294350506 +1100 +@@ -0,0 +1,181 @@ ++#include ++#include ++#include ++ ++#ifndef BFS_SCHED_H ++#define BFS_SCHED_H ++ ++/* ++ * This is the main, per-CPU runqueue data structure. ++ * This data should only be modified by the local cpu. 
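++ * Unlike the mainline per-CPU runqueues, BFS keeps one global
++ * queue (grq); this structure mostly caches state about
++ * rq->curr so hot paths can avoid taking the grq lock.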
++ */ ++struct rq { ++ struct task_struct *curr, *idle, *stop; ++ struct mm_struct *prev_mm; ++ ++ /* Pointer to grq spinlock */ ++ raw_spinlock_t *grq_lock; ++ ++ /* Stored data about rq->curr to work outside grq lock */ ++ u64 rq_deadline; ++ unsigned int rq_policy; ++ int rq_time_slice; ++ u64 rq_last_ran; ++ int rq_prio; ++ bool rq_running; /* There is a task running */ ++ int soft_affined; /* Running or queued tasks with this set as their rq */ ++#ifdef CONFIG_SMT_NICE ++ struct mm_struct *rq_mm; ++ int rq_smt_bias; /* Policy/nice level bias across smt siblings */ ++#endif ++ /* Accurate timekeeping data */ ++ u64 timekeep_clock; ++ unsigned long user_pc, nice_pc, irq_pc, softirq_pc, system_pc, ++ iowait_pc, idle_pc; ++ atomic_t nr_iowait; ++ ++#ifdef CONFIG_SMP ++ int cpu; /* cpu of this runqueue */ ++ bool online; ++ bool scaling; /* This CPU is managed by a scaling CPU freq governor */ ++ struct task_struct *sticky_task; ++ ++ struct root_domain *rd; ++ struct sched_domain *sd; ++ int *cpu_locality; /* CPU relative cache distance */ ++#ifdef CONFIG_SCHED_SMT ++ bool (*siblings_idle)(int cpu); ++ /* See if all smt siblings are idle */ ++#endif /* CONFIG_SCHED_SMT */ ++#ifdef CONFIG_SCHED_MC ++ bool (*cache_idle)(int cpu); ++ /* See if all cache siblings are idle */ ++#endif /* CONFIG_SCHED_MC */ ++ u64 last_niffy; /* Last time this RQ updated grq.niffies */ ++#endif /* CONFIG_SMP */ ++#ifdef CONFIG_IRQ_TIME_ACCOUNTING ++ u64 prev_irq_time; ++#endif /* CONFIG_IRQ_TIME_ACCOUNTING */ ++#ifdef CONFIG_PARAVIRT ++ u64 prev_steal_time; ++#endif /* CONFIG_PARAVIRT */ ++#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING ++ u64 prev_steal_time_rq; ++#endif /* CONFIG_PARAVIRT_TIME_ACCOUNTING */ ++ ++ u64 clock, old_clock, last_tick; ++ u64 clock_task; ++ bool dither; ++ ++#ifdef CONFIG_SCHEDSTATS ++ ++ /* latency stats */ ++ struct sched_info rq_sched_info; ++ unsigned long long rq_cpu_time; ++ /* could above be rq->cfs_rq.exec_clock + rq->rt_rq.rt_runtime ? */ ++ ++ /* sys_sched_yield() stats */ ++ unsigned int yld_count; ++ ++ /* schedule() stats */ ++ unsigned int sched_switch; ++ unsigned int sched_count; ++ unsigned int sched_goidle; ++ ++ /* try_to_wake_up() stats */ ++ unsigned int ttwu_count; ++ unsigned int ttwu_local; ++#endif /* CONFIG_SCHEDSTATS */ ++#ifdef CONFIG_CPU_IDLE ++ /* Must be inspected within a rcu lock section */ ++ struct cpuidle_state *idle_state; ++#endif ++}; ++ ++#ifdef CONFIG_SMP ++struct rq *cpu_rq(int cpu); ++#endif ++ ++#ifndef CONFIG_SMP ++extern struct rq *uprq; ++#define cpu_rq(cpu) (uprq) ++#define this_rq() (uprq) ++#define raw_rq() (uprq) ++#define task_rq(p) (uprq) ++#define cpu_curr(cpu) ((uprq)->curr) ++#else /* CONFIG_SMP */ ++DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); ++#define this_rq() this_cpu_ptr(&runqueues) ++#define raw_rq() raw_cpu_ptr(&runqueues) ++#endif /* CONFIG_SMP */ ++ ++static inline u64 __rq_clock_broken(struct rq *rq) ++{ ++ return READ_ONCE(rq->clock); ++} ++ ++static inline u64 rq_clock(struct rq *rq) ++{ ++ lockdep_assert_held(rq->grq_lock); ++ return rq->clock; ++} ++ ++static inline u64 rq_clock_task(struct rq *rq) ++{ ++ lockdep_assert_held(rq->grq_lock); ++ return rq->clock_task; ++} ++ ++extern struct mutex sched_domains_mutex; ++ ++#define rcu_dereference_check_sched_domain(p) \ ++ rcu_dereference_check((p), \ ++ lockdep_is_held(&sched_domains_mutex)) ++ ++/* ++ * The domain tree (rq->sd) is protected by RCU's quiescent state transition. ++ * See detach_destroy_domains: synchronize_sched for details. 
++ * ++ * The domain tree of any CPU may only be accessed from within ++ * preempt-disabled sections. ++ */ ++#define for_each_domain(cpu, __sd) \ ++ for (__sd = rcu_dereference_check_sched_domain(cpu_rq(cpu)->sd); __sd; __sd = __sd->parent) ++ ++static inline void sched_ttwu_pending(void) { } ++ ++static inline int task_on_rq_queued(struct task_struct *p) ++{ ++ return p->on_rq; ++} ++ ++#ifdef CONFIG_SMP ++ ++extern void set_cpus_allowed_common(struct task_struct *p, const struct cpumask *new_mask); ++ ++#endif ++ ++#ifdef CONFIG_CPU_IDLE ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++ rq->idle_state = idle_state; ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ WARN_ON(!rcu_read_lock_held()); ++ return rq->idle_state; ++} ++#else ++static inline void idle_set_state(struct rq *rq, ++ struct cpuidle_state *idle_state) ++{ ++} ++ ++static inline struct cpuidle_state *idle_get_state(struct rq *rq) ++{ ++ return NULL; ++} ++#endif ++#endif /* BFS_SCHED_H */ +Index: linux-4.4-ck1/kernel/sched/stats.c +=================================================================== +--- linux-4.4-ck1.orig/kernel/sched/stats.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/kernel/sched/stats.c 2016-03-25 16:03:45.294350506 +1100 +@@ -4,7 +4,11 @@ + #include + #include + ++#ifndef CONFIG_SCHED_BFS + #include "sched.h" ++#else ++#include "bfs_sched.h" ++#endif + + /* + * bump this up when changing the output format or the meaning of an existing +Index: linux-4.4-ck1/arch/x86/Kconfig +=================================================================== +--- linux-4.4-ck1.orig/arch/x86/Kconfig 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/arch/x86/Kconfig 2016-03-25 16:03:45.294350506 +1100 +@@ -877,10 +877,26 @@ config SCHED_SMT + depends on SMP + ---help--- + SMT scheduler support improves the CPU scheduler's decision making +- when dealing with Intel Pentium 4 chips with HyperThreading at a ++ when dealing with Intel P4/Core 2 chips with HyperThreading at a + cost of slightly increased overhead in some places. If unsure say + N here. + ++config SMT_NICE ++ bool "SMT (Hyperthreading) aware nice priority and policy support" ++ depends on SCHED_BFS && SCHED_SMT ++ default y ++ ---help--- ++ Enabling Hyperthreading on Intel CPUs decreases the effectiveness ++ of the use of 'nice' levels and different scheduling policies ++ (e.g. realtime) due to sharing of CPU power between hyperthreads. ++ SMT nice support makes each logical CPU aware of what is running on ++ its hyperthread siblings, maintaining appropriate distribution of ++ CPU according to nice levels and scheduling policies at the expense ++ of slightly increased overhead. ++ ++ If unsure say Y here. ++ ++ + config SCHED_MC + def_bool y + prompt "Multi-core scheduler support" +@@ -1961,7 +1977,7 @@ config HOTPLUG_CPU + config BOOTPARAM_HOTPLUG_CPU0 + bool "Set default setting of cpu0_hotpluggable" + default n +- depends on HOTPLUG_CPU ++ depends on HOTPLUG_CPU && !SCHED_BFS + ---help--- + Set whether default state of cpu0_hotpluggable is on or off. + +@@ -1990,7 +2006,7 @@ config BOOTPARAM_HOTPLUG_CPU0 + config DEBUG_HOTPLUG_CPU0 + def_bool n + prompt "Debug CPU0 hotplug" +- depends on HOTPLUG_CPU ++ depends on HOTPLUG_CPU && !SCHED_BFS + ---help--- + Enabling this option offlines CPU0 (if CPU0 can be offlined) as + soon as possible and boots up userspace with CPU0 offlined. 
+Index: linux-4.4-ck1/include/linux/sched/prio.h
+===================================================================
+--- linux-4.4-ck1.orig/include/linux/sched/prio.h 2016-03-25 16:03:45.297350563 +1100
++++ linux-4.4-ck1/include/linux/sched/prio.h 2016-03-25 16:03:45.295350525 +1100
+@@ -19,8 +19,20 @@
+ */
+ 
+ #define MAX_USER_RT_PRIO 100
++
++#ifdef CONFIG_SCHED_BFS
++/* Note different MAX_RT_PRIO */
++#define MAX_RT_PRIO (MAX_USER_RT_PRIO + 1)
++
++#define ISO_PRIO (MAX_RT_PRIO)
++#define NORMAL_PRIO (MAX_RT_PRIO + 1)
++#define IDLE_PRIO (MAX_RT_PRIO + 2)
++#define PRIO_LIMIT ((IDLE_PRIO) + 1)
++#else /* CONFIG_SCHED_BFS */
+ #define MAX_RT_PRIO MAX_USER_RT_PRIO
+ 
++#endif /* CONFIG_SCHED_BFS */
++
+ #define MAX_PRIO (MAX_RT_PRIO + NICE_WIDTH)
+ #define DEFAULT_PRIO (MAX_RT_PRIO + NICE_WIDTH / 2)
+ 
+Index: linux-4.4-ck1/drivers/cpufreq/intel_pstate.c
+===================================================================
+--- linux-4.4-ck1.orig/drivers/cpufreq/intel_pstate.c 2016-03-25 16:03:45.297350563 +1100
++++ linux-4.4-ck1/drivers/cpufreq/intel_pstate.c 2016-03-25 16:03:45.295350525 +1100
+@@ -569,8 +569,13 @@ static void atom_set_pstate(struct cpuda
+ vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
+ vid = ceiling_fp(vid_fp);
+ 
+- if (pstate > cpudata->pstate.max_pstate)
+- vid = cpudata->vid.turbo;
++ if (pstate < cpudata->pstate.max_pstate)
++ cpu_scaling(cpudata->cpu);
++ else {
++ if (pstate > cpudata->pstate.max_pstate)
++ vid = cpudata->vid.turbo;
++ cpu_nonscaling(cpudata->cpu);
++ }
+ 
+ val |= vid;
+ 
+Index: linux-4.4-ck1/kernel/sched/idle.c
+===================================================================
+--- linux-4.4-ck1.orig/kernel/sched/idle.c 2016-03-25 16:03:45.297350563 +1100
++++ linux-4.4-ck1/kernel/sched/idle.c 2016-03-25 16:03:45.295350525 +1100
+@@ -13,7 +13,11 @@
+ 
+ #include <trace/events/power.h>
+ 
++#ifdef CONFIG_SCHED_BFS
++#include "bfs_sched.h"
++#else
+ #include "sched.h"
++#endif
+ 
+ /**
+ * sched_idle_set_state - Record idle state for the current CPU.
+Index: linux-4.4-ck1/kernel/time/posix-cpu-timers.c
+===================================================================
+--- linux-4.4-ck1.orig/kernel/time/posix-cpu-timers.c 2016-03-25 16:03:45.297350563 +1100
++++ linux-4.4-ck1/kernel/time/posix-cpu-timers.c 2016-03-25 16:03:45.295350525 +1100
+@@ -448,7 +448,7 @@ static void cleanup_timers(struct list_h
+ */
+ void posix_cpu_timers_exit(struct task_struct *tsk)
+ {
+- add_device_randomness((const void*) &tsk->se.sum_exec_runtime,
++ add_device_randomness((const void*) &tsk_seruntime(tsk),
+ sizeof(unsigned long long));
+ cleanup_timers(tsk->cpu_timers);
+ 
+@@ -878,7 +878,7 @@ static void check_thread_timers(struct t
+ tsk_expires->virt_exp = expires_to_cputime(expires);
+ 
+ tsk_expires->sched_exp = check_timers_list(++timers, firing,
+- tsk->se.sum_exec_runtime);
++ tsk_seruntime(tsk));
+ 
+ /*
+ * Check for the special case thread timers.
+@@ -889,7 +889,7 @@ static void check_thread_timers(struct t
+ READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
+ 
+ if (hard != RLIM_INFINITY &&
+- tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
++ tsk_rttimeout(tsk) > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
+ /*
+ * At the hard limit, we just die.
+ * No need to calculate anything else now.
+@@ -897,7 +897,7 @@ static void check_thread_timers(struct t + __group_send_sig_info(SIGKILL, SEND_SIG_PRIV, tsk); + return; + } +- if (tsk->rt.timeout > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { ++ if (tsk_rttimeout(tsk) > DIV_ROUND_UP(soft, USEC_PER_SEC/HZ)) { + /* + * At the soft limit, send a SIGXCPU every second. + */ +@@ -1144,7 +1144,7 @@ static inline int fastpath_timer_check(s + struct task_cputime task_sample; + + task_cputime(tsk, &task_sample.utime, &task_sample.stime); +- task_sample.sum_exec_runtime = tsk->se.sum_exec_runtime; ++ task_sample.sum_exec_runtime = tsk_seruntime(tsk); + if (task_cputime_expired(&task_sample, &tsk->cputime_expires)) + return 1; + } +Index: linux-4.4-ck1/kernel/trace/trace_selftest.c +=================================================================== +--- linux-4.4-ck1.orig/kernel/trace/trace_selftest.c 2016-03-25 16:03:45.297350563 +1100 ++++ linux-4.4-ck1/kernel/trace/trace_selftest.c 2016-03-25 16:03:45.295350525 +1100 +@@ -1039,10 +1039,15 @@ static int trace_wakeup_test_thread(void + { + /* Make this a -deadline thread */ + static const struct sched_attr attr = { ++#ifdef CONFIG_SCHED_BFS ++ /* No deadline on BFS, use RR */ ++ .sched_policy = SCHED_RR, ++#else + .sched_policy = SCHED_DEADLINE, + .sched_runtime = 100000ULL, + .sched_deadline = 10000000ULL, + .sched_period = 10000000ULL ++#endif + }; + struct wakeup_test_data *x = data; + diff --git a/patches_disabled/BLD-4.6.patch b/patches_disabled/BLD-4.6.patch new file mode 100644 index 0000000..51cc1f7 --- /dev/null +++ b/patches_disabled/BLD-4.6.patch @@ -0,0 +1,708 @@ +diff --git a/init/Kconfig b/init/Kconfig +index 0dfd09d..8d704e5 100644 +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -36,6 +36,15 @@ config BROKEN_ON_SMP + depends on BROKEN || !SMP + default y + ++config BLD ++ bool "An alternate CPU load distribution technique for task scheduler" ++ depends on SMP ++ default y ++ help ++ This is an alternate CPU load distribution technique based for task ++ scheduler based on The Barbershop Load Distribution algorithm. Not ++ suitable for NUMA, should work well on SMP. ++ + config INIT_ENV_ARG_LIMIT + int + default 32 if !UML +diff --git a/kernel/sched/bld.h b/kernel/sched/bld.h +new file mode 100644 +index 0000000..f1f9fba +--- /dev/null ++++ b/kernel/sched/bld.h +@@ -0,0 +1,215 @@ ++#ifdef CONFIG_BLD ++ ++static DEFINE_RWLOCK(rt_list_lock); ++static LIST_HEAD(rt_rq_head); ++static LIST_HEAD(cfs_rq_head); ++static DEFINE_RWLOCK(cfs_list_lock); ++ ++#ifdef CONFIG_FAIR_GROUP_SCHED ++static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq) ++{ ++ return cfs_rq->rq; ++} ++#else ++static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq) ++{ ++ return container_of(cfs_rq, struct rq, cfs); ++} ++#endif ++ ++#ifdef CONFIG_RT_GROUP_SCHED ++static inline struct rq *rq_of_rt(struct rt_rq *rt_rq) ++{ ++ return rt_rq->rq; ++} ++#else ++static inline struct rq *rq_of_rt(struct rt_rq *rt_rq) ++{ ++ return container_of(rt_rq, struct rq, rt); ++} ++#endif ++ ++static int select_cpu_for_wakeup(int task_type, struct cpumask *mask) ++{ ++ int cpu = smp_processor_id(), i; ++ unsigned long load, varload; ++ struct rq *rq; ++ ++ if (task_type) { ++ varload = ULONG_MAX; ++ for_each_cpu(i, mask) { ++ rq = cpu_rq(i); ++ load = rq->cfs.load.weight; ++ if (load < varload) { ++ varload = load; ++ cpu = i; ++ } ++ } ++ } else { ++ /* Here's an attempt to get a CPU within the mask where ++ * we can preempt easily. 
To achieve this we tried to ++ * maintain a lowbit, which indicate the lowest bit set on ++ * array bitmap. Since all CPUs contains high priority ++ * kernel threads therefore we eliminate 0, so it might not ++ * be right every time, but it's just an indicator. ++ */ ++ varload = 1; ++ ++ for_each_cpu(i, mask) { ++ rq = cpu_rq(i); ++ load = rq->rt.lowbit; ++ if (load >= varload) { ++ varload = load; ++ cpu = i; ++ } ++ } ++ } ++ ++ return cpu; ++} ++ ++static int bld_pick_cpu_cfs(struct task_struct *p, int sd_flags, int wake_flags) ++{ ++ struct cfs_rq *cfs; ++ unsigned long flags; ++ unsigned int cpu = smp_processor_id(); ++ ++ read_lock_irqsave(&cfs_list_lock, flags); ++ list_for_each_entry(cfs, &cfs_rq_head, bld_cfs_list) { ++ cpu = cpu_of(rq_of_cfs(cfs)); ++ if (cpu_online(cpu)) ++ break; ++ } ++ read_unlock_irqrestore(&cfs_list_lock, flags); ++ return cpu; ++} ++ ++static int bld_pick_cpu_rt(struct task_struct *p, int sd_flags, int wake_flags) ++{ ++ struct rt_rq *rt; ++ unsigned long flags; ++ unsigned int cpu = smp_processor_id(); ++ ++ read_lock_irqsave(&rt_list_lock, flags); ++ list_for_each_entry(rt, &rt_rq_head, bld_rt_list) { ++ cpu = cpu_of(rq_of_rt(rt)); ++ if (cpu_online(cpu)) ++ break; ++ } ++ read_unlock_irqrestore(&rt_list_lock, flags); ++ return cpu; ++} ++ ++static int bld_pick_cpu_domain(struct task_struct *p, int sd_flags, int wake_flags) ++{ ++ unsigned int cpu = smp_processor_id(), want_affine = 0; ++ struct cpumask *tmpmask; ++ ++ if (p->nr_cpus_allowed == 1) ++ return task_cpu(p); ++ ++ if (sd_flags & SD_BALANCE_WAKE) { ++ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) { ++ want_affine = 1; ++ } ++ } ++ ++ if (want_affine) ++ tmpmask = tsk_cpus_allowed(p); ++ else ++ tmpmask = sched_domain_span(cpu_rq(task_cpu(p))->sd); ++ ++ if (rt_task(p)) ++ cpu = select_cpu_for_wakeup(0, tmpmask); ++ else ++ cpu = select_cpu_for_wakeup(1, tmpmask); ++ ++ return cpu; ++} ++ ++static void track_load_rt(struct rq *rq, struct task_struct *p) ++{ ++ unsigned long flag; ++ int firstbit; ++ struct rt_rq *first; ++ struct rt_prio_array *array = &rq->rt.active; ++ ++ first = list_entry(rt_rq_head.next, struct rt_rq, bld_rt_list); ++ firstbit = sched_find_first_bit(array->bitmap); ++ ++ /* Maintaining rt.lowbit */ ++ if (firstbit > 0 && firstbit <= rq->rt.lowbit) ++ rq->rt.lowbit = firstbit; ++ ++ if (rq->rt.lowbit < first->lowbit) { ++ write_lock_irqsave(&rt_list_lock, flag); ++ list_del(&rq->rt.bld_rt_list); ++ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head); ++ write_unlock_irqrestore(&rt_list_lock, flag); ++ } ++} ++ ++static int bld_get_cpu(struct task_struct *p, int sd_flags, int wake_flags) ++{ ++ unsigned int cpu; ++ ++ if (sd_flags == SD_BALANCE_WAKE || (sd_flags == SD_BALANCE_EXEC && (get_nr_threads(p) > 1))) ++ cpu = bld_pick_cpu_domain(p, sd_flags, wake_flags); ++ else { ++ if (rt_task(p)) ++ cpu = bld_pick_cpu_rt(p, sd_flags, wake_flags); ++ else ++ cpu = bld_pick_cpu_cfs(p, sd_flags, wake_flags); ++ } ++ ++ return cpu; ++} ++ ++static void bld_track_load_activate(struct rq *rq, struct task_struct *p) ++{ ++ unsigned long flag; ++ if (rt_task(p)) { ++ track_load_rt(rq, p); ++ } else { ++ if (rq->cfs.pos != 2) { ++ struct cfs_rq *last; ++ last = list_entry(cfs_rq_head.prev, struct cfs_rq, bld_cfs_list); ++ if (rq->cfs.load.weight >= last->load.weight) { ++ write_lock_irqsave(&cfs_list_lock, flag); ++ list_del(&rq->cfs.bld_cfs_list); ++ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head); ++ rq->cfs.pos = 2; last->pos = 1; ++ write_unlock_irqrestore(&cfs_list_lock, flag); ++ 
} ++ } ++ } ++} ++ ++static void bld_track_load_deactivate(struct rq *rq, struct task_struct *p) ++{ ++ unsigned long flag; ++ if (rt_task(p)) { ++ track_load_rt(rq, p); ++ } else { ++ if (rq->cfs.pos != 0) { ++ struct cfs_rq *first; ++ first = list_entry(cfs_rq_head.next, struct cfs_rq, bld_cfs_list); ++ if (rq->cfs.load.weight <= first->load.weight) { ++ write_lock_irqsave(&cfs_list_lock, flag); ++ list_del(&rq->cfs.bld_cfs_list); ++ list_add(&rq->cfs.bld_cfs_list, &cfs_rq_head); ++ rq->cfs.pos = 0; first->pos = 1; ++ write_unlock_irqrestore(&cfs_list_lock, flag); ++ } ++ } ++ } ++} ++#else ++static inline void bld_track_load_activate(struct rq *rq, struct task_struct *p) ++{ ++} ++ ++static inline void bld_track_load_deactivate(struct rq *rq, struct task_struct *p) ++{ ++} ++#endif /* CONFIG_BLD */ +diff --git a/kernel/sched/core.c b/kernel/sched/core.c +index d1f7149..c3236de 100644 +--- a/kernel/sched/core.c ++++ b/kernel/sched/core.c +@@ -24,6 +24,8 @@ + * 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri + * 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins, + * Thomas Gleixner, Mike Kravetz ++ * 2012-Feb The Barbershop Load Distribution (BLD) algorithm - an alternate ++ * CPU load distribution technique for kernel scheduler by Rakib Mullick. + */ + + #include +@@ -86,6 +88,7 @@ + #include "sched.h" + #include "../workqueue_internal.h" + #include "../smpboot.h" ++#include "bld.h" + + #define CREATE_TRACE_POINTS + #include +@@ -713,6 +716,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) + if (!(flags & ENQUEUE_RESTORE)) + sched_info_queued(rq, p); + p->sched_class->enqueue_task(rq, p, flags); ++ if (!dl_task(p)) ++ bld_track_load_activate(rq, p); + } + + static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) +@@ -721,6 +726,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) + if (!(flags & DEQUEUE_SAVE)) + sched_info_dequeued(rq, p); + p->sched_class->dequeue_task(rq, p, flags); ++ if (!dl_task(p)) ++ bld_track_load_deactivate(rq, p); + } + + void activate_task(struct rq *rq, struct task_struct *p, int flags) +@@ -1515,8 +1522,16 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) + { + lockdep_assert_held(&p->pi_lock); + +- if (p->nr_cpus_allowed > 1) ++ if (p->nr_cpus_allowed > 1) { ++#ifndef CONFIG_BLD + cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); ++#else ++ if(dl_task(p)) ++ cpu = dl_sched_class.select_task_rq(p, cpu, sd_flags, wake_flags); ++ else ++ cpu = bld_get_cpu(p, sd_flags, wake_flags); ++#endif ++ } + + /* + * In order not to call set_task_cpu() on a blocking task we need +@@ -1706,7 +1721,11 @@ void scheduler_ipi(void) + */ + preempt_fold_need_resched(); + ++#ifndef CONFIG_BLD + if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) ++#else ++ if (llist_empty(&this_rq()->wake_list)) ++#endif + return; + + /* +@@ -1728,13 +1747,16 @@ void scheduler_ipi(void) + /* + * Check if someone kicked us for doing the nohz idle load balance. 
+ */ ++#ifndef CONFIG_BLD + if (unlikely(got_nohz_idle_kick())) { + this_rq()->idle_balance = 1; + raise_softirq_irqoff(SCHED_SOFTIRQ); + } ++#endif + irq_exit(); + } + ++#ifndef CONFIG_BLD + static void ttwu_queue_remote(struct task_struct *p, int cpu) + { + struct rq *rq = cpu_rq(cpu); +@@ -1747,6 +1769,13 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu) + } + } + ++#endif ++ ++bool cpus_share_cache(int this_cpu, int that_cpu) ++{ ++ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); ++} ++ + void wake_up_if_idle(int cpu) + { + struct rq *rq = cpu_rq(cpu); +@@ -1770,18 +1799,13 @@ void wake_up_if_idle(int cpu) + out: + rcu_read_unlock(); + } +- +-bool cpus_share_cache(int this_cpu, int that_cpu) +-{ +- return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu); +-} + #endif /* CONFIG_SMP */ + + static void ttwu_queue(struct task_struct *p, int cpu) + { + struct rq *rq = cpu_rq(cpu); + +-#if defined(CONFIG_SMP) ++#if defined(CONFIG_SMP) && !defined(CONFIG_BLD) + if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { + sched_clock_cpu(cpu); /* sync clocks x-cpu */ + ttwu_queue_remote(p, cpu); +@@ -2292,7 +2316,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) + * Silence PROVE_RCU. + */ + raw_spin_lock_irqsave(&p->pi_lock, flags); +- set_task_cpu(p, cpu); ++ __set_task_cpu(p, cpu); + raw_spin_unlock_irqrestore(&p->pi_lock, flags); + + #ifdef CONFIG_SCHED_INFO +@@ -2837,7 +2861,14 @@ void sched_exec(void) + int dest_cpu; + + raw_spin_lock_irqsave(&p->pi_lock, flags); ++#ifndef CONFIG_BLD + dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0); ++#else ++ if (dl_task(p)) ++ dest_cpu = task_cpu(p); ++ else ++ dest_cpu = bld_get_cpu(p, SD_BALANCE_EXEC, 0); ++#endif + if (dest_cpu == smp_processor_id()) + goto unlock; + +@@ -2926,8 +2957,10 @@ void scheduler_tick(void) + + #ifdef CONFIG_SMP + rq->idle_balance = idle_cpu(cpu); ++#ifndef CONFIG_BLD + trigger_load_balance(rq); + #endif ++#endif + rq_last_tick_reset(rq); + } + +@@ -7359,6 +7392,15 @@ void __init sched_init(void) + #endif + init_rq_hrtick(rq); + atomic_set(&rq->nr_iowait, 0); ++#ifdef CONFIG_BLD ++ INIT_LIST_HEAD(&rq->cfs.bld_cfs_list); ++ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head); ++ rq->cfs.pos = 0; ++ ++ INIT_LIST_HEAD(&rq->rt.bld_rt_list); ++ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head); ++ rq->rt.lowbit = INT_MAX; ++#endif + } + + set_load_weight(&init_task); +@@ -7399,6 +7441,9 @@ void __init sched_init(void) + init_sched_fair_class(); + + scheduler_running = 1; ++#ifdef CONFIG_BLD ++ printk(KERN_INFO "BLD: An Alternate CPU load distributor activated.\n"); ++#endif + } + + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP +diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c +index e7dd0ec..555572f 100644 +--- a/kernel/sched/fair.c ++++ b/kernel/sched/fair.c +@@ -4746,6 +4746,7 @@ static void task_waking_fair(struct task_struct *p) + record_wakee(p); + } + ++#ifndef CONFIG_BLD + #ifdef CONFIG_FAIR_GROUP_SCHED + /* + * effective_load() calculates the load change as seen from the root_task_group +@@ -5248,6 +5249,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f + + return new_cpu; + } ++#endif /* CONFIG_BLD */ + + /* + * Called immediately before a task is migrated to a new cpu; task_cpu(p) and +@@ -5552,6 +5554,7 @@ idle: + * further scheduler activity on it and we're being very careful to + * re-start the picking loop. 
+ */ ++#ifndef CONFIG_BLD + lockdep_unpin_lock(&rq->lock); + new_tasks = idle_balance(rq); + lockdep_pin_lock(&rq->lock); +@@ -5565,7 +5568,7 @@ idle: + + if (new_tasks > 0) + goto again; +- ++#endif + return NULL; + } + +@@ -6226,8 +6229,9 @@ static unsigned long task_h_load(struct task_struct *p) + } + #endif + +-/********** Helpers for find_busiest_group ************************/ ++#ifndef CONFIG_BLD + ++/********** Helpers for find_busiest_group ************************/ + enum group_type { + group_other = 0, + group_imbalanced, +@@ -6318,6 +6322,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd, + + return load_idx; + } ++#endif /* CONFIG_BLD */ + + static unsigned long scale_rt_capacity(int cpu) + { +@@ -6426,6 +6431,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu) + sdg->sgc->capacity = capacity; + } + ++#ifndef CONFIG_BLD + /* + * Check whether the capacity of the rq has been noticeably reduced by side + * activity. The imbalance_pct is used for the threshold. +@@ -7659,6 +7665,8 @@ static inline int on_null_domain(struct rq *rq) + return unlikely(!rcu_dereference_sched(rq->sd)); + } + ++#endif /* CONFIG_BLD */ ++ + #ifdef CONFIG_NO_HZ_COMMON + /* + * idle load balancing details +@@ -7666,12 +7674,39 @@ static inline int on_null_domain(struct rq *rq) + * needed, they will kick the idle load balancer, which then does idle + * load balancing for all the idle CPUs. + */ ++#ifndef CONFIG_BLD + static struct { + cpumask_var_t idle_cpus_mask; + atomic_t nr_cpus; + unsigned long next_balance; /* in jiffy units */ + } nohz ____cacheline_aligned; + ++static inline void nohz_balance_exit_idle(int cpu) ++{ ++ if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { ++ /* ++ * Completely isolated CPUs don't ever set, so we must test. ++ */ ++ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) { ++ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); ++ atomic_dec(&nohz.nr_cpus); ++ } ++ clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); ++ } ++} ++ ++static int sched_ilb_notifier(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ switch (action & ~CPU_TASKS_FROZEN) { ++ case CPU_DYING: ++ nohz_balance_exit_idle(smp_processor_id()); ++ return NOTIFY_OK; ++ default: ++ return NOTIFY_DONE; ++ } ++} ++ + static inline int find_new_ilb(void) + { + int ilb = cpumask_first(nohz.idle_cpus_mask); +@@ -7709,20 +7744,7 @@ static void nohz_balancer_kick(void) + smp_send_reschedule(ilb_cpu); + return; + } +- +-static inline void nohz_balance_exit_idle(int cpu) +-{ +- if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) { +- /* +- * Completely isolated CPUs don't ever set, so we must test. +- */ +- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) { +- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask); +- atomic_dec(&nohz.nr_cpus); +- } +- clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); +- } +-} ++#endif /* CONFIG_BLD */ + + static inline void set_cpu_sd_state_busy(void) + { +@@ -7764,6 +7786,7 @@ unlock: + */ + void nohz_balance_enter_idle(int cpu) + { ++#ifndef CONFIG_BLD + /* + * If this cpu is going down, then nothing needs to be done. 
+ */ +@@ -7782,23 +7805,10 @@ void nohz_balance_enter_idle(int cpu) + cpumask_set_cpu(cpu, nohz.idle_cpus_mask); + atomic_inc(&nohz.nr_cpus); + set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); +-} +- +-static int sched_ilb_notifier(struct notifier_block *nfb, +- unsigned long action, void *hcpu) +-{ +- switch (action & ~CPU_TASKS_FROZEN) { +- case CPU_DYING: +- nohz_balance_exit_idle(smp_processor_id()); +- return NOTIFY_OK; +- default: +- return NOTIFY_DONE; +- } ++#endif + } + #endif + +-static DEFINE_SPINLOCK(balancing); +- + /* + * Scale the max load_balance interval with the number of CPUs in the system. + * This trades load-balance latency on larger machines for less cross talk. +@@ -7808,6 +7818,9 @@ void update_max_interval(void) + max_load_balance_interval = HZ*num_online_cpus()/10; + } + ++#ifndef CONFIG_BLD ++static DEFINE_SPINLOCK(balancing); ++ + /* + * It checks each scheduling domain to see if it is due to be balanced, + * and initiates a balancing operation if so. +@@ -8095,6 +8108,7 @@ void trigger_load_balance(struct rq *rq) + nohz_balancer_kick(); + #endif + } ++#endif /* CONFIG_BLD */ + + static void rq_online_fair(struct rq *rq) + { +@@ -8531,7 +8545,9 @@ const struct sched_class fair_sched_class = { + .put_prev_task = put_prev_task_fair, + + #ifdef CONFIG_SMP ++#ifndef CONFIG_BLD + .select_task_rq = select_task_rq_fair, ++#endif + .migrate_task_rq = migrate_task_rq_fair, + + .rq_online = rq_online_fair, +@@ -8593,6 +8609,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m) + + __init void init_sched_fair_class(void) + { ++#ifndef CONFIG_BLD + #ifdef CONFIG_SMP + open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); + +@@ -8602,5 +8619,5 @@ __init void init_sched_fair_class(void) + cpu_notifier(sched_ilb_notifier, 0); + #endif + #endif /* SMP */ +- ++#endif /* BLD */ + } +diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c +index ec4f538d..4462bed 100644 +--- a/kernel/sched/rt.c ++++ b/kernel/sched/rt.c +@@ -1375,6 +1375,7 @@ static void yield_task_rt(struct rq *rq) + #ifdef CONFIG_SMP + static int find_lowest_rq(struct task_struct *task); + ++#ifndef CONFIG_BLD + static int + select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) + { +@@ -1430,6 +1431,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags) + out: + return cpu; + } ++#endif /* CONFIG_BLD */ + + static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) + { +@@ -2335,7 +2337,9 @@ const struct sched_class rt_sched_class = { + .put_prev_task = put_prev_task_rt, + + #ifdef CONFIG_SMP ++#ifndef CONFIG_BLD + .select_task_rq = select_task_rq_rt, ++#endif + + .set_cpus_allowed = set_cpus_allowed_common, + .rq_online = rq_online_rt, +diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h +index ec2e8d2..aaab735 100644 +--- a/kernel/sched/sched.h ++++ b/kernel/sched/sched.h +@@ -408,9 +408,8 @@ struct cfs_rq { + #endif /* CONFIG_FAIR_GROUP_SCHED */ + #endif /* CONFIG_SMP */ + +-#ifdef CONFIG_FAIR_GROUP_SCHED + struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */ +- ++#ifdef CONFIG_FAIR_GROUP_SCHED + /* + * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in + * a hierarchy). 
Non-leaf lrqs hold other higher schedulable entities +@@ -434,6 +433,11 @@ struct cfs_rq { + struct list_head throttled_list; + #endif /* CONFIG_CFS_BANDWIDTH */ + #endif /* CONFIG_FAIR_GROUP_SCHED */ ++ ++#ifdef CONFIG_BLD ++ struct list_head bld_cfs_list; ++ char pos; ++#endif + }; + + static inline int rt_bandwidth_enabled(void) +@@ -479,12 +483,16 @@ struct rt_rq { + /* Nests inside the rq lock: */ + raw_spinlock_t rt_runtime_lock; + ++ struct rq *rq; + #ifdef CONFIG_RT_GROUP_SCHED + unsigned long rt_nr_boosted; + +- struct rq *rq; + struct task_group *tg; + #endif ++#ifdef CONFIG_BLD ++ struct list_head bld_rt_list; ++ int lowbit; ++#endif + }; + + /* Deadline class' related fields in a runqueue */ diff --git a/patches/BLD-4.7.patch b/patches_disabled/BLD-4.8.patch similarity index 87% rename from patches/BLD-4.7.patch rename to patches_disabled/BLD-4.8.patch index e252c45..354d96e 100644 --- a/patches/BLD-4.7.patch +++ b/patches_disabled/BLD-4.8.patch @@ -1,9 +1,5 @@ - BLD changes for Linux kernel version 4.7 - ---- - diff --git a/init/Kconfig b/init/Kconfig -index c02d897..edf8697 100644 +index cac3f09..4e49d16 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -36,6 +36,15 @@ config BROKEN_ON_SMP @@ -244,7 +240,7 @@ index 0000000..f1f9fba +} +#endif /* CONFIG_BLD */ diff --git a/kernel/sched/core.c b/kernel/sched/core.c -index 97ee9ac..b2ddabc 100644 +index 44817c6..f0f3321 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -24,6 +24,8 @@ @@ -256,7 +252,7 @@ index 97ee9ac..b2ddabc 100644 */ #include -@@ -86,6 +88,7 @@ +@@ -87,6 +89,7 @@ #include "sched.h" #include "../workqueue_internal.h" #include "../smpboot.h" @@ -264,7 +260,7 @@ index 97ee9ac..b2ddabc 100644 #define CREATE_TRACE_POINTS #include -@@ -750,6 +753,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) +@@ -751,6 +754,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) if (!(flags & ENQUEUE_RESTORE)) sched_info_queued(rq, p); p->sched_class->enqueue_task(rq, p, flags); @@ -273,7 +269,7 @@ index 97ee9ac..b2ddabc 100644 } static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) -@@ -758,6 +763,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) +@@ -759,6 +764,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) if (!(flags & DEQUEUE_SAVE)) sched_info_dequeued(rq, p); p->sched_class->dequeue_task(rq, p, flags); @@ -282,7 +278,7 @@ index 97ee9ac..b2ddabc 100644 } void activate_task(struct rq *rq, struct task_struct *p, int flags) -@@ -1587,11 +1594,17 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) +@@ -1588,11 +1595,17 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) { lockdep_assert_held(&p->pi_lock); @@ -301,7 +297,7 @@ index 97ee9ac..b2ddabc 100644 /* * In order not to call set_task_cpu() on a blocking task we need * to rely on ttwu() to place the task on a valid ->cpus_allowed -@@ -1794,7 +1807,11 @@ void scheduler_ipi(void) +@@ -1795,7 +1808,11 @@ void scheduler_ipi(void) */ preempt_fold_need_resched(); @@ -313,7 +309,7 @@ index 97ee9ac..b2ddabc 100644 return; /* -@@ -1816,13 +1833,16 @@ void scheduler_ipi(void) +@@ -1817,13 +1834,16 @@ void scheduler_ipi(void) /* * Check if someone kicked us for doing the nohz idle load balance. 
*/ @@ -330,7 +326,7 @@ index 97ee9ac..b2ddabc 100644 static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) { struct rq *rq = cpu_rq(cpu); -@@ -1836,6 +1856,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) +@@ -1837,6 +1857,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) trace_sched_wake_idle_without_ipi(cpu); } } @@ -338,7 +334,7 @@ index 97ee9ac..b2ddabc 100644 void wake_up_if_idle(int cpu) { -@@ -1872,7 +1893,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) +@@ -1873,7 +1894,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) struct rq *rq = cpu_rq(cpu); struct pin_cookie cookie; @@ -347,16 +343,7 @@ index 97ee9ac..b2ddabc 100644 if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { sched_clock_cpu(cpu); /* sync clocks x-cpu */ ttwu_queue_remote(p, cpu, wake_flags); -@@ -2394,7 +2415,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) - * Silence PROVE_RCU. - */ - raw_spin_lock_irqsave(&p->pi_lock, flags); -- set_task_cpu(p, cpu); -+ __set_task_cpu(p, cpu); - raw_spin_unlock_irqrestore(&p->pi_lock, flags); - - #ifdef CONFIG_SCHED_INFO -@@ -2941,7 +2962,14 @@ void sched_exec(void) +@@ -2971,7 +2992,14 @@ void sched_exec(void) int dest_cpu; raw_spin_lock_irqsave(&p->pi_lock, flags); @@ -371,7 +358,7 @@ index 97ee9ac..b2ddabc 100644 if (dest_cpu == smp_processor_id()) goto unlock; -@@ -3030,8 +3058,10 @@ void scheduler_tick(void) +@@ -3078,8 +3106,10 @@ void scheduler_tick(void) #ifdef CONFIG_SMP rq->idle_balance = idle_cpu(cpu); @@ -382,7 +369,7 @@ index 97ee9ac..b2ddabc 100644 rq_last_tick_reset(rq); } -@@ -7262,7 +7292,9 @@ int sched_cpu_dying(unsigned int cpu) +@@ -7313,7 +7343,9 @@ int sched_cpu_dying(unsigned int cpu) raw_spin_unlock_irqrestore(&rq->lock, flags); calc_load_migrate(rq); update_max_interval(); @@ -392,7 +379,7 @@ index 97ee9ac..b2ddabc 100644 hrtick_clear(rq); return 0; } -@@ -7468,6 +7500,15 @@ void __init sched_init(void) +@@ -7519,6 +7551,15 @@ void __init sched_init(void) #endif /* CONFIG_SMP */ init_rq_hrtick(rq); atomic_set(&rq->nr_iowait, 0); @@ -408,7 +395,7 @@ index 97ee9ac..b2ddabc 100644 } set_load_weight(&init_task); -@@ -7510,6 +7551,9 @@ void __init sched_init(void) +@@ -7561,6 +7602,9 @@ void __init sched_init(void) init_schedstats(); scheduler_running = 1; @@ -419,10 +406,10 @@ index 97ee9ac..b2ddabc 100644 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c -index c8c5d2d..5b694b3 100644 +index 039de34..f823e5b 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c -@@ -4880,6 +4880,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) +@@ -4924,6 +4924,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) return 0; } @@ -430,7 +417,7 @@ index c8c5d2d..5b694b3 100644 #ifdef CONFIG_FAIR_GROUP_SCHED /* * effective_load() calculates the load change as seen from the root_task_group -@@ -5411,6 +5412,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f +@@ -5455,6 +5456,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f return new_cpu; } @@ -438,7 +425,7 @@ index c8c5d2d..5b694b3 100644 /* * Called immediately before a task is migrated to a new cpu; task_cpu(p) and -@@ -5741,6 +5743,7 @@ idle: +@@ -5785,6 +5787,7 @@ idle: * further scheduler activity on it and we're being very careful to * re-start the picking loop. 
*/ @@ -446,7 +433,7 @@ index c8c5d2d..5b694b3 100644 lockdep_unpin_lock(&rq->lock, cookie); new_tasks = idle_balance(rq); lockdep_repin_lock(&rq->lock, cookie); -@@ -5754,7 +5757,7 @@ idle: +@@ -5798,7 +5801,7 @@ idle: if (new_tasks > 0) goto again; @@ -455,7 +442,7 @@ index c8c5d2d..5b694b3 100644 return NULL; } -@@ -6415,8 +6418,9 @@ static unsigned long task_h_load(struct task_struct *p) +@@ -6459,8 +6462,9 @@ static unsigned long task_h_load(struct task_struct *p) } #endif @@ -466,7 +453,7 @@ index c8c5d2d..5b694b3 100644 enum group_type { group_other = 0, group_imbalanced, -@@ -6507,6 +6511,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd, +@@ -6551,6 +6555,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd, return load_idx; } @@ -474,7 +461,7 @@ index c8c5d2d..5b694b3 100644 static unsigned long scale_rt_capacity(int cpu) { -@@ -6615,6 +6620,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu) +@@ -6659,6 +6664,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu) sdg->sgc->capacity = capacity; } @@ -482,7 +469,7 @@ index c8c5d2d..5b694b3 100644 /* * Check whether the capacity of the rq has been noticeably reduced by side * activity. The imbalance_pct is used for the threshold. -@@ -7848,6 +7854,7 @@ static inline int on_null_domain(struct rq *rq) +@@ -7892,6 +7898,7 @@ static inline int on_null_domain(struct rq *rq) { return unlikely(!rcu_dereference_sched(rq->sd)); } @@ -490,7 +477,7 @@ index c8c5d2d..5b694b3 100644 #ifdef CONFIG_NO_HZ_COMMON /* -@@ -7856,12 +7863,39 @@ static inline int on_null_domain(struct rq *rq) +@@ -7900,12 +7907,39 @@ static inline int on_null_domain(struct rq *rq) * needed, they will kick the idle load balancer, which then does idle * load balancing for all the idle CPUs. */ @@ -530,7 +517,7 @@ index c8c5d2d..5b694b3 100644 static inline int find_new_ilb(void) { int ilb = cpumask_first(nohz.idle_cpus_mask); -@@ -7900,20 +7934,6 @@ static void nohz_balancer_kick(void) +@@ -7944,20 +7978,6 @@ static void nohz_balancer_kick(void) return; } @@ -551,7 +538,7 @@ index c8c5d2d..5b694b3 100644 static inline void set_cpu_sd_state_busy(void) { struct sched_domain *sd; -@@ -7930,6 +7950,8 @@ static inline void set_cpu_sd_state_busy(void) +@@ -7974,6 +7994,8 @@ static inline void set_cpu_sd_state_busy(void) unlock: rcu_read_unlock(); } @@ -560,7 +547,7 @@ index c8c5d2d..5b694b3 100644 void set_cpu_sd_state_idle(void) { -@@ -7954,6 +7976,7 @@ unlock: +@@ -7998,6 +8020,7 @@ unlock: */ void nohz_balance_enter_idle(int cpu) { @@ -568,7 +555,7 @@ index c8c5d2d..5b694b3 100644 /* * If this cpu is going down, then nothing needs to be done. */ -@@ -7972,10 +7995,8 @@ void nohz_balance_enter_idle(int cpu) +@@ -8016,10 +8039,8 @@ void nohz_balance_enter_idle(int cpu) cpumask_set_cpu(cpu, nohz.idle_cpus_mask); atomic_inc(&nohz.nr_cpus); set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); @@ -580,7 +567,7 @@ index c8c5d2d..5b694b3 100644 /* * Scale the max load_balance interval with the number of CPUs in the system. -@@ -7986,6 +8007,9 @@ void update_max_interval(void) +@@ -8030,6 +8051,9 @@ void update_max_interval(void) max_load_balance_interval = HZ*num_online_cpus()/10; } @@ -590,7 +577,7 @@ index c8c5d2d..5b694b3 100644 /* * It checks each scheduling domain to see if it is due to be balanced, * and initiates a balancing operation if so. 
-@@ -8273,6 +8297,7 @@ void trigger_load_balance(struct rq *rq) +@@ -8317,6 +8341,7 @@ void trigger_load_balance(struct rq *rq) nohz_balancer_kick(); #endif } @@ -598,7 +585,7 @@ index c8c5d2d..5b694b3 100644 static void rq_online_fair(struct rq *rq) { -@@ -8288,7 +8313,6 @@ static void rq_offline_fair(struct rq *rq) +@@ -8332,7 +8357,6 @@ static void rq_offline_fair(struct rq *rq) /* Ensure any throttled groups are reachable by pick_next_task */ unthrottle_offline_cfs_rqs(rq); } @@ -606,7 +593,7 @@ index c8c5d2d..5b694b3 100644 #endif /* CONFIG_SMP */ /* -@@ -8716,7 +8740,9 @@ const struct sched_class fair_sched_class = { +@@ -8791,7 +8815,9 @@ const struct sched_class fair_sched_class = { .put_prev_task = put_prev_task_fair, #ifdef CONFIG_SMP @@ -616,7 +603,7 @@ index c8c5d2d..5b694b3 100644 .migrate_task_rq = migrate_task_rq_fair, .rq_online = rq_online_fair, -@@ -8777,6 +8803,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m) +@@ -8852,6 +8878,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m) __init void init_sched_fair_class(void) { @@ -624,7 +611,7 @@ index c8c5d2d..5b694b3 100644 #ifdef CONFIG_SMP open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); -@@ -8785,5 +8812,5 @@ __init void init_sched_fair_class(void) +@@ -8860,5 +8887,5 @@ __init void init_sched_fair_class(void) zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); #endif #endif /* SMP */ @@ -662,10 +649,10 @@ index d5690b7..6f3589e 100644 .set_cpus_allowed = set_cpus_allowed_common, .rq_online = rq_online_rt, diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h -index 898c0d2..720d524 100644 +index c64fc51..a1d329b 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h -@@ -415,9 +415,8 @@ struct cfs_rq { +@@ -416,9 +416,8 @@ struct cfs_rq { #endif /* CONFIG_FAIR_GROUP_SCHED */ #endif /* CONFIG_SMP */ @@ -676,7 +663,7 @@ index 898c0d2..720d524 100644 /* * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in * a hierarchy). Non-leaf lrqs hold other higher schedulable entities -@@ -441,6 +440,11 @@ struct cfs_rq { +@@ -442,6 +441,11 @@ struct cfs_rq { struct list_head throttled_list; #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -688,7 +675,7 @@ index 898c0d2..720d524 100644 }; static inline int rt_bandwidth_enabled(void) -@@ -486,12 +490,16 @@ struct rt_rq { +@@ -487,12 +491,16 @@ struct rt_rq { /* Nests inside the rq lock: */ raw_spinlock_t rt_runtime_lock; diff --git a/patches/uksm-0.1.2.5-for-v4.7.patch b/patches_disabled/uksm-0.1.2.5-for-v4.8.patch similarity index 97% rename from patches/uksm-0.1.2.5-for-v4.7.patch rename to patches_disabled/uksm-0.1.2.5-for-v4.8.patch index 4ba4cbb..d308f87 100644 --- a/patches/uksm-0.1.2.5-for-v4.7.patch +++ b/patches_disabled/uksm-0.1.2.5-for-v4.8.patch @@ -78,7 +78,7 @@ index 0000000..8fce86f +2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger anonying warnings. +2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation. diff --git a/fs/exec.c b/fs/exec.c -index 887c1c9..2bee16e 100644 +index 6fcfb3f..ef87e0f 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -19,7 +19,7 @@ @@ -98,7 +98,7 @@ index 887c1c9..2bee16e 100644 #include #include -@@ -1273,6 +1274,7 @@ void setup_new_exec(struct linux_binprm * bprm) +@@ -1309,6 +1310,7 @@ void setup_new_exec(struct linux_binprm * bprm) /* An exec changes our domain. 
We are no longer part of the thread group */ current->self_exec_id++; @@ -107,7 +107,7 @@ index 887c1c9..2bee16e 100644 do_close_on_exec(current->files); } diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c -index 8372046..82aa2f4 100644 +index b9a8c81..9765269 100644 --- a/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c @@ -89,6 +89,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) @@ -120,9 +120,9 @@ index 8372046..82aa2f4 100644 #ifdef CONFIG_QUICKLIST "Quicklists: %8lu kB\n" #endif -@@ -147,6 +150,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) +@@ -149,6 +152,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) K(global_page_state(NR_SLAB_UNRECLAIMABLE)), - global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024, + global_page_state(NR_KERNEL_STACK_KB), K(global_page_state(NR_PAGETABLE)), +#ifdef CONFIG_UKSM + K(global_page_state(NR_UKSM_ZERO_PAGES)), @@ -171,7 +171,7 @@ index d4458b6..172ceb9 100644 static inline unsigned long my_zero_pfn(unsigned long addr) diff --git a/include/linux/ksm.h b/include/linux/ksm.h -index 7ae216a..06861d8 100644 +index 481c8c4..5329b23 100644 --- a/include/linux/ksm.h +++ b/include/linux/ksm.h @@ -19,21 +19,6 @@ struct mem_cgroup; @@ -196,7 +196,7 @@ index 7ae216a..06861d8 100644 static inline struct stable_node *page_stable_node(struct page *page) { -@@ -64,6 +49,33 @@ struct page *ksm_might_need_to_copy(struct page *page, +@@ -63,6 +48,33 @@ struct page *ksm_might_need_to_copy(struct page *page, int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); void ksm_migrate_page(struct page *newpage, struct page *oldpage); @@ -230,7 +230,7 @@ index 7ae216a..06861d8 100644 #else /* !CONFIG_KSM */ static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) -@@ -106,4 +118,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) +@@ -105,4 +117,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) #endif /* CONFIG_MMU */ #endif /* !CONFIG_KSM */ @@ -238,10 +238,10 @@ index 7ae216a..06861d8 100644 + #endif /* __LINUX_KSM_H */ diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index ca3e517..ae62e7d1 100644 +index 903200f..6c7d900 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h -@@ -357,6 +357,9 @@ struct vm_area_struct { +@@ -358,6 +358,9 @@ struct vm_area_struct { struct mempolicy *vm_policy; /* NUMA policy for the VMA */ #endif struct vm_userfaultfd_ctx vm_userfaultfd_ctx; @@ -252,20 +252,20 @@ index ca3e517..ae62e7d1 100644 struct core_thread { diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h -index 02069c2..f7cce50 100644 +index 7f2ae99..89f7dd8 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h -@@ -153,6 +153,9 @@ enum zone_stat_item { - WORKINGSET_NODERECLAIM, - NR_ANON_TRANSPARENT_HUGEPAGES, +@@ -138,6 +138,9 @@ enum zone_stat_item { + NUMA_OTHER, /* allocation from other node */ + #endif NR_FREE_CMA_PAGES, +#ifdef CONFIG_UKSM + NR_UKSM_ZERO_PAGES, +#endif NR_VM_ZONE_STAT_ITEMS }; - /* -@@ -817,7 +820,7 @@ static inline int is_highmem_idx(enum zone_type idx) + enum node_stat_item { +@@ -869,7 +872,7 @@ static inline int is_highmem_idx(enum zone_type idx) } /** @@ -513,10 +513,10 @@ index 0000000..825f05e +#endif /* !CONFIG_UKSM */ +#endif /* __LINUX_UKSM_H */ diff --git a/kernel/fork.c b/kernel/fork.c -index aea4f4d..f93e114 100644 +index beb3172..569893a 100644 --- a/kernel/fork.c +++ b/kernel/fork.c -@@ -459,7 +459,7 @@ static int dup_mmap(struct mm_struct *mm, struct 
mm_struct *oldmm) +@@ -457,7 +457,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) goto fail_nomem; charge = len; } @@ -525,7 +525,7 @@ index aea4f4d..f93e114 100644 if (!tmp) goto fail_nomem; *tmp = *mpnt; -@@ -512,7 +512,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) +@@ -510,7 +510,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) __vma_link_rb(mm, tmp, rb_link, rb_parent); rb_link = &tmp->vm_rb.rb_right; rb_parent = &tmp->vm_rb; @@ -535,17 +535,17 @@ index aea4f4d..f93e114 100644 retval = copy_page_range(mm, oldmm, mpnt); diff --git a/lib/Makefile b/lib/Makefile -index ff6a7a6..ac0bb55 100644 +index 5dc77a8..b63a823 100644 --- a/lib/Makefile +++ b/lib/Makefile -@@ -20,7 +20,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n - KCOV_INSTRUMENT_hweight.o := n +@@ -17,7 +17,7 @@ KCOV_INSTRUMENT_debugobjects.o := n + KCOV_INSTRUMENT_dynamic_debug.o := n lib-y := ctype.o string.o vsprintf.o cmdline.o \ - rbtree.o radix-tree.o dump_stack.o timerqueue.o\ + rbtree.o radix-tree.o sradix-tree.o dump_stack.o timerqueue.o\ idr.o int_sqrt.o extable.o \ - sha1.o md5.o irq_regs.o argv_split.o \ + sha1.o chacha20.o md5.o irq_regs.o argv_split.o \ flex_proportions.o ratelimit.o show_mem.o \ diff --git a/lib/sradix-tree.c b/lib/sradix-tree.c new file mode 100644 @@ -1030,10 +1030,10 @@ index 0000000..8d06329 + return 0; +} diff --git a/mm/Kconfig b/mm/Kconfig -index 3e2daef..165b60e 100644 +index be0ee11..64fd3bc 100644 --- a/mm/Kconfig +++ b/mm/Kconfig -@@ -332,6 +332,32 @@ config KSM +@@ -340,6 +340,32 @@ config KSM See Documentation/vm/ksm.txt for more information: KSM is inactive until a program has madvised that an area is MADV_MERGEABLE, and root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). 
@@ -1067,10 +1067,10 @@ index 3e2daef..165b60e 100644 config DEFAULT_MMAP_MIN_ADDR int "Low address space to protect from user allocation" diff --git a/mm/Makefile b/mm/Makefile -index 78c6f7d..7e7cd8a 100644 +index 2ca1faf..980c8dd 100644 --- a/mm/Makefile +++ b/mm/Makefile -@@ -63,7 +63,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o +@@ -66,7 +66,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o @@ -1081,10 +1081,10 @@ index 78c6f7d..7e7cd8a 100644 obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLUB) += slub.o diff --git a/mm/memory.c b/mm/memory.c -index 9e04681..02200d3 100644 +index 793fe0f..0464507 100644 --- a/mm/memory.c +++ b/mm/memory.c -@@ -124,6 +124,28 @@ unsigned long highest_memmap_pfn __read_mostly; +@@ -124,6 +124,25 @@ unsigned long highest_memmap_pfn __read_mostly; EXPORT_SYMBOL(zero_pfn); @@ -1095,14 +1095,11 @@ index 9e04681..02200d3 100644 + +static int __init setup_uksm_zero_page(void) +{ -+ unsigned long addr; -+ addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0); -+ if (!addr) ++ empty_uksm_zero_page = alloc_pages(__GFP_ZERO & ~__GFP_MOVABLE, 0); ++ if (!empty_uksm_zero_page) + panic("Oh boy, that early out of memory?"); + -+ empty_uksm_zero_page = virt_to_page((void *) addr); + SetPageReserved(empty_uksm_zero_page); -+ + uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page); + + return 0; @@ -1113,7 +1110,7 @@ index 9e04681..02200d3 100644 /* * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() */ -@@ -135,6 +157,7 @@ static int __init init_zero_pfn(void) +@@ -135,6 +154,7 @@ static int __init init_zero_pfn(void) core_initcall(init_zero_pfn); @@ -1121,7 +1118,7 @@ index 9e04681..02200d3 100644 #if defined(SPLIT_RSS_COUNTING) void sync_mm_rss(struct mm_struct *mm) -@@ -905,6 +928,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, +@@ -914,6 +934,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, get_page(page); page_dup_rmap(page, false); rss[mm_counter(page)]++; @@ -1133,7 +1130,7 @@ index 9e04681..02200d3 100644 } out_set_pte: -@@ -1138,8 +1166,10 @@ again: +@@ -1148,8 +1173,10 @@ again: ptent = ptep_get_and_clear_full(mm, addr, pte, tlb->fullmm); tlb_remove_tlb_entry(tlb, pte, addr); @@ -1145,7 +1142,7 @@ index 9e04681..02200d3 100644 if (!PageAnon(page)) { if (pte_dirty(ptent)) { -@@ -1995,8 +2025,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo +@@ -2010,8 +2037,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo clear_page(kaddr); kunmap_atomic(kaddr); flush_dcache_page(dst); @@ -1157,15 +1154,15 @@ index 9e04681..02200d3 100644 } static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) -@@ -2141,6 +2173,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, - new_page = alloc_zeroed_user_highpage_movable(vma, address); +@@ -2154,6 +2183,7 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, + new_page = alloc_zeroed_user_highpage_movable(vma, fe->address); if (!new_page) goto oom; + uksm_cow_pte(vma, orig_pte); } else { - new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); - if (!new_page) -@@ -2166,7 +2199,9 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, + fe->address); +@@ -2180,7 +2210,9 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte, mm_counter_file(old_page)); 
inc_mm_counter_fast(mm, MM_ANONPAGES); } @@ -1174,12 +1171,12 @@ index 9e04681..02200d3 100644 + uksm_unmap_zero_page(orig_pte); inc_mm_counter_fast(mm, MM_ANONPAGES); } - flush_cache_page(vma, address, pte_pfn(orig_pte)); + flush_cache_page(vma, fe->address, pte_pfn(orig_pte)); diff --git a/mm/mmap.c b/mm/mmap.c -index de2c176..ce60715 100644 +index ca9d91b..cf565b7 100644 --- a/mm/mmap.c +++ b/mm/mmap.c -@@ -43,6 +43,7 @@ +@@ -44,6 +44,7 @@ #include #include #include @@ -1187,7 +1184,7 @@ index de2c176..ce60715 100644 #include #include -@@ -164,6 +165,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) +@@ -165,6 +166,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) if (vma->vm_file) fput(vma->vm_file); mpol_put(vma_policy(vma)); @@ -1206,13 +1203,13 @@ index de2c176..ce60715 100644 + uksm_remove_vma(vma); + if (next && !insert) { - struct vm_area_struct *exporter = NULL; + struct vm_area_struct *exporter = NULL, *importer = NULL; + uksm_remove_vma(next); if (end >= next->vm_end) { /* * vma expands, overlapping all the next, and -@@ -725,6 +734,7 @@ again: remove_next = 1 + (end > next->vm_end); +@@ -733,6 +742,7 @@ again: end_changed = true; } vma->vm_pgoff = pgoff; @@ -1220,14 +1217,13 @@ index de2c176..ce60715 100644 if (adjust_next) { next->vm_start += adjust_next << PAGE_SHIFT; next->vm_pgoff += adjust_next; -@@ -795,16 +805,22 @@ again: remove_next = 1 + (end > next->vm_end); - * up the code too much to do both in one go. - */ - next = vma->vm_next; -- if (remove_next == 2) -+ if (remove_next == 2) { +@@ -806,16 +816,21 @@ again: + if (remove_next == 2) { + remove_next = 1; + end = next->vm_end; + uksm_remove_vma(next); goto again; +- } - else if (next) + } else if (next) { vma_gap_update(next); @@ -1246,7 +1242,7 @@ index de2c176..ce60715 100644 validate_mm(mm); return 0; -@@ -1196,6 +1212,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, +@@ -1207,6 +1222,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; @@ -1256,7 +1252,7 @@ index de2c176..ce60715 100644 if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; -@@ -1534,6 +1553,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, +@@ -1545,6 +1563,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, allow_write_access(file); } file = vma->vm_file; @@ -1264,7 +1260,7 @@ index de2c176..ce60715 100644 out: perf_event_mmap(vma); -@@ -1575,6 +1595,7 @@ allow_write_and_free_vma: +@@ -1586,6 +1605,7 @@ allow_write_and_free_vma: if (vm_flags & VM_DENYWRITE) allow_write_access(file); free_vma: @@ -1272,7 +1268,7 @@ index de2c176..ce60715 100644 kmem_cache_free(vm_area_cachep, vma); unacct_error: if (charged) -@@ -2369,6 +2390,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, +@@ -2391,6 +2411,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, else err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); @@ -1281,7 +1277,7 @@ index de2c176..ce60715 100644 /* Success. 
*/ if (!err) return 0; -@@ -2639,6 +2662,7 @@ static int do_brk(unsigned long addr, unsigned long len) +@@ -2669,6 +2691,7 @@ static int do_brk(unsigned long addr, unsigned long request) return 0; flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; @@ -1289,7 +1285,7 @@ index de2c176..ce60715 100644 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); if (offset_in_page(error)) -@@ -2696,6 +2720,7 @@ static int do_brk(unsigned long addr, unsigned long len) +@@ -2726,6 +2749,7 @@ static int do_brk(unsigned long addr, unsigned long request) vma->vm_flags = flags; vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); @@ -1297,7 +1293,7 @@ index de2c176..ce60715 100644 out: perf_event_mmap(vma); mm->total_vm += len >> PAGE_SHIFT; -@@ -2734,6 +2759,12 @@ void exit_mmap(struct mm_struct *mm) +@@ -2764,6 +2788,12 @@ void exit_mmap(struct mm_struct *mm) /* mm's last user has gone, and its about to be pulled down */ mmu_notifier_release(mm); @@ -1310,7 +1306,7 @@ index de2c176..ce60715 100644 if (mm->locked_vm) { vma = mm->mmap; while (vma) { -@@ -2769,6 +2800,11 @@ void exit_mmap(struct mm_struct *mm) +@@ -2799,6 +2829,11 @@ void exit_mmap(struct mm_struct *mm) vma = remove_vma(vma); } vm_unacct_memory(nr_accounted); @@ -1322,7 +1318,7 @@ index de2c176..ce60715 100644 } /* Insert vm structure into process list sorted by address -@@ -2878,6 +2914,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, +@@ -2908,6 +2943,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, new_vma->vm_ops->open(new_vma); vma_link(mm, new_vma, prev, rb_link, rb_parent); *need_rmap_locks = false; @@ -1330,7 +1326,7 @@ index de2c176..ce60715 100644 } return new_vma; -@@ -3015,6 +3052,7 @@ static struct vm_area_struct *__install_special_mapping( +@@ -3055,6 +3091,7 @@ static struct vm_area_struct *__install_special_mapping( vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); perf_event_mmap(vma); @@ -1339,7 +1335,7 @@ index de2c176..ce60715 100644 return vma; diff --git a/mm/rmap.c b/mm/rmap.c -index 701b93f..64ba784 100644 +index 1ef3640..1c40463 100644 --- a/mm/rmap.c +++ b/mm/rmap.c @@ -1110,9 +1110,9 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) @@ -1356,10 +1352,10 @@ index 701b93f..64ba784 100644 static void __page_set_anon_rmap(struct page *page, diff --git a/mm/uksm.c b/mm/uksm.c new file mode 100644 -index 0000000..039192f +index 0000000..56852a5 --- /dev/null +++ b/mm/uksm.c -@@ -0,0 +1,5518 @@ +@@ -0,0 +1,5524 @@ +/* + * Ultra KSM. 
Copyright (C) 2011-2012 Nai Xia + * @@ -1558,7 +1554,8 @@ index 0000000..039192f +static struct sradix_tree_node *slot_tree_node_alloc(void) +{ + struct slot_tree_node *p; -+ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL); ++ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); + if (!p) + return NULL; + @@ -2044,7 +2041,8 @@ index 0000000..039192f +static inline struct node_vma *alloc_node_vma(void) +{ + struct node_vma *node_vma; -+ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL); ++ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); + if (node_vma) { + INIT_HLIST_HEAD(&node_vma->rmap_hlist); + INIT_HLIST_NODE(&node_vma->hlist); @@ -2069,7 +2067,8 @@ index 0000000..039192f + if (!vma_slot_cache) + return NULL; + -+ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL); ++ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); + if (slot) { + INIT_LIST_HEAD(&slot->slot_list); + INIT_LIST_HEAD(&slot->dedup_list); @@ -2089,7 +2088,8 @@ index 0000000..039192f +{ + struct rmap_item *rmap_item; + -+ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); ++ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); + if (rmap_item) { + /* bug on lowest bit is not clear for flag use */ + BUG_ON(is_addr(rmap_item)); @@ -2106,7 +2106,8 @@ index 0000000..039192f +static inline struct stable_node *alloc_stable_node(void) +{ + struct stable_node *node; -+ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL | GFP_ATOMIC); ++ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); + if (!node) + return NULL; + @@ -2124,7 +2125,8 @@ index 0000000..039192f +static inline struct tree_node *alloc_tree_node(struct list_head *list) +{ + struct tree_node *node; -+ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL | GFP_ATOMIC); ++ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL | ++ __GFP_NORETRY | __GFP_NOWARN); + if (!node) + return NULL; + @@ -2241,8 +2243,8 @@ index 0000000..039192f + void *expected_mapping; + + page = pfn_to_page(stable_node->kpfn); -+ expected_mapping = (void *)stable_node + -+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); ++ expected_mapping = (void *)((unsigned long)stable_node | ++ PAGE_MAPPING_KSM); + rcu_read_lock(); + if (page->mapping != expected_mapping) + goto stale; @@ -2919,6 +2921,7 @@ index 0000000..039192f + (page_to_pfn(kpage) == zero_pfn)) { + entry = pte_mkspecial(entry); + dec_mm_counter(mm, MM_ANONPAGES); ++ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES); + } else { + get_page(kpage); + page_add_anon_rmap(kpage, vma, addr, false); @@ -3986,7 +3989,7 @@ index 0000000..039192f + if (IS_ERR_OR_NULL(page)) + break; + if (PageKsm(page)) { -+ ret = handle_mm_fault(vma->vm_mm, vma, addr, ++ ret = handle_mm_fault(vma, addr, + FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE); + } else + ret = VM_FAULT_WRITE; @@ -4634,7 +4637,6 @@ index 0000000..039192f + if (find_zero_page_hash(hash_strength, *hash)) { + if (!cmp_and_merge_zero_page(slot->vma, page)) { + slot->pages_merged++; -+ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES); + + /* For full-zero pages, no need to create rmap item */ + goto putpage; @@ -6879,12 +6881,12 @@ index 0000000..039192f +#endif + diff --git a/mm/vmstat.c b/mm/vmstat.c -index cb2a67b..912b86f 100644 +index 89cec42..188ce43 100644 --- a/mm/vmstat.c +++ b/mm/vmstat.c -@@ -733,6 +733,9 @@ const char * const vmstat_text[] = { - "nr_anon_transparent_hugepages", - 
"nr_free_cma", +@@ -974,6 +974,9 @@ const char * const vmstat_text[] = { + "nr_dirtied", + "nr_written", +#ifdef CONFIG_UKSM + "nr_uksm_zero_pages",