
Update to 4.10

Sangbum Kim 2017-04-05 13:31:46 +09:00
parent 876c08a90f
commit aea60ce944
27 changed files with 23539 additions and 1901 deletions

PKGBUILD

@@ -7,9 +7,9 @@ pkgname=$pkgbase
 # comment the following line to build a single package containing the kernel and the headers
 (( 1 )) && pkgname=("$pkgbase" "$pkgbase-headers" "$pkgbase-docs")
 pkgdesc="The Linux Kernel and modules from Linus' git tree"
-depends=('coreutils' 'linux-firmware-git' 'mkinitcpio')
-pkgver=4.8.rc8
+depends=('coreutils' 'linux-firmware' 'module-init-tools' 'mkinitcpio')
+pkgver=4.11.rc5
 pkgrel=1
 url="http://www.kernel.org/"
 arch=(i686 x86_64)
@@ -24,7 +24,7 @@ sha256sums=('SKIP')
 # set _gitrev to a git revision (man gitrevisions) like a tag, a commit sha1
 # hash or a branch name to build from this tree instead of master
-_gitrev="v4.7.5"
+_gitrev="v4.10.8"

 ####################################################################
 # KERNEL CONFIG FILES
@@ -32,9 +32,9 @@ _gitrev="v4.7.5"
 # This PKGBUILD searches for config files in the current directory
 # and will use the first one it finds from the following
 # list as base configuration:
 #   config.local
 #   config.saved.$CARCH
 #   config.$CARCH
 #
 ####################################################################
@@ -61,7 +61,15 @@ _gitrev="v4.7.5"
 #
 # Uncomment desired options
 #############################
-#_make_modules=0
+#_make_modules=1
+
+#######
+# Skip the merge of Linus's kernel tree
+#
+# _skip_merge=1

 MAKEFLAGS="-j $(expr $(cat /proc/cpuinfo |grep processor |wc -l) \* 2)"
 #######
@@ -87,12 +95,29 @@ _config_cmd="${_config_cmd:-menuconfig}"
 # _configure_only=1

+#######
+# The directory where the kernel should be built
+#
+# Can be useful, for example, if you want to compile on a
+# tmpfs mount, which can speed up the compilation process
+#
+#_build_dir="${_build_dir:-$srcdir}"
+
 #######
 # Append the date to the localversion
 #
 # e.g. -ARCH -> -ARCH-20090422
 #
-# _date_localversion=1
+#_date_localversion=0
+
+#######
+# Set the pkgver to the kernel version
+# rather than the build date
+#
+# _kernel_pkgver=1

 #######
@@ -105,7 +130,7 @@ _save_config=1
 #######
 # Do not compress kernel modules
 #
-# _no_modules_compression=1
+_no_modules_compression=0

 #######
@@ -117,245 +142,250 @@ _save_config=1
 # internal variables
 (( 1 )) && _kernel_src="$pkgname"
 #(( 1 )) && _kernel_src="$BUILDDIR/$(find . -maxdepth 1 -type d -name "linux-*" -printf "%f\n" | head -1)"
+#(( 1 )) && _kernel_src="$_build_dir/$pkgname_$"

 #######
 # define required functions
 pkgver() {
   cd "$_kernel_src"
   git describe --always | sed 's/^v//;s/-/./g'
 }

 # single package
 package() {
   eval package_$pkgbase-headers
   eval package_$pkgbase
 }

 # split package functions
-eval "package_$pkgbase() { _generic_package_linux; }"
-eval "package_$pkgbase-headers() { _generic_package_linux-headers; }"
-eval "package_$pkgbase-docs() { _generic_package_linux-docs; }"
+eval "package_$pkgbase() { _generic_package_kernel; }"
+eval "package_$pkgbase-headers() { _generic_package_kernel-headers; }"
+eval "package_$pkgbase-docs() { _generic_package_kernel-docs; }"

 ##############################
 # where the magic happens...
 ##############################
 build() {
   cd "$_kernel_src"

   msg "Sanitizing source tree.."
   [[ -n $_gitrev ]] && git reset --hard "$_gitrev"
   # cleaning source trees
   git clean -f

   #################
   # Apply patches
   #################
   msg "Applying patches..."
   local i patches
   for i in "${source[@]}"; do
     i=${i##*/}
     [[ $i =~ .*\.patch$ ]] && patches=("${patches[@]}" "$srcdir/$i")
     [[ ${i%.*} =~ .*\.patch$ ]] && patches=("${patches[@]}" "$srcdir/${i%.*}")
   done

   shopt -s nullglob
   for i in "${patches[@]}" "$startdir/patches/"*; do
     msg2 "Applying ${i##*/}..."
     patch -Np1 -i "$i" || (error "Applying ${i##*/} failed" && return 1)
   done
   shopt -u nullglob

   #################
   # CONFIGURATION
   #################

   #########################
   # Loading configuration
   #########################
   msg "Loading configuration..."
   for i in local "saved.$CARCH" "$CARCH"; do
     if [[ -e $startdir/config.$i ]]; then
       msg2 "Using kernel config file config.$i..."
       cp -f "$startdir/config.$i" .config
       break
     fi
   done

   [[ ! -e .config ]] &&
     warning "No suitable kernel config file was found. You'll have to configure the kernel from scratch."

   ###########################
   # Start the configuration
   ###########################
   msg "Updating configuration..."
   yes "" | make config > /dev/null

   if [[ -f "$startdir/config.saved.$CARCH" ]]; then
     msg2 "migrating previous config..."
     cp "$startdir/config.saved.$CARCH" .config
     make oldconfig
   else
     msg2 "migrating default config..."
     cp "$startdir/config.$CARCH" .config
     make oldconfig
   fi

   if [[ -n ${_config_cmd// /} ]]; then
     msg2 "Running make $_config_cmd..."
     make $_config_cmd
   else
     warning "Unknown config command: $_config_cmd"
   fi

   ##############################################
   # Save the config file the package directory
   ##############################################
   if [[ -n $_save_config || -n $_configure_only ]]; then
     msg "Saving configuration..."
     msg2 "Saving $_kernel_src/.config as $startdir/config.saved.$CARCH"
     cp .config "$startdir/config.saved.$CARCH"
   fi

   #######################################
   # Stop after configuration if desired
   #######################################
   if [[ -n $_configure_only ]]; then
-    rm -rf "$srcdir" "$pkgdir"
+    rm -rf "$_kernel_src" "$srcdir" "$pkgdir"
     return 1
   fi

   ###############################
   # Append date to localversion
   ###############################
   if [[ -n $_date_localversion ]]; then
     local _localversion="$(sed -rn 's/^CONFIG_LOCALVERSION="([^"]*)"$/\1/p' .config)"
     [[ -n $_localversion ]] && msg2 "CONFIG_LOCALVERSION is set to: $_localversion"

     # since this is a git package, the $pkgver is equal to $(date +%Y%m%d)
     msg2 "Appending $pkgver to CONFIG_LOCALVERSION..."
     sed -ri "s/^(CONFIG_LOCALVERSION=).*$/\1\"$_localversion-$pkgver\"/" .config
   fi

+  ####################################
+  # Append pkgrel to kernel version
+  ####################################
+  sed -ri "s/^(EXTRAVERSION =).*$/\1 -$pkgrel/" Makefile
+
   #################
   # BUILD PROCESS
   #################

-  ####################################
-  # don't run depmod on 'make install'
-  ####################################
-  sed -i '2iexit 0' scripts/depmod.sh
-  git update-index --assume-unchanged scripts/depmod.sh
-
   ################################
   # Build the kernel and modules
   ################################
   msg "Building kernel and modules..."
   if [[ -n $_make_modules ]]; then
     make $MAKEFLAGS V="$_verbose" bzImage modules
   else
     make $MAKEFLAGS V="$_verbose" bzImage
   fi
+
+  ############
+  # CLEANUP
+  ############
+
+  ###################################
+  # Copy files from build directory
+  ####################################
+  # if (( ! CLEANUP )) && [[ $_build_dir != $srcdir ]]; then
+  #   msg "Saving $_kernel_src to $srcdir/${_kernel_src##*/}..."
+  #   mv "$_kernel_src" "$srcdir"
+  #   rm -rf "$_kernel_src"
+  # fi
 }

 _generic_package_initialization() {
-  cd "$srcdir/${_kernel_src##*/}"
+  cd "$_kernel_src"
   _karch="x86"

   ######################
   # Get kernel version
   ######################
   _kernver=$(make kernelrelease)
-  _basekernver=${_kernver%%-*}
+  _basekernel=${_kernver%%-*}
+
+  ############################################################
+  # Use kernel version instead of the current date as pkgver
+  ############################################################
+  if [[ -n $_kernel_pkgver ]]; then
+    pkgver=${_kernver//-/_}
+    msg "Setting pkgver to kernel version: $pkgver"
+  fi
 }

-_generic_package_linux() {
+_generic_package_kernel() {
   pkgdesc="The Linux Kernel and modules from Linus' git tree"
+  depends=('coreutils' 'linux-firmware' 'module-init-tools' 'mkinitcpio')
   backup=(etc/mkinitcpio.d/$pkgname.preset)
   install=$pkgname.install
   changelog=$pkgname.changelog

   # set required variables
   _generic_package_initialization

   #############################################################
   # Provide linux
   # (probably someone wants to use this kernel exclusively?)
   #############################################################
   provides=("${provides[@]}" "linux=${_kernver//-/_}")

   ################
   # INSTALLATION
   ################

   #####################
   # Install the image
   #####################
   msg "Installing kernel image..."
   install -Dm644 arch/$_karch/boot/bzImage "$pkgdir/boot/vmlinuz-$pkgname"

   ##########################
   # Install kernel modules
   ##########################
-  msg "Installing kernel modules..."
-  if [[ -n $_make_modules ]]; then
-    # force -j1 to work around make 3.82 bug
-    make -j1 INSTALL_MOD_PATH="$pkgdir/usr" modules_install
-    [[ -z $_no_modules_compression ]] && find "$pkgdir" -name "*.ko" -exec gzip -9 {} +
-
-    #########################################################
-    # Set up extramodules directory (for external modules)
-    #########################################################
-    local extramodules="$pkgdir/usr/lib/modules/extramodules-$(cut -d. -f1,2 <<<$_basekernver)"
-    local modversion=$(grep '^CONFIG_LOCALVERSION=' .config | cut -d'"' -f2)
-    [[ -n $modversion ]] && extramodules+=$modversion
-    install -dm755 "${extramodules}${_pkgext}"
-    echo $_kernver > "${extramodules}${_pkgext}/version"
-    ln -s "../${extramodules##*/}${_pkgext}" "$pkgdir/usr/lib/modules/$_kernver/extramodules"
+  if [[ -n $_make_modules ]]; then
+    msg "Installing kernel modules..."
+    make INSTALL_MOD_PATH="$pkgdir" modules_install
+    [[ -z $_no_modules_compression ]] && find "$pkgdir" -name "*.ko" \
+      -exec gzip -9 {} +

     ##################################
     # Create important symlinks
     ##################################
     msg "Creating important symlinks..."

     # Create generic modules symlink
-    if [[ $_kernver != ${_basekernver}${_pkgext} ]]; then
-      cd "$pkgdir/usr/lib/modules"
-      ln -s "$_kernver" "${_basekernver}${_pkgext}"
+    if [[ $_kernver != ${_basekernel}${_pkgext} ]]; then
+      cd "$pkgdir/lib/modules"
+      ln -s "$_kernver" "${_basekernel}${_pkgext}"
       cd "$OLDPWD"
-    fi

-    # remove header symlinks
-    cd "$pkgdir/usr/lib/modules/$_kernver"
-    rm -rf source build
-    cd "$OLDPWD"
+      # remove header symlinks
+      cd "$pkgdir/lib/modules/$_kernver"
+      rm -rf source build
+      cd "$OLDPWD"
+    fi
   fi

   ############################
   # Install mkinitcpio files
   ############################
   install -d "$pkgdir/etc/mkinitcpio.d"

   msg "Generating $pkgname.preset..."
   cat > "$pkgdir/etc/mkinitcpio.d/$pkgname.preset" <<EOF
-# mkinitcpio preset file for the '$pkgname' package
+# mkinitcpio preset file for $pkgname

 ALL_config="/etc/mkinitcpio.conf"
 ALL_kver="/boot/vmlinuz-$pkgname"

@@ -368,149 +398,159 @@ COMPRESSION="lz4" # since kernel 2.6.34
 EOF

+  msg "Generating $pkgname.kver..."
+  echo -e "# DO NOT EDIT THIS FILE\nALL_kver='$_kernver'" \
+    > "$pkgdir/etc/mkinitcpio.d/$pkgname.kver"
+
   #######################
   # Update install file
   #######################
   msg "Updating install file..."
   sed -ri "s/^(pkgname=).*$/\1$pkgname/" "$startdir/$pkgname.install"
   sed -ri "s/^(kernver=).*$/\1$_kernver/" "$startdir/$pkgname.install"

   #######################
   # Remove the firmware
   #######################
-  rm -rf "$pkgdir/usr/lib/firmware"
+  # remove the firmware
+  rm -rf "${pkgdir}/lib/firmware"

-  #######################
-  # Run depmod
-  #######################
   if [[ -n $_make_modules ]]; then
-    depmod -a "$pkgdir/usr"
-    depmod -b "$pkgdir/usr" -F System.map "$_kernver"
+    # Now we call depmod...
+    depmod -b "${pkgdir}" -F System.map "${_kernver}"
+
+    # move module tree /lib -> /usr/lib
+    mkdir -p "${pkgdir}/usr"
+    mv "${pkgdir}/lib" "${pkgdir}/usr/"
   fi
 }

-_generic_package_linux-headers() {
+_generic_package_kernel-headers() {
   pkgdesc="Header files and scripts for building modules for $pkgbase"
   depends=("$pkgbase")

   # set required variables
   _generic_package_initialization

   #############################################################
   # Provide linux-headers
   # (probably someone wants to use this kernel exclusively?)
   #############################################################
   provides=("${provides[@]}" "linux-headers=${_kernver//-/_}")

   ##############################
   # Install fake kernel source
   ##############################
   install -Dm644 Module.symvers "$pkgdir/usr/src/linux-$_kernver/Module.symvers"
   install -Dm644 Makefile "$pkgdir/usr/src/linux-$_kernver/Makefile"
   install -Dm644 kernel/Makefile "$pkgdir/usr/src/linux-$_kernver/kernel/Makefile"
-  install -Dm644 .config "$pkgdir/usr/src/linux-$_kernver/.config"
   install -Dm644 .config "$pkgdir/usr/lib/modules/$_kernver/.config"

   #######################################################
   # Install scripts directory and fix permissions on it
   #######################################################
   cp -a scripts "$pkgdir/usr/src/linux-$_kernver"

   ##########################
   # Install header files
   ##########################
   msg "Installing header files..."
   for i in net/ipv4/netfilter/ipt_CLUSTERIP.c \
-    $(find include/ net/mac80211/ drivers/md -iname "*.h") \
+    $(find include/ net/mac80211/ drivers/{md,media/video/} -iname "*.h") \
     $(find include/config/ -type f) \
     $(find . -name "Kconfig*")
   do
     mkdir -p "$pkgdir/usr/src/linux-$_kernver/${i%/*}"
     cp -af "$i" "$pkgdir/usr/src/linux-$_kernver/$i"
   done

   # required by virtualbox and probably others
   ln -s "../generated/autoconf.h" "$pkgdir/usr/src/linux-$_kernver/include/linux/"

   ########################################
   # Install architecture dependent files
   ########################################
   msg "Installing architecture files..."
   mkdir -p "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/kernel"
   cp -a arch/$_karch/kernel/asm-offsets.s "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/kernel/"

   cp -a arch/$_karch/Makefile* "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/"
   cp -a arch/$_karch/configs "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/"

   # copy arch includes for external modules and fix the nVidia issue
   mkdir -p "$pkgdir/usr/src/linux-$_kernver/arch/$_karch"
   cp -a "arch/$_karch/include" "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/"

   # create a necessary symlink to the arch folder
   cd "$pkgdir/usr/src/linux-$_kernver/arch"
   if [[ $CARCH = "x86_64" ]]; then
     ln -s $_karch x86_64
   else
     ln -s $_karch i386
   fi
   cd "$OLDPWD"

   ################################
   # Remove unneeded architecures
   ################################
   msg "Removing unneeded architectures..."
   for i in "$pkgdir/usr/src/linux-$_kernver/arch/"*; do
     [[ ${i##*/} =~ ($_karch|Kconfig) ]] || rm -rf "$i"
   done

   ############################
   # Remove .gitignore files
   ############################
   msg "Removing .gitignore files from kernel source..."
   find "$pkgdir/usr/src/linux-$_kernver/" -name ".gitignore" -delete

   ##################################
   # Create important symlinks
   ##################################
   msg "Creating important symlinks..."

   # the build symlink needs to be relative
+  if [[ -n $_make_modules ]]; then
     cd "$pkgdir/usr/lib/modules/$_kernver"
     rm -rf source build
     ln -s "/usr/src/linux-$_kernver" build
     cd "$OLDPWD"
+  fi

   if [[ $_kernver != ${_basekernver}${_pkgext} ]]; then
     cd "$pkgdir/usr/src"
-    ln -s "linux-$_kernver" "linux-${_basekernver}${_pkgext}"
+    ln -s "linux-$_kernver" "linux-${_basekernel}${_pkgext}"
     cd "$OLDPWD"
   fi
 }

-_generic_package_linux-docs() {
+_generic_package_kernel-docs() {
   pkgdesc="Kernel hackers manual - HTML documentation that comes with the Linux kernel."
   depends=("$pkgbase")

   # set required variables
   _generic_package_initialization

   mkdir -p "$pkgdir/usr/src/linux-$_kernver"
-  cp -aL Documentation "$pkgdir/usr/src/linux-$_kernver/"
+  cp -a Documentation "$pkgdir/usr/src/linux-$_kernver/"
 }

 # vim: set fenc=utf-8 ts=2 sw=2 noet:
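A note on the MAKEFLAGS line kept by this commit: it derives the parallel job count by counting "processor" lines in /proc/cpuinfo and doubling the result. A shorter equivalent — a sketch only, using nproc from coreutils, which this PKGBUILD already lists in depends — would be:

  # number of online CPUs, times two, as the make job count
  MAKEFLAGS="-j$(( $(nproc) * 2 ))"

Both forms expand to, e.g., "-j 8" on a four-core machine.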

File diff suppressed because it is too large

linux-spica.install

@@ -1,5 +1,5 @@
 pkgname=linux-spica
-kernver=4.7.5-1spica-dirty
+kernver=4.10.8spica-dirty
 #bootdevice="BOOT_IMAGE=/boot/vmlinuz-$pkgname root=UUID=d670564f-2cb3-4981-9d51-6ed9c1327d47"
 #option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd intel_iommu=on pci-stub.ids=1002:683f,1002:aab0 vfio_iommu_type1.allow_unsafe_interrupts=1,kvm.ignore_msrs=1"
 #option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd quiet intremap=no_x2apic_optout zswap.enabled=1 zswap.max_pool_percent=25 zswap.compressor=lz4"
@@ -12,19 +12,19 @@ post_install () {
   echo "> Generating initramfs, using mkinitcpio. Please wait..."
   echo ">"
   mkinitcpio -p $pkgname
-  echo "> Modifing efibootmgr..."
-  efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){printf "efibootmgr -b %s -B;echo \">> remove entry : %s\";",m[1],m[2]}'|sh
-  echo "> Copy efistub from boot"
-  cp -fv "boot/vmlinuz-$pkgname" "boot/efi/EFI/spi-ca/kernel.efi"
-  cp -fv "boot/initramfs-$pkgname.img" "boot/efi/EFI/spi-ca/initrd"
-  echo "> Registering efistub "
+  # echo "> Modifing efibootmgr..."
+  # efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){printf "efibootmgr -b %s -B;echo \">> remove entry : %s\";",m[1],m[2]}'|sh
+  # echo "> Copy efistub from boot"
+  # cp -fv "boot/vmlinuz-$pkgname" "boot/efi/EFI/spi-ca/kernel.efi"
+  # cp -fv "boot/initramfs-$pkgname.img" "boot/efi/EFI/spi-ca/initrd"
+  # echo "> Registering efistub "
   #echo 'efibootmgr -c -g -d /dev/sda -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel.efi" #-u "$bootdevice $option"'
-  efibootmgr -c -g -d /dev/sde -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel" # -u "$bootdevice $option"
-  echo "> Reordering Bootorder..."
-  newentry=`efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){print m[1]}'`
-  prebootorder=`efibootmgr |grep BootOrder |cut -d : -f 2 |tr -d ' '`
-  efibootmgr -O
-  efibootmgr -o ${newentry},${prebootorder}
+  # efibootmgr -c -g -d /dev/sde -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel" # -u "$bootdevice $option"
+  # echo "> Reordering Bootorder..."
+  # newentry=`efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){print m[1]}'`
+  # prebootorder=`efibootmgr |grep BootOrder |cut -d : -f 2 |tr -d ' '`
+  # efibootmgr -O
+  # efibootmgr -o ${newentry},${prebootorder}
   echo "> OK!"
 }

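The efibootmgr pipeline that this commit comments out in post_install is dense; unrolled, it reads roughly as follows. This is a sketch for explanation only — the "spi-ca_v" label pattern is taken from the script above, and the three-argument match() with a capture array is a gawk extension, so the one-liner assumes awk is gawk:

  # List boot entries, find lines like "Boot0003* spi-ca_v4.7.5-1spica-dirty",
  # and emit one "efibootmgr -b XXXX -B" command per match to delete that entry.
  efibootmgr \
    | awk 'match($0, /^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/, m) {
        printf "efibootmgr -b %s -B;echo \">> remove entry : %s\";", m[1], m[2]
      }' \
    | sh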

@@ -1,7 +1,7 @@
-From 22ee35ec82fa543b65c1b6d516a086a21f723846 Mon Sep 17 00:00:00 2001
+From 8500f47272575b4616beb487c483019248d8c501 Mon Sep 17 00:00:00 2001
 From: Paolo Valente <paolo.valente@unimore.it>
 Date: Tue, 7 Apr 2015 13:39:12 +0200
-Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.7.0
+Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.10.0

 Update Kconfig.iosched and do the related Makefile changes to include
 kernel configuration options for BFQ. Also increase the number of
@@ -74,7 +74,7 @@ index 421bef9..0ee5f0f 100644
  endmenu

 diff --git a/block/Makefile b/block/Makefile
-index 9eda232..4a36683 100644
+index a827f98..3b14703 100644
 --- a/block/Makefile
 +++ b/block/Makefile
 @@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
@@ -86,18 +86,18 @@ index 9eda232..4a36683 100644
  obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
  obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o

 diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index 3d9cf32..8d862a0 100644
+index 1ca8e8f..8e2d6ed 100644
 --- a/include/linux/blkdev.h
 +++ b/include/linux/blkdev.h
-@@ -45,7 +45,7 @@ struct pr_ops;
+@@ -47,7 +47,7 @@ struct rq_wb;
   * Maximum number of blkcg policies allowed to be registered concurrently.
   * Defined here to simplify include dependency.
   */
 -#define BLKCG_MAX_POLS 2
 +#define BLKCG_MAX_POLS 3

- struct request;
  typedef void (rq_end_io_fn)(struct request *, int);
 --
-1.9.1
+2.10.0

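Outside the build files, the one functional change in patch 1 is raising BLKCG_MAX_POLS from 2 to 3: BFQ registers a third blkcg policy alongside the two already present in this kernel (CFQ's proportional-weight policy and blk-throttle). Once a kernel built with the BFQ Kconfig option from this patch set is booted, the scheduler can be inspected and switched per device at runtime — a sketch, assuming a disk named sda with a legacy (non-blk-mq) request queue:

  cat /sys/block/sda/queue/scheduler     # e.g. "noop deadline cfq [bfq]"
  echo bfq > /sys/block/sda/queue/scheduler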

@@ -1,7 +1,7 @@
-From 2aae32be2a18a7d0da104ae42c08cb9bce9d9c7c Mon Sep 17 00:00:00 2001
+From 2f56e91506b329ffc29d0f184924ad0123c9ba9e Mon Sep 17 00:00:00 2001
 From: Paolo Valente <paolo.valente@unimore.it>
 Date: Thu, 9 May 2013 19:10:02 +0200
-Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched for 4.7.0
+Subject: [PATCH 2/4] block: introduce the BFQ-v7r11 I/O sched for 4.10.0

 The general structure is borrowed from CFQ, as much of the code for
 handling I/O contexts. Over time, several useful features have been
@@ -56,12 +56,12 @@ Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
 Signed-off-by: Arianna Avanzini <avanzini@google.com>
 ---
  block/Kconfig.iosched |    6 +-
- block/bfq-cgroup.c    | 1182 ++++++++++++++++
+ block/bfq-cgroup.c    | 1186 ++++++++++++++++
  block/bfq-ioc.c       |   36 +
- block/bfq-iosched.c   | 3754 +++++++++++++++++++++++++++++++++++++++++++++++++
- block/bfq-sched.c     | 1200 ++++++++++++++++
+ block/bfq-iosched.c   | 3763 +++++++++++++++++++++++++++++++++++++++++++++++++
+ block/bfq-sched.c     | 1199 ++++++++++++++++
  block/bfq.h           |  801 +++++++++++
- 6 files changed, 6975 insertions(+), 4 deletions(-)
+ 6 files changed, 6987 insertions(+), 4 deletions(-)
 create mode 100644 block/bfq-cgroup.c
 create mode 100644 block/bfq-ioc.c
 create mode 100644 block/bfq-iosched.c
@@ -91,10 +91,10 @@ index 0ee5f0f..f78cd1a 100644
 	prompt "Default I/O scheduler"
 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
 new file mode 100644
-index 0000000..8610cd6
+index 0000000..8b08a57
 --- /dev/null
 +++ b/block/bfq-cgroup.c
-@@ -0,0 +1,1182 @@
+@@ -0,0 +1,1186 @@
 +/*
 + * BFQ: CGROUPS support.
 + *
@@ -259,7 +259,9 @@ index 0000000..8610cd6
 +static struct bfq_group *blkg_to_bfqg(struct blkcg_gq *blkg)
 +{
 + struct blkg_policy_data *pd = blkg_to_pd(blkg, &blkcg_policy_bfq);
++
 + BUG_ON(!pd);
++
 + return pd_to_bfqg(pd);
 +}
 +
@@ -379,7 +381,8 @@ index 0000000..8610cd6
 + blkg_stat_add_aux(&from->time, &from->time);
 + blkg_stat_add_aux(&to->unaccounted_time, &from->unaccounted_time);
 + blkg_stat_add_aux(&to->avg_queue_size_sum, &from->avg_queue_size_sum);
-+ blkg_stat_add_aux(&to->avg_queue_size_samples, &from->avg_queue_size_samples);
++ blkg_stat_add_aux(&to->avg_queue_size_samples,
++ &from->avg_queue_size_samples);
 + blkg_stat_add_aux(&to->dequeue, &from->dequeue);
 + blkg_stat_add_aux(&to->group_wait_time, &from->group_wait_time);
 + blkg_stat_add_aux(&to->idle_time, &from->idle_time);
@@ -471,9 +474,9 @@ index 0000000..8610cd6
 +}
 +
 +static struct bfq_group_data *cpd_to_bfqgd(struct blkcg_policy_data *cpd)
-+ {
++{
 + return cpd ? container_of(cpd, struct bfq_group_data, pd) : NULL;
-+ }
++}
 +
 +static struct bfq_group_data *blkcg_to_bfqgd(struct blkcg *blkcg)
 +{
@@ -562,8 +565,8 @@ index 0000000..8610cd6
 +}
 +
 +/* to be used by recursive prfill, sums live and dead rwstats recursively */
-+static struct blkg_rwstat bfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd,
-+ int off)
++static struct blkg_rwstat
++bfqg_rwstat_pd_recursive_sum(struct blkg_policy_data *pd, int off)
 +{
 + struct blkg_rwstat a, b;
 +
@@ -776,7 +779,6 @@ index 0000000..8610cd6
 +
 + BUG_ON(!bfqq);
 + bfq_bfqq_move(bfqd, bfqq, entity, bfqd->root_group);
-+ return;
 +}
 +
 +/**
@@ -804,8 +806,6 @@ index 0000000..8610cd6
 + if (bfqg->sched_data.in_service_entity)
 + bfq_reparent_leaf_entity(bfqd,
 + bfqg->sched_data.in_service_entity);
-+
-+ return;
 +}
 +
 +/**
@@ -930,6 +930,7 @@ index 0000000..8610cd6
 + bfqgd->weight = (unsigned short)val;
 + hlist_for_each_entry(blkg, &blkcg->blkg_list, blkcg_node) {
 + struct bfq_group *bfqg = blkg_to_bfqg(blkg);
++
 + if (!bfqg)
 + continue;
 + /*
@@ -1043,7 +1044,8 @@ index 0000000..8610cd6
 + return 0;
 +}
 +
-+static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
++static struct bfq_group *
++bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 +{
 + int ret;
 +
@@ -1051,22 +1053,22 @@ index 0000000..8610cd6
 + if (ret)
 + return NULL;
 +
 + return blkg_to_bfqg(bfqd->queue->root_blkg);
 +}
 +
 +static struct blkcg_policy_data *bfq_cpd_alloc(gfp_t gfp)
 +{
 + struct bfq_group_data *bgd;
 +
 + bgd = kzalloc(sizeof(*bgd), GFP_KERNEL);
 + if (!bgd)
 + return NULL;
 + return &bgd->pd;
 +}
 +
 +static void bfq_cpd_free(struct blkcg_policy_data *cpd)
 +{
 + kfree(cpd_to_bfqgd(cpd));
 +}
 +
 +static struct cftype bfqio_files_dfl[] = {
@@ -1201,20 +1203,19 @@ index 0000000..8610cd6
 +};
 +
 +static struct blkcg_policy blkcg_policy_bfq = {
 + .dfl_cftypes = bfqio_files_dfl,
 + .legacy_cftypes = bfqio_files,
 +
 + .pd_alloc_fn = bfq_pd_alloc,
 + .pd_init_fn = bfq_pd_init,
 + .pd_offline_fn = bfq_pd_offline,
 + .pd_free_fn = bfq_pd_free,
 + .pd_reset_stats_fn = bfq_pd_reset_stats,
-+
-+ .cpd_alloc_fn = bfq_cpd_alloc,
-+ .cpd_init_fn = bfq_cpd_init,
-+ .cpd_bind_fn = bfq_cpd_init,
-+ .cpd_free_fn = bfq_cpd_free,
 +
++ .cpd_alloc_fn = bfq_cpd_alloc,
++ .cpd_init_fn = bfq_cpd_init,
++ .cpd_bind_fn = bfq_cpd_init,
++ .cpd_free_fn = bfq_cpd_free,
 +};
 +
 +#else
@@ -1223,6 +1224,7 @@ index 0000000..8610cd6
 + struct bfq_group *bfqg)
 +{
 + struct bfq_queue *bfqq = bfq_entity_to_bfqq(entity);
++
 + entity->weight = entity->new_weight;
 + entity->orig_weight = entity->new_weight;
 + if (bfqq) {
@@ -1236,6 +1238,7 @@ index 0000000..8610cd6
 +bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
 +{
 + struct bfq_data *bfqd = bic_to_bfqd(bic);
++
 + return bfqd->root_group;
 +}
 +
@@ -1257,12 +1260,13 @@ index 0000000..8610cd6
 +}
 +
 +static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
 + struct blkcg *blkcg)
 +{
 + return bfqd->root_group;
 +}
 +
-+static struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
++static struct bfq_group *
++bfq_create_group_hierarchy(struct bfq_data *bfqd, int node)
 +{
 + struct bfq_group *bfqg;
 + int i;
@@ -1321,10 +1325,10 @@ index 0000000..fb7bb8f
 +}
 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
 new file mode 100644
-index 0000000..f9787a6
+index 0000000..85e2169
 --- /dev/null
 +++ b/block/bfq-iosched.c
-@@ -0,0 +1,3754 @@
+@@ -0,0 +1,3763 @@
 +/*
 + * Budget Fair Queueing (BFQ) disk scheduler.
 + *
@@ -1542,7 +1546,7 @@ index 0000000..f9787a6
 + unsigned long back_max;
 +#define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */
 +#define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */
-+ unsigned wrap = 0; /* bit mask: requests behind the disk head? */
++ unsigned int wrap = 0; /* bit mask: requests behind the disk head? */
 +
 + if (!rq1 || rq1 == rq2)
 + return rq2;
@@ -1597,12 +1601,11 @@ index 0000000..f9787a6
 + return rq1;
 + else if (d2 < d1)
 + return rq2;
-+ else {
-+ if (s1 >= s2)
-+ return rq1;
-+ else
-+ return rq2;
-+ }
++
++ if (s1 >= s2)
++ return rq1;
++ else
++ return rq2;
 +
 + case BFQ_RQ2_WRAP:
 + return rq1;
@@ -1889,7 +1892,7 @@ index 0000000..f9787a6
 + */
 + hlist_for_each_entry(bfqq_item, &bfqd->burst_list,
 + burst_list_node)
 + bfq_mark_bfqq_in_large_burst(bfqq_item);
 + bfq_mark_bfqq_in_large_burst(bfqq);
 +
 + /*
@@ -2288,7 +2291,7 @@ index 0000000..f9787a6
 + bfqd->rq_in_driver++;
 + bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq);
 + bfq_log(bfqd, "activate_request: new bfqd->last_position %llu",
-+ (long long unsigned)bfqd->last_position);
++ (unsigned long long) bfqd->last_position);
 +}
 +
 +static void bfq_deactivate_request(struct request_queue *q, struct request *rq)
@@ -2595,6 +2598,7 @@ index 0000000..f9787a6
 +{
 + struct bfq_queue *bfqq = bfqd->in_service_queue;
 + unsigned int timeout_coeff;
++
 + if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time)
 + timeout_coeff = 1;
 + else
@@ -2667,6 +2671,7 @@ index 0000000..f9787a6
 +static int bfq_bfqq_budget_left(struct bfq_queue *bfqq)
 +{
 + struct bfq_entity *entity = &bfqq->entity;
++
 + return entity->budget - entity->service;
 +}
 +
@@ -2906,6 +2911,7 @@ index 0000000..f9787a6
 + if (bfqd->peak_rate_samples == BFQ_PEAK_RATE_SAMPLES &&
 + update) {
 + int dev_type = blk_queue_nonrot(bfqd->queue);
++
 + if (bfqd->bfq_user_max_budget == 0) {
 + bfqd->bfq_max_budget =
 + bfq_calc_max_budget(bfqd->peak_rate,
@@ -3065,6 +3071,7 @@ index 0000000..f9787a6
 + enum bfqq_expiration reason)
 +{
 + bool slow;
++
 + BUG_ON(bfqq != bfqd->in_service_queue);
 +
 + /*
@@ -3098,7 +3105,7 @@ index 0000000..f9787a6
 + }
 +
 + if (reason == BFQ_BFQQ_TOO_IDLE &&
-+ bfqq->entity.service <= 2 * bfqq->entity.budget / 10 )
++ bfqq->entity.service <= 2 * bfqq->entity.budget / 10)
 + bfq_clear_bfqq_IO_bound(bfqq);
 +
 + if (bfqd->low_latency && bfqq->wr_coeff == 1)
@@ -3244,7 +3251,7 @@ index 0000000..f9787a6
 + */
 + idling_boosts_thr = !bfqd->hw_tag ||
 + (!blk_queue_nonrot(bfqd->queue) && bfq_bfqq_IO_bound(bfqq) &&
-+ bfq_bfqq_idle_window(bfqq)) ;
++ bfq_bfqq_idle_window(bfqq));
 +
 + /*
 + * The value of the next variable,
@@ -3356,7 +3363,7 @@ index 0000000..f9787a6
 + * (i) each of these processes must get the same throughput as
 + * the others;
 + * (ii) all these processes have the same I/O pattern
-+ (either sequential or random).
++ * (either sequential or random).
 + * In fact, in such a scenario, the drive will tend to treat
 + * the requests of each of these processes in about the same
 + * way as the requests of the others, and thus to provide
@@ -3553,6 +3560,7 @@ index 0000000..f9787a6
 +static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 +{
 + struct bfq_entity *entity = &bfqq->entity;
++
 + if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */
 + bfq_log_bfqq(bfqd, bfqq,
 + "raising period dur %u/%u msec, old coeff %u, w %d(%d)",
@@ -3643,7 +3651,7 @@ index 0000000..f9787a6
 + bfq_log_bfqq(bfqd, bfqq,
 + "dispatched %u sec req (%llu), budg left %d",
 + blk_rq_sectors(rq),
-+ (long long unsigned)blk_rq_pos(rq),
++ (unsigned long long) blk_rq_pos(rq),
 + bfq_bfqq_budget_left(bfqq));
 +
 + dispatched++;
@@ -3841,7 +3849,8 @@ index 0000000..f9787a6
 + * Update the entity prio values; note that the new values will not
 + * be used until the next (re)activation.
 + */
-+static void bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
++static void
++bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic)
 +{
 + struct task_struct *tsk = current;
 + int ioprio_class;
@@ -3874,8 +3883,8 @@ index 0000000..f9787a6
 + }
 +
 + if (bfqq->new_ioprio < 0 || bfqq->new_ioprio >= IOPRIO_BE_NR) {
-+ printk(KERN_CRIT "bfq_set_next_ioprio_data: new_ioprio %d\n",
++ pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
 + bfqq->new_ioprio);
 + BUG();
 + }
@@ -3999,7 +4008,7 @@ index 0000000..f9787a6
 +
 + if (bfqq) {
 + bfq_init_bfqq(bfqd, bfqq, bic, current->pid,
 + is_sync);
 + bfq_init_entity(&bfqq->entity, bfqg);
 + bfq_log_bfqq(bfqd, bfqq, "allocated");
 + } else {
@@ -4187,7 +4196,7 @@ index 0000000..f9787a6
 + bfq_log_bfqq(bfqd, bfqq,
 + "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
 + bfq_bfqq_idle_window(bfqq), BFQQ_SEEKY(bfqq),
-+ (long long unsigned)bfqq->seek_mean);
++ (unsigned long long) bfqq->seek_mean);
 +
 + bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq);
 +
@@ -4738,8 +4747,7 @@ index 0000000..f9787a6
 +
 +static void bfq_slab_kill(void)
 +{
-+ if (bfq_pool)
-+ kmem_cache_destroy(bfq_pool);
++ kmem_cache_destroy(bfq_pool);
 +}
 +
 +static int __init bfq_slab_setup(void)
@@ -4770,6 +4778,7 @@ index 0000000..f9787a6
 +static ssize_t bfq_wr_max_time_show(struct elevator_queue *e, char *page)
 +{
 + struct bfq_data *bfqd = e->elevator_data;
++
 + return sprintf(page, "%d\n", bfqd->bfq_wr_max_time > 0 ?
 + jiffies_to_msecs(bfqd->bfq_wr_max_time) :
 + jiffies_to_msecs(bfq_wr_duration(bfqd)));
@@ -4788,25 +4797,29 @@ index 0000000..f9787a6
 +
 + num_char += sprintf(page + num_char, "Active:\n");
 + list_for_each_entry(bfqq, &bfqd->active_list, bfqq_list) {
 + num_char += sprintf(page + num_char,
-+ "pid%d: weight %hu, nr_queued %d %d, dur %d/%u\n",
++ "pid%d: weight %hu, nr_queued %d %d, ",
 + bfqq->pid,
 + bfqq->entity.weight,
 + bfqq->queued[0],
-+ bfqq->queued[1],
-+ jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish),
-+ jiffies_to_msecs(bfqq->wr_cur_max_time));
++ bfqq->queued[1]);
++ num_char += sprintf(page + num_char,
++ "dur %d/%u\n",
++ jiffies_to_msecs(
++ jiffies -
++ bfqq->last_wr_start_finish),
++ jiffies_to_msecs(bfqq->wr_cur_max_time));
 + }
 +
 + num_char += sprintf(page + num_char, "Idle:\n");
 + list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) {
 + num_char += sprintf(page + num_char,
 + "pid%d: weight %hu, dur %d/%u\n",
 + bfqq->pid,
 + bfqq->entity.weight,
 + jiffies_to_msecs(jiffies -
 + bfqq->last_wr_start_finish),
 + jiffies_to_msecs(bfqq->wr_cur_max_time));
 + }
 +
 + spin_unlock_irq(bfqd->queue->queue_lock);
@@ -5081,10 +5094,10 @@ index 0000000..f9787a6
 +MODULE_LICENSE("GPL");
 diff --git a/block/bfq-sched.c b/block/bfq-sched.c
 new file mode 100644
-index 0000000..a64fec1
+index 0000000..a5ed694
 --- /dev/null
 +++ b/block/bfq-sched.c
-@@ -0,0 +1,1200 @@
+@@ -0,0 +1,1199 @@
 +/*
 + * BFQ: Hierarchical B-WF2Q+ scheduler.
 + *
@@ -5715,8 +5728,7 @@ index 0000000..a64fec1
 + if (entity->new_weight != entity->orig_weight) {
 + if (entity->new_weight < BFQ_MIN_WEIGHT ||
 + entity->new_weight > BFQ_MAX_WEIGHT) {
-+ printk(KERN_CRIT "update_weight_prio: "
-+ "new_weight %d\n",
++ pr_crit("update_weight_prio: new_weight %d\n",
 + entity->new_weight);
 + BUG();
 + }
@@ -6287,7 +6299,7 @@ index 0000000..a64fec1
 +}
 diff --git a/block/bfq.h b/block/bfq.h
 new file mode 100644
-index 0000000..485d0c9
+index 0000000..2bf54ae
 --- /dev/null
 +++ b/block/bfq.h
 @@ -0,0 +1,801 @@
@@ -6722,10 +6734,10 @@ index 0000000..485d0c9
 + * @last_ins_in_burst.
 + * @burst_size: number of queues in the current burst of queue activations.
 + * @bfq_large_burst_thresh: maximum burst size above which the current
 + * queue-activation burst is deemed as 'large'.
 + * @large_burst: true if a large queue-activation burst is in progress.
 + * @burst_list: head of the burst list (as for the above fields, more details
 + * in the comments to the function bfq_handle_burst).
 + * @low_latency: if set to true, low-latency heuristics are enabled.
 + * @bfq_wr_coeff: maximum factor by which the weight of a weight-raised
 + * queue is multiplied.
@@ -7093,5 +7105,5 @@ index 0000000..485d0c9
 +
 +#endif /* _BFQ_H */
 --
-1.9.1
+2.10.0


@@ -1,8 +1,8 @@
-From 47de1e46ef5f462e9694e5b0607aec6ad658f1e0 Mon Sep 17 00:00:00 2001
+From e4d9bed2dfdec562b23491e44602c89c4a2a5ea4 Mon Sep 17 00:00:00 2001
 From: Mauro Andreolini <mauro.andreolini@unimore.it>
 Date: Sun, 6 Sep 2015 16:09:05 +0200
 Subject: [PATCH 3/4] block, bfq: add Early Queue Merge (EQM) to BFQ-v7r11 for
- 4.7.0
+ 4.10.0

 A set of processes may happen to perform interleaved reads, i.e.,requests
 whose union would give rise to a sequential read pattern. There are two
@@ -35,16 +35,16 @@ Signed-off-by: Arianna Avanzini <avanzini@google.com>
 Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
 Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
 ---
- block/bfq-cgroup.c  |   4 +
- block/bfq-iosched.c | 687 ++++++++++++++++++++++++++++++++++++++++++++++++++--
+ block/bfq-cgroup.c  |   5 +
+ block/bfq-iosched.c | 685 +++++++++++++++++++++++++++++++++++++++++++++++++++-
  block/bfq.h         |  66 +++++
- 3 files changed, 743 insertions(+), 14 deletions(-)
+ 3 files changed, 743 insertions(+), 13 deletions(-)

 diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
-index 8610cd6..5ee99ec 100644
+index 8b08a57..0367996 100644
 --- a/block/bfq-cgroup.c
 +++ b/block/bfq-cgroup.c
-@@ -437,6 +437,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
+@@ -440,6 +440,7 @@ static void bfq_pd_init(struct blkg_policy_data *pd)
   */
  bfqg->bfqd = bfqd;
  bfqg->active_entities = 0;
@@ -52,16 +52,17 @@ index 8610cd6..5ee99ec 100644
 }

 static void bfq_pd_free(struct blkg_policy_data *pd)
-@@ -530,6 +531,8 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
+@@ -533,6 +534,9 @@ static struct bfq_group *bfq_find_alloc_group(struct bfq_data *bfqd,
  return bfqg;
 }

 +
-+static void bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq);
++static void bfq_pos_tree_add_move(struct bfq_data *bfqd,
++ struct bfq_queue *bfqq);
 +
 /**
  * bfq_bfqq_move - migrate @bfqq to @bfqg.
  * @bfqd: queue descriptor.
-@@ -577,6 +580,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+@@ -580,6 +584,7 @@ static void bfq_bfqq_move(struct bfq_data *bfqd, struct bfq_queue *bfqq,
  bfqg_get(bfqg);

  if (busy) {
@@ -70,10 +71,10 @@ index 8610cd6..5ee99ec 100644
  bfq_activate_bfqq(bfqd, bfqq);
  }

 diff --git a/block/bfq-iosched.c b/block/bfq-iosched.c
-index f9787a6..d1f648d 100644
+index 85e2169..cf3e9b1 100644
 --- a/block/bfq-iosched.c
 +++ b/block/bfq-iosched.c
-@@ -296,6 +296,72 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
+@@ -295,6 +295,72 @@ static struct request *bfq_choose_req(struct bfq_data *bfqd,
  }
 }

@@ -112,7 +113,7 @@ index f9787a6..d1f648d 100644
 + *rb_link = p;
 +
 + bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
-+ (long long unsigned)sector,
++ (unsigned long long) sector,
 + bfqq ? bfqq->pid : 0);
 +
 + return bfqq;
@@ -146,11 +147,11 @@ index f9787a6..d1f648d 100644
 /*
  * Tell whether there are active queues or groups with differentiated weights.
  */
-@@ -528,6 +594,57 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
+@@ -527,6 +593,57 @@ static unsigned int bfq_wr_duration(struct bfq_data *bfqd)
  return dur;
 }

-+static unsigned bfq_bfqq_cooperations(struct bfq_queue *bfqq)
++static unsigned int bfq_bfqq_cooperations(struct bfq_queue *bfqq)
 +{
 + return bfqq->bic ? bfqq->bic->cooperations : 0;
 +}
@@ -204,7 +205,7 @@ index f9787a6..d1f648d 100644
 /* Empty burst list and add just bfqq (see comments to bfq_handle_burst) */
 static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq)
 {
-@@ -764,8 +881,14 @@ static void bfq_add_request(struct request *rq)
+@@ -763,8 +880,14 @@ static void bfq_add_request(struct request *rq)
  BUG_ON(!next_rq);
  bfqq->next_rq = next_rq;

  idle_for_long_time = time_is_before_jiffies(
  bfqq->budget_timeout +
  bfqd->bfq_wr_min_idle_time);
-@@ -793,11 +916,12 @@ static void bfq_add_request(struct request *rq)
+@@ -792,11 +915,12 @@ static void bfq_add_request(struct request *rq)
  bfqd->last_ins_in_burst = jiffies;
  }

  entity->budget = max_t(unsigned long, bfqq->max_budget,
  bfq_serv_to_charge(next_rq, bfqq));
-@@ -816,6 +940,9 @@ static void bfq_add_request(struct request *rq)
+@@ -815,6 +939,9 @@ static void bfq_add_request(struct request *rq)
  if (!bfqd->low_latency)
  goto add_bfqq_busy;

  /*
  * If the queue:
  * - is not being boosted,
-@@ -840,7 +967,7 @@ static void bfq_add_request(struct request *rq)
+@@ -839,7 +966,7 @@ static void bfq_add_request(struct request *rq)
  } else if (old_wr_coeff > 1) {
  if (interactive)
  bfqq->wr_cur_max_time = bfq_wr_duration(bfqd);
  (bfqq->wr_cur_max_time ==
  bfqd->bfq_wr_rt_max_time &&
  !soft_rt)) {
-@@ -905,6 +1032,7 @@ static void bfq_add_request(struct request *rq)
+@@ -904,6 +1031,7 @@ static void bfq_add_request(struct request *rq)
  bfqd->bfq_wr_rt_max_time;
  }
  }
++
  if (old_wr_coeff != bfqq->wr_coeff)
  entity->prio_changed = 1;
 add_bfqq_busy:
-@@ -1047,6 +1175,15 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
+@@ -1046,6 +1174,15 @@ static void bfq_merged_request(struct request_queue *q, struct request *req,
  bfqd->last_position);
  BUG_ON(!next_rq);
  bfqq->next_rq = next_rq;
  }
 }

-@@ -1129,11 +1266,346 @@ static void bfq_end_wr(struct bfq_data *bfqd)
+@@ -1128,11 +1265,346 @@ static void bfq_end_wr(struct bfq_data *bfqd)
  spin_unlock_irq(bfqd->queue->queue_lock);
 }

 + struct bfq_queue *bfqq, struct bfq_queue *new_bfqq)
 +{
 + bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu",
-+ (long unsigned)new_bfqq->pid);
++ (unsigned long) new_bfqq->pid);
 + /* Save weight raising and idle window of the merged queues */
 + bfq_bfqq_save_state(bfqq);
 + bfq_bfqq_save_state(new_bfqq);
@ -626,7 +627,7 @@ index f9787a6..d1f648d 100644
/* /*
* Disallow merge of a sync bio into an async request. * Disallow merge of a sync bio into an async request.
@@ -1150,7 +1622,26 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq, @@ -1149,7 +1621,26 @@ static int bfq_allow_merge(struct request_queue *q, struct request *rq,
if (!bic) if (!bic)
return 0; return 0;
@ -654,7 +655,7 @@ index f9787a6..d1f648d 100644
} }
static void __bfq_set_in_service_queue(struct bfq_data *bfqd, static void __bfq_set_in_service_queue(struct bfq_data *bfqd,
@@ -1349,6 +1840,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -1350,6 +1841,15 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
__bfq_bfqd_reset_in_service(bfqd); __bfq_bfqd_reset_in_service(bfqd);
@ -670,7 +671,7 @@ index f9787a6..d1f648d 100644
if (RB_EMPTY_ROOT(&bfqq->sort_list)) { if (RB_EMPTY_ROOT(&bfqq->sort_list)) {
/* /*
* Overloading budget_timeout field to store the time * Overloading budget_timeout field to store the time
@@ -1357,8 +1857,13 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -1358,8 +1858,13 @@ static void __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq)
*/ */
bfqq->budget_timeout = jiffies; bfqq->budget_timeout = jiffies;
bfq_del_bfqq_busy(bfqd, bfqq, 1); bfq_del_bfqq_busy(bfqd, bfqq, 1);
@ -685,7 +686,7 @@ index f9787a6..d1f648d 100644
} }
/** /**
@@ -2242,10 +2747,12 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -2246,10 +2751,12 @@ static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq)
/* /*
* If the queue was activated in a burst, or * If the queue was activated in a burst, or
* too much time has elapsed from the beginning * too much time has elapsed from the beginning
@ -700,7 +701,7 @@ index f9787a6..d1f648d 100644
time_is_before_jiffies(bfqq->last_wr_start_finish + time_is_before_jiffies(bfqq->last_wr_start_finish +
bfqq->wr_cur_max_time)) { bfqq->wr_cur_max_time)) {
bfqq->last_wr_start_finish = jiffies; bfqq->last_wr_start_finish = jiffies;
@@ -2474,6 +2981,25 @@ static void bfq_put_queue(struct bfq_queue *bfqq) @@ -2478,6 +2985,25 @@ static void bfq_put_queue(struct bfq_queue *bfqq)
#endif #endif
} }
@ -726,7 +727,7 @@ index f9787a6..d1f648d 100644
static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
{ {
if (bfqq == bfqd->in_service_queue) { if (bfqq == bfqd->in_service_queue) {
@@ -2484,6 +3010,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) @@ -2488,6 +3014,8 @@ static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq)
bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq,
atomic_read(&bfqq->ref)); atomic_read(&bfqq->ref));
@ -735,7 +736,7 @@ index f9787a6..d1f648d 100644
bfq_put_queue(bfqq); bfq_put_queue(bfqq);
} }
@@ -2492,6 +3020,25 @@ static void bfq_init_icq(struct io_cq *icq) @@ -2496,6 +3024,25 @@ static void bfq_init_icq(struct io_cq *icq)
struct bfq_io_cq *bic = icq_to_bic(icq); struct bfq_io_cq *bic = icq_to_bic(icq);
bic->ttime.last_end_request = jiffies; bic->ttime.last_end_request = jiffies;
@ -761,7 +762,7 @@ index f9787a6..d1f648d 100644
} }
static void bfq_exit_icq(struct io_cq *icq) static void bfq_exit_icq(struct io_cq *icq)
@@ -2505,6 +3052,13 @@ static void bfq_exit_icq(struct io_cq *icq) @@ -2509,6 +3056,13 @@ static void bfq_exit_icq(struct io_cq *icq)
} }
if (bic->bfqq[BLK_RW_SYNC]) { if (bic->bfqq[BLK_RW_SYNC]) {
@ -775,7 +776,7 @@ index f9787a6..d1f648d 100644
bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]); bfq_exit_bfqq(bfqd, bic->bfqq[BLK_RW_SYNC]);
bic->bfqq[BLK_RW_SYNC] = NULL; bic->bfqq[BLK_RW_SYNC] = NULL;
} }
@@ -2809,6 +3363,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd, @@ -2814,6 +3368,10 @@ static void bfq_update_idle_window(struct bfq_data *bfqd,
if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq)) if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq))
return; return;
@ -786,7 +787,7 @@ index f9787a6..d1f648d 100644
enable_idle = bfq_bfqq_idle_window(bfqq); enable_idle = bfq_bfqq_idle_window(bfqq);
if (atomic_read(&bic->icq.ioc->active_ref) == 0 || if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
@@ -2856,6 +3414,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, @@ -2861,6 +3419,7 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 || if (bfqq->entity.service > bfq_max_budget(bfqd) / 8 ||
!BFQQ_SEEKY(bfqq)) !BFQQ_SEEKY(bfqq))
bfq_update_idle_window(bfqd, bfqq, bic); bfq_update_idle_window(bfqd, bfqq, bic);
@ -794,7 +795,7 @@ index f9787a6..d1f648d 100644
bfq_log_bfqq(bfqd, bfqq, bfq_log_bfqq(bfqd, bfqq,
"rq_enqueued: idle_window=%d (seeky %d, mean %llu)", "rq_enqueued: idle_window=%d (seeky %d, mean %llu)",
@@ -2920,12 +3479,47 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, @@ -2925,12 +3484,47 @@ static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq,
static void bfq_insert_request(struct request_queue *q, struct request *rq) static void bfq_insert_request(struct request_queue *q, struct request *rq)
{ {
struct bfq_data *bfqd = q->elevator->elevator_data; struct bfq_data *bfqd = q->elevator->elevator_data;
@ -843,7 +844,7 @@ index f9787a6..d1f648d 100644
rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; rq->fifo_time = jiffies + bfqd->bfq_fifo_expire[rq_is_sync(rq)];
list_add_tail(&rq->queuelist, &bfqq->fifo); list_add_tail(&rq->queuelist, &bfqq->fifo);
@@ -3094,6 +3688,32 @@ static void bfq_put_request(struct request *rq) @@ -3099,6 +3693,32 @@ static void bfq_put_request(struct request *rq)
} }
/* /*
@ -876,7 +877,7 @@ index f9787a6..d1f648d 100644
* Allocate bfq data structures associated with this request. * Allocate bfq data structures associated with this request.
*/ */
static int bfq_set_request(struct request_queue *q, struct request *rq, static int bfq_set_request(struct request_queue *q, struct request *rq,
@@ -3105,6 +3725,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, @@ -3110,6 +3730,7 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
const int is_sync = rq_is_sync(rq); const int is_sync = rq_is_sync(rq);
struct bfq_queue *bfqq; struct bfq_queue *bfqq;
unsigned long flags; unsigned long flags;
@ -884,7 +885,7 @@ index f9787a6..d1f648d 100644
might_sleep_if(gfpflags_allow_blocking(gfp_mask)); might_sleep_if(gfpflags_allow_blocking(gfp_mask));
@@ -3117,15 +3738,30 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, @@ -3122,15 +3743,30 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
bfq_bic_update_cgroup(bic, bio); bfq_bic_update_cgroup(bic, bio);
@ -900,12 +901,11 @@ index f9787a6..d1f648d 100644
+ bic->saved_in_large_burst) + bic->saved_in_large_burst)
bfq_mark_bfqq_in_large_burst(bfqq); bfq_mark_bfqq_in_large_burst(bfqq);
- else - else
- bfq_clear_bfqq_in_large_burst(bfqq);
+ else { + else {
+ bfq_clear_bfqq_in_large_burst(bfqq); bfq_clear_bfqq_in_large_burst(bfqq);
+ if (bic->was_in_burst_list) + if (bic->was_in_burst_list)
+ hlist_add_head(&bfqq->burst_list_node, + hlist_add_head(&bfqq->burst_list_node,
+ &bfqd->burst_list); + &bfqd->burst_list);
+ } + }
+ } + }
+ } else { + } else {
@ -919,7 +919,7 @@ index f9787a6..d1f648d 100644
} }
} }
@@ -3137,6 +3773,26 @@ static int bfq_set_request(struct request_queue *q, struct request *rq, @@ -3142,6 +3778,26 @@ static int bfq_set_request(struct request_queue *q, struct request *rq,
rq->elv.priv[0] = bic; rq->elv.priv[0] = bic;
rq->elv.priv[1] = bfqq; rq->elv.priv[1] = bfqq;
@ -946,7 +946,7 @@ index f9787a6..d1f648d 100644
spin_unlock_irqrestore(q->queue_lock, flags); spin_unlock_irqrestore(q->queue_lock, flags);
return 0; return 0;
@@ -3290,6 +3946,7 @@ static void bfq_init_root_group(struct bfq_group *root_group, @@ -3295,6 +3951,7 @@ static void bfq_init_root_group(struct bfq_group *root_group,
root_group->my_entity = NULL; root_group->my_entity = NULL;
root_group->bfqd = bfqd; root_group->bfqd = bfqd;
#endif #endif
@ -954,7 +954,7 @@ index f9787a6..d1f648d 100644
for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) for (i = 0; i < BFQ_IOPRIO_CLASSES; i++)
root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT;
} }
@@ -3370,6 +4027,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) @@ -3375,6 +4032,8 @@ static int bfq_init_queue(struct request_queue *q, struct elevator_type *e)
bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async; bfqd->bfq_timeout[BLK_RW_ASYNC] = bfq_timeout_async;
bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync; bfqd->bfq_timeout[BLK_RW_SYNC] = bfq_timeout_sync;
@ -964,7 +964,7 @@ index f9787a6..d1f648d 100644
bfqd->bfq_large_burst_thresh = 11; bfqd->bfq_large_burst_thresh = 11;
diff --git a/block/bfq.h b/block/bfq.h diff --git a/block/bfq.h b/block/bfq.h
index 485d0c9..f73c942 100644 index 2bf54ae..fcce855 100644
--- a/block/bfq.h --- a/block/bfq.h
+++ b/block/bfq.h +++ b/block/bfq.h
@@ -183,6 +183,8 @@ struct bfq_group; @@ -183,6 +183,8 @@ struct bfq_group;
@ -1097,5 +1097,5 @@ index 485d0c9..f73c942 100644
static void bfq_put_queue(struct bfq_queue *bfqq); static void bfq_put_queue(struct bfq_queue *bfqq);
static void bfq_dispatch_insert(struct request_queue *q, struct request *rq); static void bfq_dispatch_insert(struct request_queue *q, struct request *rq);
-- --
1.9.1 2.10.0
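
The core of the early-queue-merge machinery carried by this patch is a per-group tree that orders queues by the sector of their next request, so a queue issuing I/O close to another queue's position can be found in logarithmic time and considered for merging. What follows is a minimal userspace sketch of that lookup idea only, using a plain binary tree and an invented pos_node type instead of the kernel's rb_node-based struct bfq_queue; it is an illustration, not the patch's code.

/*
 * Sketch of the rq_pos_tree lookup: nodes are ordered by the sector of
 * the queue's head request; return an exact match or NULL, and report
 * the last node visited so a caller could link a new node there.
 */
#include <stdio.h>

struct pos_node {
	unsigned long long sector;	/* sector of the queue's head request */
	struct pos_node *left, *right;
};

static struct pos_node *pos_tree_lookup(struct pos_node *root,
					unsigned long long sector,
					struct pos_node **parent)
{
	struct pos_node *n = root;

	*parent = NULL;
	while (n) {
		if (sector == n->sector)
			return n;
		*parent = n;
		n = sector < n->sector ? n->left : n->right;
	}
	return NULL;
}

int main(void)
{
	struct pos_node lo = { 512, NULL, NULL };
	struct pos_node hi = { 2048, NULL, NULL };
	struct pos_node root = { 1024, &lo, &hi };
	struct pos_node *parent, *hit;

	hit = pos_tree_lookup(&root, 2048, &parent);
	printf("found=%llu parent=%llu\n",
	       hit ? hit->sector : 0ULL, parent ? parent->sector : 0ULL);
	return 0;
}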
@@ -0,0 +1,111 @@
From a7fb2842267fd275cae9cf44dd3037469f75eeef Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Tue, 1 Nov 2016 12:54:20 +1100
Subject: [PATCH 09/25] Implement min and msec hrtimeout un/interruptible
schedule timeout variants with a lower resolution of 1ms to work around low
Hz time resolutions.
---
include/linux/sched.h | 6 +++++
kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 77 insertions(+)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index d752ef6..46544f4 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -457,6 +457,12 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
+
+extern signed long schedule_msec_hrtimeout(signed long timeout);
+extern signed long schedule_min_hrtimeout(void);
+extern signed long schedule_msec_hrtimeout_interruptible(signed long timeout);
+extern signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout);
+
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index c6ecedd..a47f5b3 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1796,3 +1796,74 @@ int __sched schedule_hrtimeout(ktime_t *expires,
return schedule_hrtimeout_range(expires, 0, mode);
}
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
+
+/*
+ * As per schedule_hrtimeout but takes a millisecond value and returns how
+ * many milliseconds are left.
+ */
+signed long __sched schedule_msec_hrtimeout(signed long timeout)
+{
+ struct hrtimer_sleeper t;
+ int delta, secs, jiffs;
+ ktime_t expires;
+
+ if (!timeout) {
+ __set_current_state(TASK_RUNNING);
+ return 0;
+ }
+
+ jiffs = msecs_to_jiffies(timeout);
+ /*
+ * If regular timer resolution is adequate or hrtimer resolution is not
+ * (yet) better than Hz, as would occur during startup, use regular
+ * timers.
+ */
+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
+ return schedule_timeout(jiffs);
+
+ secs = timeout / 1000;
+ delta = (timeout % 1000) * NSEC_PER_MSEC;
+ expires = ktime_set(secs, delta);
+
+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
+
+ hrtimer_init_sleeper(&t, current);
+
+ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
+
+ if (likely(t.task))
+ schedule();
+
+ hrtimer_cancel(&t.timer);
+ destroy_hrtimer_on_stack(&t.timer);
+
+ __set_current_state(TASK_RUNNING);
+
+ expires = hrtimer_expires_remaining(&t.timer);
+ timeout = ktime_to_ms(expires);
+ return timeout < 0 ? 0 : timeout;
+}
+
+EXPORT_SYMBOL(schedule_msec_hrtimeout);
+
+signed long __sched schedule_min_hrtimeout(void)
+{
+ return schedule_msec_hrtimeout(1);
+}
+
+EXPORT_SYMBOL(schedule_min_hrtimeout);
+
+signed long __sched schedule_msec_hrtimeout_interruptible(signed long timeout)
+{
+ __set_current_state(TASK_INTERRUPTIBLE);
+ return schedule_msec_hrtimeout(timeout);
+}
+EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
+
+signed long __sched schedule_msec_hrtimeout_uninterruptible(signed long timeout)
+{
+ __set_current_state(TASK_UNINTERRUPTIBLE);
+ return schedule_msec_hrtimeout(timeout);
+}
+EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
--
2.9.3
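
For orientation, this is roughly how a caller would use the interruptible variant added above; device_ready() and the 500ms budget are hypothetical stand-ins for this sketch, and only schedule_msec_hrtimeout_interruptible() comes from the patch.

#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/errno.h>

static bool device_ready(void)		/* hypothetical hardware predicate */
{
	return true;			/* stub for illustration */
}

static int wait_for_device(void)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(500);

	while (!device_ready()) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		/* sleeps ~5ms even at HZ=100, where one tick is 10ms */
		schedule_msec_hrtimeout_interruptible(5);
		if (signal_pending(current))
			return -ERESTARTSYS;
	}
	return 0;
}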
@@ -0,0 +1,48 @@
From a4f3820228ebab3d5d480d720fecebd3f7e71771 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Sat, 5 Nov 2016 09:27:36 +1100
Subject: [PATCH 10/25] Special case calls of schedule_timeout(1) to use the
min hrtimeout of 1ms, working around low Hz resolutions.
---
kernel/time/timer.c | 15 +++++++++++++--
1 file changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index ef3128f..3f72c13 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1750,6 +1750,17 @@ signed long __sched schedule_timeout(signed long timeout)
expire = timeout + jiffies;
+ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
+ /*
+ * Special case 1 as being a request for the minimum timeout
+ * and use highres timers to time out after 1ms to work around
+ * the granularity of low Hz tick timers.
+ */
+ if (!schedule_min_hrtimeout())
+ return 0;
+ goto out_timeout;
+ }
+
setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
__mod_timer(&timer, expire, false);
schedule();
@@ -1757,10 +1768,10 @@ signed long __sched schedule_timeout(signed long timeout)
/* Remove the timer from the object tracker */
destroy_timer_on_stack(&timer);
-
+out_timeout:
timeout = expire - jiffies;
- out:
+out:
return timeout < 0 ? 0 : timeout;
}
EXPORT_SYMBOL(schedule_timeout);
--
2.9.3
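
The gain is easiest to see numerically: schedule_timeout(1) means "sleep one tick", and a tick is 1000/HZ milliseconds. A small userspace sketch of that arithmetic, using common HZ configurations:

#include <stdio.h>

int main(void)
{
	const int hz_values[] = { 100, 250, 1000 };

	for (int i = 0; i < 3; i++) {
		int hz = hz_values[i];
		/* one jiffy expressed in milliseconds */
		printf("HZ=%4d: schedule_timeout(1) waits up to %4d ms, "
		       "schedule_min_hrtimeout() ~1 ms\n", hz, 1000 / hz);
	}
	return 0;
}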
@@ -0,0 +1,54 @@
From 534bc9d3e559420eaf57771f48d2c2f549dcc4d2 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Fri, 4 Nov 2016 09:25:54 +1100
Subject: [PATCH 11/25] Convert msleep to use hrtimers when active.
---
kernel/time/timer.c | 24 ++++++++++++++++++++++--
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 3f72c13..bc53598 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1892,7 +1892,19 @@ void __init init_timers(void)
*/
void msleep(unsigned int msecs)
{
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+ int jiffs = msecs_to_jiffies(msecs);
+ unsigned long timeout;
+
+ /*
+ * Use high resolution timers where the resolution of tick based
+ * timers is inadequate.
+ */
+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
+ while (msecs)
+ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
+ return;
+ }
+ timeout = msecs_to_jiffies(msecs) + 1;
while (timeout)
timeout = schedule_timeout_uninterruptible(timeout);
@@ -1906,7 +1918,15 @@ EXPORT_SYMBOL(msleep);
*/
unsigned long msleep_interruptible(unsigned int msecs)
{
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
+ int jiffs = msecs_to_jiffies(msecs);
+ unsigned long timeout;
+
+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
+ while (msecs && !signal_pending(current))
+ msecs = schedule_msec_hrtimeout_interruptible(msecs);
+ return msecs;
+ }
+ timeout = msecs_to_jiffies(msecs) + 1;
while (timeout && !signal_pending(current))
timeout = schedule_timeout_interruptible(timeout);
--
2.9.3
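
The legacy path costs msecs_to_jiffies(msecs) + 1 ticks, so msleep(1) at HZ=100 sleeps roughly 20ms. A userspace sketch of that rounding follows; the conversion is simplified and assumes HZ divides 1000 (the in-kernel formula also handles the general case).

#include <stdio.h>

/* simplified msecs_to_jiffies(): round up to whole ticks */
static unsigned int ms_to_jiffies(unsigned int ms, unsigned int hz)
{
	return (ms * hz + 999) / 1000;
}

int main(void)
{
	const unsigned int hz = 100;

	for (unsigned int ms = 1; ms <= 5; ms++) {
		unsigned int ticks = ms_to_jiffies(ms, hz) + 1; /* msleep adds 1 */
		printf("msleep(%u): ~%u ms via tick timers, ~%u ms via hrtimers\n",
		       ms, ticks * (1000 / hz), ms);
	}
	return 0;
}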
@@ -0,0 +1,226 @@
From 8fef7b75352d874af02881de3493f2ce2d47a341 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Mon, 20 Feb 2017 13:28:30 +1100
Subject: [PATCH 12/25] Replace all schedule_timeout(1) with
schedule_min_hrtimeout()
---
drivers/block/swim.c | 6 +++---
drivers/char/ipmi/ipmi_msghandler.c | 2 +-
drivers/char/ipmi/ipmi_ssif.c | 2 +-
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 +-
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 2 +-
drivers/mfd/ucb1x00-core.c | 2 +-
drivers/misc/sgi-xp/xpc_channel.c | 2 +-
drivers/net/caif/caif_hsi.c | 2 +-
drivers/ntb/test/ntb_perf.c | 2 +-
drivers/staging/comedi/drivers/ni_mio_common.c | 2 +-
fs/afs/vlocation.c | 2 +-
fs/btrfs/extent-tree.c | 2 +-
fs/btrfs/inode-map.c | 2 +-
sound/usb/line6/pcm.c | 2 +-
14 files changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
index b5afd49..7d09955 100644
--- a/drivers/block/swim.c
+++ b/drivers/block/swim.c
@@ -332,7 +332,7 @@ static inline void swim_motor(struct swim __iomem *base,
if (swim_readbit(base, MOTOR_ON))
break;
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
+ schedule_min_hrtimeout();
}
} else if (action == OFF) {
swim_action(base, MOTOR_OFF);
@@ -351,7 +351,7 @@ static inline void swim_eject(struct swim __iomem *base)
if (!swim_readbit(base, DISK_IN))
break;
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
+ schedule_min_hrtimeout();
}
swim_select(base, RELAX);
}
@@ -375,7 +375,7 @@ static inline int swim_step(struct swim __iomem *base)
for (wait = 0; wait < HZ; wait++) {
current->state = TASK_INTERRUPTIBLE;
- schedule_timeout(1);
+ schedule_min_hrtimeout();
swim_select(base, RELAX);
if (!swim_readbit(base, STEP))
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 92e53ac..a2418e7 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2953,7 +2953,7 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
/* Current message first, to preserve order */
while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
/* Wait for the message to clear out. */
- schedule_timeout(1);
+ schedule_min_hrtimeout();
}
/* No need for locks, the interface is down. */
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index cca6e5b..fd3c7da 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -1185,7 +1185,7 @@ static int ssif_remove(struct i2c_client *client)
/* make sure the driver is not looking for flags any more. */
while (ssif_info->ssif_state != SSIF_NORMAL)
- schedule_timeout(1);
+ schedule_min_hrtimeout();
ssif_info->stopping = true;
del_timer_sync(&ssif_info->retry_timer);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index b6a0806..b5b02cf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -235,7 +235,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
DRM_ERROR("SVGA device lockup.\n");
break;
}
- schedule_timeout(1);
+ schedule_min_hrtimeout();
if (interruptible && signal_pending(current)) {
ret = -ERESTARTSYS;
break;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c7e172..4c1555c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -156,7 +156,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
break;
}
if (lazy)
- schedule_timeout(1);
+ schedule_min_hrtimeout();
else if ((++count & 0x0F) == 0) {
/**
* FIXME: Use schedule_hr_timeout here for
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
index d6fb2e1..7ac951b 100644
--- a/drivers/mfd/ucb1x00-core.c
+++ b/drivers/mfd/ucb1x00-core.c
@@ -253,7 +253,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
break;
/* yield to other processes */
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
+ schedule_min_hrtimeout();
}
return UCB_ADC_DAT(val);
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
index 128d561..38e68e9 100644
--- a/drivers/misc/sgi-xp/xpc_channel.c
+++ b/drivers/misc/sgi-xp/xpc_channel.c
@@ -837,7 +837,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
atomic_inc(&ch->n_on_msg_allocate_wq);
prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
- ret = schedule_timeout(1);
+ ret = schedule_min_hrtimeout();
finish_wait(&ch->msg_allocate_wq, &wait);
atomic_dec(&ch->n_on_msg_allocate_wq);
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
index ddabce7..67fb5ce 100644
--- a/drivers/net/caif/caif_hsi.c
+++ b/drivers/net/caif/caif_hsi.c
@@ -944,7 +944,7 @@ static void cfhsi_wake_down(struct work_struct *work)
break;
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
+ schedule_min_hrtimeout();
retry--;
}
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
index 434e1d4..2f9543b 100644
--- a/drivers/ntb/test/ntb_perf.c
+++ b/drivers/ntb/test/ntb_perf.c
@@ -308,7 +308,7 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
last_sleep = jiffies;
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(1);
+ schedule_min_hrtimeout();
}
if (unlikely(kthread_should_stop()))
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index b2e3828..beae38b 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -4655,7 +4655,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
break;
set_current_state(TASK_INTERRUPTIBLE);
- if (schedule_timeout(1))
+ if (schedule_min_hrtimeout())
return -EIO;
}
if (i == timeout) {
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 45a8639..855d08e 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -129,7 +129,7 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
if (vl->upd_busy_cnt > 1) {
/* second+ BUSY - sleep a little bit */
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
+ schedule_min_hrtimeout();
}
continue;
}
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index dcd2e79..16bf891 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5952,7 +5952,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
if (flush != BTRFS_RESERVE_NO_FLUSH &&
btrfs_transaction_in_commit(fs_info))
- schedule_timeout(1);
+ schedule_min_hrtimeout();
if (delalloc_lock)
mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 144b119..03d2e8e 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -89,7 +89,7 @@ static int caching_kthread(void *data)
btrfs_release_path(path);
root->ino_cache_progress = last;
up_read(&fs_info->commit_root_sem);
- schedule_timeout(1);
+ schedule_min_hrtimeout();
goto again;
} else
continue;
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
index fab53f5..fda1ab5 100644
--- a/sound/usb/line6/pcm.c
+++ b/sound/usb/line6/pcm.c
@@ -131,7 +131,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
if (!alive)
break;
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1);
+ schedule_min_hrtimeout();
} while (--timeout > 0);
if (alive)
dev_err(line6pcm->line6->ifcdev,
--
2.9.3
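
All of the hunks above apply the same mechanical substitution; here it is once in isolation. mydev_busy() is a hypothetical condition invented for this sketch, and the helper comes from the earlier patch in this series.

#include <linux/sched.h>

static bool mydev_busy(void)		/* hypothetical device state */
{
	return false;			/* stub for illustration */
}

static void mydev_wait_idle(void)
{
	while (mydev_busy()) {
		set_current_state(TASK_INTERRUPTIBLE);
		/* was: schedule_timeout(1) -- up to a full tick, 10ms at HZ=100 */
		schedule_min_hrtimeout();	/* ~1ms regardless of HZ */
	}
}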
@@ -0,0 +1,397 @@
From 56e8b01452fbb6c1aa85b0a52fbd352fddf7e959 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Mon, 20 Feb 2017 13:29:16 +1100
Subject: [PATCH 13/25] Change all schedule_timeout with msecs_to_jiffies
potentially under 50ms to use schedule_msec_hrtimeout.
---
drivers/bluetooth/hci_qca.c | 2 +-
drivers/char/snsc.c | 4 ++--
drivers/media/pci/ivtv/ivtv-ioctl.c | 2 +-
drivers/media/pci/ivtv/ivtv-streams.c | 2 +-
drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +-
drivers/net/usb/lan78xx.c | 2 +-
drivers/net/usb/usbnet.c | 2 +-
drivers/scsi/fnic/fnic_scsi.c | 4 ++--
drivers/scsi/snic/snic_scsi.c | 2 +-
drivers/staging/lustre/lnet/lnet/lib-eq.c | 2 +-
drivers/staging/rts5208/rtsx.c | 2 +-
drivers/staging/speakup/speakup_acntpc.c | 4 ++--
drivers/staging/speakup/speakup_apollo.c | 2 +-
drivers/staging/speakup/speakup_decext.c | 2 +-
drivers/staging/speakup/speakup_decpc.c | 2 +-
drivers/staging/speakup/speakup_dectlk.c | 2 +-
drivers/staging/speakup/speakup_dtlk.c | 4 ++--
drivers/staging/speakup/speakup_keypc.c | 4 ++--
drivers/staging/speakup/synth.c | 2 +-
drivers/staging/unisys/visornic/visornic_main.c | 6 +++---
drivers/target/target_core_user.c | 2 +-
drivers/video/fbdev/omap/hwa742.c | 2 +-
drivers/video/fbdev/pxafb.c | 2 +-
23 files changed, 30 insertions(+), 30 deletions(-)
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
index 05c2307..6954d29 100644
--- a/drivers/bluetooth/hci_qca.c
+++ b/drivers/bluetooth/hci_qca.c
@@ -880,7 +880,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
* then host can communicate with new baudrate to controller
*/
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
+ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS));
set_current_state(TASK_INTERRUPTIBLE);
return 0;
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
index ec07f0e..3410b46 100644
--- a/drivers/char/snsc.c
+++ b/drivers/char/snsc.c
@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
add_wait_queue(&sd->sd_rq, &wait);
spin_unlock_irqrestore(&sd->sd_rlock, flags);
- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
+ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
remove_wait_queue(&sd->sd_rq, &wait);
if (signal_pending(current)) {
@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf,
add_wait_queue(&sd->sd_wq, &wait);
spin_unlock_irqrestore(&sd->sd_wlock, flags);
- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
+ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
remove_wait_queue(&sd->sd_wq, &wait);
if (signal_pending(current)) {
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
index 2dc4b20..8e061cf 100644
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
@@ -1151,7 +1151,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
TASK_UNINTERRUPTIBLE);
if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
break;
- schedule_timeout(msecs_to_jiffies(25));
+ schedule_msec_hrtimeout((25));
}
finish_wait(&itv->vsync_waitq, &wait);
mutex_lock(&itv->serialize_lock);
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
index d27c6df..e9ffc4e 100644
--- a/drivers/media/pci/ivtv/ivtv-streams.c
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
time_before(jiffies,
then + msecs_to_jiffies(2000))) {
- schedule_timeout(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout((10));
}
/* To convert jiffies to ms, we must multiply by 1000
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 838545c..34f8972 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -250,7 +250,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
} else {
/* the PCAN-USB needs time to init */
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
+ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
}
return err;
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 08f8703..3b3bc86 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -2544,7 +2544,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
while (!skb_queue_empty(&dev->rxq) &&
!skb_queue_empty(&dev->txq) &&
!skb_queue_empty(&dev->done)) {
- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
set_current_state(TASK_UNINTERRUPTIBLE);
netif_dbg(dev, ifdown, dev->net,
"waited for %d urb completions\n", temp);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 3de65ea..f8a4b18 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -767,7 +767,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
spin_lock_irqsave(&q->lock, flags);
while (!skb_queue_empty(q)) {
spin_unlock_irqrestore(&q->lock, flags);
- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
+ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
set_current_state(TASK_UNINTERRUPTIBLE);
spin_lock_irqsave(&q->lock, flags);
}
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index adb3d58..de73e78 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
/* wait for io cmpl */
while (atomic_read(&fnic->in_flight))
- schedule_timeout(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout((1));
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
@@ -2201,7 +2201,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
}
}
- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
+ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
/* walk again to check, if IOs are still pending in fw */
if (fnic_is_abts_pending(fnic, lr_sc))
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
index abada16..0bf30dc 100644
--- a/drivers/scsi/snic/snic_scsi.c
+++ b/drivers/scsi/snic/snic_scsi.c
@@ -2356,7 +2356,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
/* Wait for all the IOs that are entered in Qcmd */
while (atomic_read(&snic->ios_inflight))
- schedule_timeout(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout((1));
ret = snic_issue_hba_reset(snic, sc);
if (ret) {
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
index d05c6cc..3f62b6f 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
@@ -328,7 +328,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
schedule();
} else {
now = jiffies;
- schedule_timeout(msecs_to_jiffies(tms));
+ schedule_msec_hrtimeout((tms));
tms -= jiffies_to_msecs(jiffies - now);
if (tms < 0) /* no more wait but may have new event */
tms = 0;
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
index 68d75d0..aef88c4 100644
--- a/drivers/staging/rts5208/rtsx.c
+++ b/drivers/staging/rts5208/rtsx.c
@@ -537,7 +537,7 @@ static int rtsx_polling_thread(void *__dev)
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
+ schedule_msec_hrtimeout((POLLING_INTERVAL));
/* lock the device pointers */
mutex_lock(&dev->dev_mutex);
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
index efb791b..fd02fb2 100644
--- a/drivers/staging/speakup/speakup_acntpc.c
+++ b/drivers/staging/speakup/speakup_acntpc.c
@@ -204,7 +204,7 @@ static void do_catch_up(struct spk_synth *synth)
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
- schedule_timeout(msecs_to_jiffies(full_time_val));
+ schedule_msec_hrtimeout((full_time_val));
continue;
}
set_current_state(TASK_RUNNING);
@@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth)
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- schedule_timeout(msecs_to_jiffies(delay_time_val));
+ schedule_msec_hrtimeout((delay_time_val));
jiff_max = jiffies+jiffy_delta_val;
}
}
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
index 3cbc8a7..3c17854 100644
--- a/drivers/staging/speakup/speakup_apollo.c
+++ b/drivers/staging/speakup/speakup_apollo.c
@@ -172,7 +172,7 @@ static void do_catch_up(struct spk_synth *synth)
outb(UART_MCR_DTR, speakup_info.port_tts + UART_MCR);
outb(UART_MCR_DTR | UART_MCR_RTS,
speakup_info.port_tts + UART_MCR);
- schedule_timeout(msecs_to_jiffies(full_time_val));
+ schedule_msec_hrtimeout((full_time_val));
continue;
}
if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
index 1a5cf3d..fa2b4e1 100644
--- a/drivers/staging/speakup/speakup_decext.c
+++ b/drivers/staging/speakup/speakup_decext.c
@@ -186,7 +186,7 @@ static void do_catch_up(struct spk_synth *synth)
if (ch == '\n')
ch = 0x0D;
if (synth_full() || !spk_serial_out(ch)) {
- schedule_timeout(msecs_to_jiffies(delay_time_val));
+ schedule_msec_hrtimeout((delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
index d6479bd..f7554bf 100644
--- a/drivers/staging/speakup/speakup_decpc.c
+++ b/drivers/staging/speakup/speakup_decpc.c
@@ -403,7 +403,7 @@ static void do_catch_up(struct spk_synth *synth)
if (ch == '\n')
ch = 0x0D;
if (dt_sendchar(ch)) {
- schedule_timeout(msecs_to_jiffies(delay_time_val));
+ schedule_msec_hrtimeout((delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
index 7646567..639192e 100644
--- a/drivers/staging/speakup/speakup_dectlk.c
+++ b/drivers/staging/speakup/speakup_dectlk.c
@@ -251,7 +251,7 @@ static void do_catch_up(struct spk_synth *synth)
if (ch == '\n')
ch = 0x0D;
if (synth_full_val || !spk_serial_out(ch)) {
- schedule_timeout(msecs_to_jiffies(delay_time_val));
+ schedule_msec_hrtimeout((delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
index 38aa401..1640519 100644
--- a/drivers/staging/speakup/speakup_dtlk.c
+++ b/drivers/staging/speakup/speakup_dtlk.c
@@ -217,7 +217,7 @@ static void do_catch_up(struct spk_synth *synth)
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
- schedule_timeout(msecs_to_jiffies(delay_time_val));
+ schedule_msec_hrtimeout((delay_time_val));
continue;
}
set_current_state(TASK_RUNNING);
@@ -233,7 +233,7 @@ static void do_catch_up(struct spk_synth *synth)
delay_time_val = delay_time->u.n.value;
jiffy_delta_val = jiffy_delta->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- schedule_timeout(msecs_to_jiffies(delay_time_val));
+ schedule_msec_hrtimeout((delay_time_val));
jiff_max = jiffies + jiffy_delta_val;
}
}
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
index 5e2170b..30b5df7 100644
--- a/drivers/staging/speakup/speakup_keypc.c
+++ b/drivers/staging/speakup/speakup_keypc.c
@@ -206,7 +206,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
full_time_val = full_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
if (synth_full()) {
- schedule_timeout(msecs_to_jiffies(full_time_val));
+ schedule_msec_hrtimeout((full_time_val));
continue;
}
set_current_state(TASK_RUNNING);
@@ -239,7 +239,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
jiffy_delta_val = jiffy_delta->u.n.value;
delay_time_val = delay_time->u.n.value;
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
- schedule_timeout(msecs_to_jiffies(delay_time_val));
+ schedule_msec_hrtimeout((delay_time_val));
jiff_max = jiffies+jiffy_delta_val;
}
}
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
index a61c02b..14299e5 100644
--- a/drivers/staging/speakup/synth.c
+++ b/drivers/staging/speakup/synth.c
@@ -120,7 +120,7 @@ void spk_do_catch_up(struct spk_synth *synth)
if (ch == '\n')
ch = synth->procspeech;
if (!spk_serial_out(ch)) {
- schedule_timeout(msecs_to_jiffies(full_time_val));
+ schedule_msec_hrtimeout((full_time_val));
continue;
}
if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index c1f674f..4f30a7a 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -468,7 +468,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
}
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
- wait += schedule_timeout(msecs_to_jiffies(10));
+ wait += schedule_msec_hrtimeout((10));
spin_lock_irqsave(&devdata->priv_lock, flags);
}
@@ -479,7 +479,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
- schedule_timeout(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout((10));
spin_lock_irqsave(&devdata->priv_lock, flags);
if (atomic_read(&devdata->usage))
break;
@@ -611,7 +611,7 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
}
set_current_state(TASK_INTERRUPTIBLE);
spin_unlock_irqrestore(&devdata->priv_lock, flags);
- wait += schedule_timeout(msecs_to_jiffies(10));
+ wait += schedule_msec_hrtimeout((10));
spin_lock_irqsave(&devdata->priv_lock, flags);
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8041710..f907a81 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -451,7 +451,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
pr_debug("sleeping for ring space\n");
spin_unlock_irq(&udev->cmdr_lock);
- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
+ ret = schedule_msec_hrtimeout((TCMU_TIME_OUT));
finish_wait(&udev->wait_cmdr, &__wait);
if (!ret) {
pr_warn("tcmu: command timed out\n");
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
index a4ee65b..cf38bcb 100644
--- a/drivers/video/fbdev/omap/hwa742.c
+++ b/drivers/video/fbdev/omap/hwa742.c
@@ -926,7 +926,7 @@ static void hwa742_resume(void)
if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
break;
set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(5));
+ schedule_msec_hrtimeout((5));
}
hwa742_set_update_mode(hwa742.update_mode_before_suspend);
}
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
index ef73f14..7b5483b 100644
--- a/drivers/video/fbdev/pxafb.c
+++ b/drivers/video/fbdev/pxafb.c
@@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
mutex_unlock(&fbi->ctrlr_lock);
set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(30));
+ schedule_msec_hrtimeout((30));
}
pr_debug("%s(): task ending\n", __func__);
--
2.9.3
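
Again one pattern repeated across drivers: a wait expressed in milliseconds no longer round-trips through jiffies. An isolated sketch of the polling-thread shape seen above; POLL_MS and mydev_poll() are stand-ins, not taken from any of the drivers touched.

#include <linux/kthread.h>
#include <linux/sched.h>

#define POLL_MS 10			/* hypothetical polling period */

static void mydev_poll(void)		/* stub standing in for real work */
{
}

static int mydev_poll_thread(void *arg)
{
	while (!kthread_should_stop()) {
		mydev_poll();
		set_current_state(TASK_INTERRUPTIBLE);
		/* was: schedule_timeout(msecs_to_jiffies(POLL_MS)); */
		schedule_msec_hrtimeout(POLL_MS);
	}
	return 0;
}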
@@ -0,0 +1,325 @@
From af94b59651831b7e176ce8cb98441bdccb87eac0 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Mon, 20 Feb 2017 13:30:07 +1100
Subject: [PATCH 14/25] Replace all calls to schedule_timeout_interruptible of
potentially under 50ms to use schedule_msec_hrtimeout_interruptible.
---
drivers/hwmon/fam15h_power.c | 2 +-
drivers/iio/light/tsl2563.c | 6 +-----
drivers/media/i2c/msp3400-driver.c | 4 ++--
drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++---
drivers/media/radio/radio-mr800.c | 2 +-
drivers/media/radio/radio-tea5777.c | 2 +-
drivers/media/radio/tea575x.c | 2 +-
drivers/misc/panel.c | 2 +-
drivers/parport/ieee1284.c | 2 +-
drivers/parport/ieee1284_ops.c | 2 +-
drivers/platform/x86/intel_ips.c | 8 ++++----
net/core/pktgen.c | 2 +-
sound/soc/codecs/wm8350.c | 12 ++++++------
sound/soc/codecs/wm8900.c | 2 +-
sound/soc/codecs/wm9713.c | 4 ++--
15 files changed, 27 insertions(+), 31 deletions(-)
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index 15aa49d..991e8a7 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -238,7 +238,7 @@ static ssize_t acc_show_power(struct device *dev,
prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
}
- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
+ leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
if (leftover)
return 0;
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 04598ae..a8c095d 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
default:
delay = 402;
}
- /*
- * TODO: Make sure that we wait at least required delay but why we
- * have to extend it one tick more?
- */
- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
+ schedule_msec_hrtimeout_interruptible(delay + 1);
}
static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
index 201a9800..5cebabc 100644
--- a/drivers/media/i2c/msp3400-driver.c
+++ b/drivers/media/i2c/msp3400-driver.c
@@ -184,7 +184,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
break;
dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
dev, addr);
- schedule_timeout_interruptible(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout_interruptible((10));
}
if (err == 3) {
dev_warn(&client->dev, "resetting chip, sound will go off.\n");
@@ -225,7 +225,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
break;
dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
dev, addr);
- schedule_timeout_interruptible(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout_interruptible((10));
}
if (err == 3) {
dev_warn(&client->dev, "resetting chip, sound will go off.\n");
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
index f752f39..23372af6 100644
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
curout = (curout & ~0xF) | 1;
write_reg(curout, IVTV_REG_GPIO_OUT);
/* We could use something else for smaller time */
- schedule_timeout_interruptible(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout_interruptible((1));
curout |= 2;
write_reg(curout, IVTV_REG_GPIO_OUT);
curdir &= ~0x80;
@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
curout = read_reg(IVTV_REG_GPIO_OUT);
curout &= ~(1 << itv->card->xceive_pin);
write_reg(curout, IVTV_REG_GPIO_OUT);
- schedule_timeout_interruptible(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout_interruptible((1));
curout |= 1 << itv->card->xceive_pin;
write_reg(curout, IVTV_REG_GPIO_OUT);
- schedule_timeout_interruptible(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout_interruptible((1));
return 0;
}
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
index c2927fd..bdee269 100644
--- a/drivers/media/radio/radio-mr800.c
+++ b/drivers/media/radio/radio-mr800.c
@@ -382,7 +382,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
retval = -ENODATA;
break;
}
- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
+ if (schedule_msec_hrtimeout_interruptible((10))) {
retval = -ERESTARTSYS;
break;
}
diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
index 83fe7ab..aaae5fa 100644
--- a/drivers/media/radio/radio-tea5777.c
+++ b/drivers/media/radio/radio-tea5777.c
@@ -249,7 +249,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
}
if (wait) {
- if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
+ if (schedule_msec_hrtimeout_interruptible((wait)))
return -ERESTARTSYS;
}
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
index 4dc2067..29f4416 100644
--- a/drivers/media/radio/tea575x.c
+++ b/drivers/media/radio/tea575x.c
@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
for (;;) {
if (time_after(jiffies, timeout))
break;
- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
+ if (schedule_msec_hrtimeout_interruptible((10))) {
/* some signal arrived, stop search */
tea->val &= ~TEA575X_BIT_SEARCH;
snd_tea575x_set_freq(tea);
diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c
index 6030ac5..f0c1a101 100644
--- a/drivers/misc/panel.c
+++ b/drivers/misc/panel.c
@@ -760,7 +760,7 @@ static void long_sleep(int ms)
if (in_interrupt())
mdelay(ms);
else
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
+ schedule_msec_hrtimeout_interruptible((ms));
}
/*
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
index f9fd4b3..00ad2f3 100644
--- a/drivers/parport/ieee1284.c
+++ b/drivers/parport/ieee1284.c
@@ -215,7 +215,7 @@ int parport_wait_peripheral(struct parport *port,
/* parport_wait_event didn't time out, but the
* peripheral wasn't actually ready either.
* Wait for another 10ms. */
- schedule_timeout_interruptible(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout_interruptible((10));
}
}
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
index c0e7d21..e1b4fd4 100644
--- a/drivers/parport/ieee1284_ops.c
+++ b/drivers/parport/ieee1284_ops.c
@@ -536,7 +536,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
/* Yield the port for a while. */
if (count && dev->port->irq != PARPORT_IRQ_NONE) {
parport_release (dev);
- schedule_timeout_interruptible(msecs_to_jiffies(40));
+ schedule_msec_hrtimeout_interruptible((40));
parport_claim_or_block (dev);
}
else
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index 55663b3..0363fed 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -812,7 +812,7 @@ static int ips_adjust(void *data)
ips_gpu_lower(ips);
sleep:
- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
+ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
} while (!kthread_should_stop());
dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
@@ -991,7 +991,7 @@ static int ips_monitor(void *data)
seqno_timestamp = get_jiffies_64();
old_cpu_power = thm_readl(THM_CEC);
- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
/* Collect an initial average */
for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
@@ -1018,7 +1018,7 @@ static int ips_monitor(void *data)
mchp_samples[i] = mchp;
}
- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
if (kthread_should_stop())
break;
}
@@ -1045,7 +1045,7 @@ static int ips_monitor(void *data)
* us to reduce the sample frequency if the CPU and GPU are idle.
*/
old_cpu_power = thm_readl(THM_CEC);
- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
last_sample_period = IPS_SAMPLE_PERIOD;
setup_deferrable_timer_on_stack(&timer, monitor_timeout,
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 8e69ce4..0227415 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -1992,7 +1992,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
mutex_unlock(&pktgen_thread_lock);
pr_debug("%s: waiting for %s to disappear....\n",
__func__, ifname);
- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
+ schedule_msec_hrtimeout_interruptible((msec_per_try));
mutex_lock(&pktgen_thread_lock);
if (++i >= max_tries) {
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
index 2efc5b4..3e3248c 100644
--- a/sound/soc/codecs/wm8350.c
+++ b/sound/soc/codecs/wm8350.c
@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work)
out2->ramp == WM8350_RAMP_UP) {
/* delay is longer over 0dB as increases are larger */
if (i >= WM8350_OUTn_0dB)
- schedule_timeout_interruptible(msecs_to_jiffies
+ schedule_msec_hrtimeout_interruptible(
(2));
else
- schedule_timeout_interruptible(msecs_to_jiffies
+ schedule_msec_hrtimeout_interruptible(
(1));
} else
udelay(50); /* doesn't matter if we delay longer */
@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
(platform->dis_out4 << 6));
/* wait for discharge */
- schedule_timeout_interruptible(msecs_to_jiffies
+ schedule_msec_hrtimeout_interruptible(
(platform->
cap_discharge_msecs));
@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
WM8350_VBUFEN);
/* wait for vmid */
- schedule_timeout_interruptible(msecs_to_jiffies
+ schedule_msec_hrtimeout_interruptible(
(platform->
vmid_charge_msecs));
@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
/* wait */
- schedule_timeout_interruptible(msecs_to_jiffies
+ schedule_msec_hrtimeout_interruptible(
(platform->
vmid_discharge_msecs));
@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
pm1 | WM8350_OUTPUT_DRAIN_EN);
/* wait */
- schedule_timeout_interruptible(msecs_to_jiffies
+ schedule_msec_hrtimeout_interruptible(
(platform->drain_msecs));
pm1 &= ~WM8350_BIASEN;
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index c77b49a..fc50456 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
/* Need to let things settle before stopping the clock
* to ensure that restart works, see "Stopping the
* master clock" in the datasheet. */
- schedule_timeout_interruptible(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout_interruptible((1));
snd_soc_write(codec, WM8900_REG_POWER2,
WM8900_REG_POWER2_SYSCLK_ENA);
break;
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index 7e48221..0c85a20 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
/* Gracefully shut down the voice interface. */
snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0200);
- schedule_timeout_interruptible(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout_interruptible((1));
snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
snd_soc_update_bits(codec, AC97_EXTENDED_MID, 0x1000, 0x1000);
@@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
wm9713->pll_in = freq_in;
/* wait 10ms AC97 link frames for the link to stabilise */
- schedule_timeout_interruptible(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout_interruptible((10));
return 0;
}
--
2.9.3
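
The interruptible form keeps the same return convention as schedule_timeout_interruptible(): a nonzero return means the sleep ended early, which the call sites above translate into -ERESTARTSYS. A minimal sketch, with mydev_settle() being a hypothetical caller:

#include <linux/sched.h>
#include <linux/errno.h>

static int mydev_settle(void)
{
	/* let the hardware settle ~10ms without consuming whole ticks */
	if (schedule_msec_hrtimeout_interruptible(10))
		return -ERESTARTSYS;	/* a signal cut the sleep short */
	return 0;
}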
@@ -0,0 +1,160 @@
From 1137ff2bfa5eb63b53747fe303fdb3937c5e1077 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Mon, 20 Feb 2017 13:30:32 +1100
Subject: [PATCH 15/25] Replace all calls to schedule_timeout_uninterruptible
of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible
---
drivers/media/pci/cx18/cx18-gpio.c | 4 ++--
drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++--
drivers/rtc/rtc-wm8350.c | 6 +++---
drivers/scsi/lpfc/lpfc_scsi.c | 2 +-
sound/pci/maestro3.c | 4 ++--
sound/soc/codecs/rt5631.c | 4 ++--
sound/soc/soc-dapm.c | 2 +-
7 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
index 38dc6b8..3cd3098 100644
--- a/drivers/media/pci/cx18/cx18-gpio.c
+++ b/drivers/media/pci/cx18/cx18-gpio.c
@@ -95,11 +95,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
/* Assert */
gpio_update(cx, mask, ~active_lo);
- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
+ schedule_msec_hrtimeout_uninterruptible((assert_msecs));
/* Deassert */
gpio_update(cx, mask, ~active_hi);
- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
+ schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
}
/*
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
index 356aba9..d2cc761 100644
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
* doesn't seem to have as many firmware restart cycles...
*
* As a test, we're sticking in a 1/100s delay here */
- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout_uninterruptible((10));
return 0;
@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
i = 5000;
do {
- schedule_timeout_uninterruptible(msecs_to_jiffies(40));
+ schedule_msec_hrtimeout_uninterruptible((40));
/* Todo... wait for sync command ... */
read_register(priv->net_dev, IPW_REG_INTA, &inta);
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
index fa247de..f1a28d8 100644
--- a/drivers/rtc/rtc-wm8350.c
+++ b/drivers/rtc/rtc-wm8350.c
@@ -121,7 +121,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
/* Wait until confirmation of stopping */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout_uninterruptible((1));
} while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
if (!retries) {
@@ -204,7 +204,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
/* Wait until confirmation of stopping */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout_uninterruptible((1));
} while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
@@ -227,7 +227,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
/* Wait until confirmation */
do {
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
+ schedule_msec_hrtimeout_uninterruptible((1));
} while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
if (rtc_ctrl & WM8350_RTC_ALMSTS)
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index ad350d9..69a58a8 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -5109,7 +5109,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
tgt_id, lun_id, context);
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
while (time_after(later, jiffies) && cnt) {
- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
+ schedule_msec_hrtimeout_uninterruptible((20));
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
}
if (cnt) {
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index cafea6d..d374514 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
outw(0, io + GPIO_DATA);
outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
+ schedule_msec_hrtimeout_uninterruptible((delay1));
outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
udelay(5);
@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
outw(~0, io + GPIO_MASK);
- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
+ schedule_msec_hrtimeout_uninterruptible((delay2));
if (! snd_m3_try_read_vendor(chip))
break;
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
index 0e41808..611cb9f 100644
--- a/sound/soc/codecs/rt5631.c
+++ b/sound/soc/codecs/rt5631.c
@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable)
hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
if (enable) {
- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout_uninterruptible((10));
/* config one-bit depop parameter */
rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f);
snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable)
hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
if (enable) {
- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
+ schedule_msec_hrtimeout_uninterruptible((10));
/* config depop sequence parameter */
rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 27dd02e..7ba49f4 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -134,7 +134,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
static void pop_wait(u32 pop_time)
{
if (pop_time)
- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
+ schedule_msec_hrtimeout_uninterruptible((pop_time));
}
static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
--
2.9.3


@ -0,0 +1,84 @@
From 37496baeea800e745a77620e90660496135f7fa5 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Mon, 20 Feb 2017 13:31:42 +1100
Subject: [PATCH 16/25] Fix build for disabled highres timers with hrtimeout
code.
---
include/linux/freezer.h | 1 +
include/linux/sched.h | 22 ++++++++++++++++++++++
kernel/time/timer.c | 2 ++
3 files changed, 25 insertions(+)
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index dd03e83..2fda682 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -296,6 +296,7 @@ static inline void set_freezable(void) {}
#define wait_event_freezekillable_unsafe(wq, condition) \
wait_event_killable(wq, condition)
+#define pm_freezing (false)
#endif /* !CONFIG_FREEZER */
#endif /* FREEZER_H_INCLUDED */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 46544f4..680494d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -458,10 +458,32 @@ extern signed long schedule_timeout_killable(signed long timeout);
extern signed long schedule_timeout_uninterruptible(signed long timeout);
extern signed long schedule_timeout_idle(signed long timeout);
+#ifdef CONFIG_HIGH_RES_TIMERS
extern signed long schedule_msec_hrtimeout(signed long timeout);
extern signed long schedule_min_hrtimeout(void);
extern signed long schedule_msec_hrtimeout_interruptible(signed long timeout);
extern signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout);
+#else
+static inline signed long schedule_msec_hrtimeout(signed long timeout)
+{
+ return schedule_timeout(msecs_to_jiffies(timeout));
+}
+
+static inline signed long schedule_min_hrtimeout(void)
+{
+ return schedule_timeout(1);
+}
+
+static inline signed long schedule_msec_hrtimeout_interruptible(signed long timeout)
+{
+ return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
+}
+
+static inline signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout)
+{
+ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
+}
+#endif
asmlinkage void schedule(void);
extern void schedule_preempt_disabled(void);
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index bc53598..0e22641 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -1750,6 +1750,7 @@ signed long __sched schedule_timeout(signed long timeout)
expire = timeout + jiffies;
+#ifdef CONFIG_HIGH_RES_TIMERS
if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
/*
* Special case 1 as being a request for the minimum timeout
@@ -1760,6 +1761,7 @@ signed long __sched schedule_timeout(signed long timeout)
return 0;
goto out_timeout;
}
+#endif
setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
__mod_timer(&timer, expire, false);
--
2.9.3
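A worked example (assuming HZ=100, i.e. 10 ms per jiffy) of what the fallback above costs when CONFIG_HIGH_RES_TIMERS is off:

/* schedule_msec_hrtimeout_uninterruptible(1)
 *   -> schedule_timeout_uninterruptible(msecs_to_jiffies(1))
 *   -> 1 jiffy, so up to 10 ms instead of ~1 ms
 * schedule_msec_hrtimeout_uninterruptible(40)
 *   -> msecs_to_jiffies(40) == 4 jiffies == 40 ms, already tick-aligned
 * The fallback is therefore correct, only coarser than the hrtimer path. */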


@ -0,0 +1,61 @@
From a0d29f014dbcb29649dff1a9b8df58bad7be3926 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Wed, 7 Dec 2016 21:13:16 +1100
Subject: [PATCH 18/25] Make threaded IRQs optionally the default which can be
disabled.
---
kernel/irq/Kconfig | 14 ++++++++++++++
kernel/irq/manage.c | 10 ++++++++++
2 files changed, 24 insertions(+)
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 3bbfd6a..351bf16 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -95,6 +95,20 @@ config IRQ_DOMAIN_DEBUG
config IRQ_FORCED_THREADING
bool
+config FORCE_IRQ_THREADING
+ bool "Make IRQ threading compulsory"
+ depends on IRQ_FORCED_THREADING
+ default y
+ ---help---
+
+ Make IRQ threading mandatory for any IRQ handlers that support it
+ instead of being optional and requiring the threadirqs kernel
+ parameter. They can instead be disabled with the
+ nothreadirqs kernel parameter.
+
+ Enable if you are building for a desktop or low latency system,
+ otherwise say N.
+
config SPARSE_IRQ
bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
---help---
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 6b66959..6b3fb17 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -22,7 +22,17 @@
#include "internals.h"
#ifdef CONFIG_IRQ_FORCED_THREADING
+#ifdef CONFIG_FORCE_IRQ_THREADING
+__read_mostly bool force_irqthreads = true;
+#else
__read_mostly bool force_irqthreads;
+#endif
+static int __init setup_noforced_irqthreads(char *arg)
+{
+ force_irqthreads = false;
+ return 0;
+}
+early_param("nothreadirqs", setup_noforced_irqthreads);
static int __init setup_forced_irqthreads(char *arg)
{
--
2.9.3
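Nothing changes at the driver API level; with CONFIG_FORCE_IRQ_THREADING=y the core behaves as if threadirqs were always passed, and nothreadirqs restores plain hardirq handlers. A hypothetical driver, for illustration only:

#include <linux/interrupt.h>

static irqreturn_t demo_isr(int irq, void *dev_id)
{
	/* runs in hardirq context normally; in an IRQ thread when
	 * threading is forced (unless the line is IRQF_NO_THREAD) */
	return IRQ_HANDLED;
}

static int demo_attach(int irq, void *dev)
{
	/* handlers registered this way are eligible for forced threading */
	return request_irq(irq, demo_isr, 0, "demo", dev);
}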


@ -0,0 +1,69 @@
From a278cad439033005610ddda23882f2c681c669d1 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Mon, 20 Feb 2017 13:32:58 +1100
Subject: [PATCH 20/25] Don't use hrtimer overlay when pm_freezing since some
drivers still don't correctly use freezable timeouts.
---
kernel/time/hrtimer.c | 2 +-
kernel/time/timer.c | 9 +++++----
2 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 26ac1f8..25e1555 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1818,7 +1818,7 @@ signed long __sched schedule_msec_hrtimeout(signed long timeout)
* (yet) better than Hz, as would occur during startup, use regular
* timers.
*/
- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
return schedule_timeout(jiffs);
delta = (timeout % 1000) * NSEC_PER_MSEC;
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
index 0e22641..45a6e1f 100644
--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -42,6 +42,7 @@
#include <linux/sched/sysctl.h>
#include <linux/slab.h>
#include <linux/compat.h>
+#include <linux/freezer.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
@@ -1901,12 +1902,12 @@ void msleep(unsigned int msecs)
* Use high resolution timers where the resolution of tick based
* timers is inadequate.
*/
- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
while (msecs)
msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
return;
}
- timeout = msecs_to_jiffies(msecs) + 1;
+ timeout = jiffs + 1;
while (timeout)
timeout = schedule_timeout_uninterruptible(timeout);
@@ -1923,12 +1924,12 @@ unsigned long msleep_interruptible(unsigned int msecs)
int jiffs = msecs_to_jiffies(msecs);
unsigned long timeout;
- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
while (msecs && !signal_pending(current))
msecs = schedule_msec_hrtimeout_interruptible(msecs);
return msecs;
}
- timeout = msecs_to_jiffies(msecs) + 1;
+ timeout = jiffs + 1;
while (timeout && !signal_pending(current))
timeout = schedule_timeout_interruptible(timeout);
--
2.9.3


@ -0,0 +1,34 @@
From da915e0f3abeb61f6a132bb77b7d0a9bf0573233 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Mon, 20 Feb 2017 13:38:23 +1100
Subject: [PATCH 21/25] Make writeback throttling default enabled.
---
block/Kconfig | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 8bf114a..83e6f9d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -123,7 +123,7 @@ config BLK_CMDLINE_PARSER
config BLK_WBT
bool "Enable support for block device writeback throttling"
- default n
+ default y
---help---
Enabling this option enables the block layer to throttle buffered
background writeback from the VM, making it more smooth and having
@@ -133,7 +133,7 @@ config BLK_WBT
config BLK_WBT_SQ
bool "Single queue writeback throttling"
- default n
+ default y
depends on BLK_WBT
---help---
Enable writeback throttling by default on legacy single queue devices
--
2.9.3


@ -0,0 +1,43 @@
From 2f96168f72bbd431c0e6d28b44393e98b49ca787 Mon Sep 17 00:00:00 2001
From: Con Kolivas <kernel@kolivas.org>
Date: Mon, 20 Feb 2017 13:48:54 +1100
Subject: [PATCH 22/25] Swap sucks.
---
include/linux/swap.h | 6 +-----
mm/vmscan.c | 2 +-
2 files changed, 2 insertions(+), 6 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 7f47b70..1c2ed28 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -361,11 +361,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
-/* Swap 50% full? Release swapcache more aggressively.. */
-static inline bool vm_swap_full(void)
-{
- return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
-}
+#define vm_swap_full() 1
static inline long get_nr_swap_pages(void)
{
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 532a2a7..15e4260 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -141,7 +141,7 @@ struct scan_control {
/*
* From 0 .. 100. Higher means more swappy.
*/
-int vm_swappiness = 60;
+int vm_swappiness = 33;
/*
* The total number of pages which are beyond the high watermark within all
* zones.
--
2.9.3
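Note that vm_swappiness remains an ordinary sysctl, so the new built-in default of 33 can be confirmed or overridden at run time; the vm_swap_full() change, by contrast, is compile-time only. A small user-space check (illustrative):

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/swappiness", "r");
	int v;

	if (f && fscanf(f, "%d", &v) == 1)
		printf("vm.swappiness = %d\n", v);	/* 33 with this patch */
	if (f)
		fclose(f);
	return 0;
}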


@ -1,33 +1,51 @@
WARNING
This patch works with gcc versions 4.9+ and with kernel version 3.15+ and should
NOT be applied when compiling on older versions of gcc due to key name changes
of the march flags introduced with the version 4.9 release of gcc.[1]
Use the older version of this patch hosted on the same github for older
versions of gcc.
FEATURES
This patch adds additional CPU options to the Linux kernel accessible under:
Processor type and features --->
Processor family --->
The expanded microarchitectures include:
* AMD Improved K8-family
* AMD K10-family
* AMD Family 10h (Barcelona)
* AMD Family 14h (Bobcat)
* AMD Family 16h (Jaguar)
* AMD Family 15h (Bulldozer)
* AMD Family 15h (Piledriver)
* AMD Family 15h (Steamroller)
* AMD Family 15h (Excavator)
* AMD Family 17h (Zen)
* Intel Silvermont low-power processors
* Intel 1st Gen Core i3/i5/i7 (Nehalem)
* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
* Intel 4th Gen Core i3/i5/i7 (Haswell)
* Intel 5th Gen Core i3/i5/i7 (Broadwell)
* Intel 6th Gen Core i3/i5/i7 (Skylake)
It also offers to compile passing the 'native' option which, "selects the CPU
to generate code for at compilation time by determining the processor type of
the compiling machine. Using -march=native enables all instruction subsets
supported by the local machine and will produce code optimized for the local
machine under the constraints of the selected instruction set."[3]
MINOR NOTES
This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
changes. Note that upstream is using the deprecated 'match=atom' flags when I
believe it should use the newer 'march=bonnell' flag for atom processors.[2]
It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The
recommendation is to use the 'atom' option instead.
BENEFITS
Small but real speed increases are measurable using a make endpoint comparing
a generic kernel to one built with one of the respective microarchs.
@ -38,8 +56,18 @@ REQUIREMENTS
linux version >=3.15
gcc version >=4.9
ACKNOWLEDGMENTS
This patch builds on the seminal work by Jeroen.[5]
REFERENCES
1. https://gcc.gnu.org/gcc-4.9/changes.html
2. https://bugzilla.kernel.org/show_bug.cgi?id=77461
3. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
4. https://github.com/graysky2/kernel_gcc_patch/issues/15
5. http://www.linuxforge.net/docs/linux/linux-gcc.php
--- a/arch/x86/include/asm/module.h 2016-12-11 14:17:54.000000000 -0500
+++ b/arch/x86/include/asm/module.h 2017-01-06 20:44:36.602227264 -0500
@@ -15,6 +15,24 @@
#define MODULE_PROC_FAMILY "586MMX "
#elif defined CONFIG_MCORE2
@ -65,7 +93,7 @@ gcc version >=4.9
#elif defined CONFIG_MATOM
#define MODULE_PROC_FAMILY "ATOM "
#elif defined CONFIG_M686
@@ -33,6 +51,26 @@
#define MODULE_PROC_FAMILY "K7 "
#elif defined CONFIG_MK8
#define MODULE_PROC_FAMILY "K8 "
@ -80,17 +108,29 @@ gcc version >=4.9
+#elif defined CONFIG_MBULLDOZER
+#define MODULE_PROC_FAMILY "BULLDOZER "
+#elif defined CONFIG_MPILEDRIVER
+#define MODULE_PROC_FAMILY "PILEDRIVER "
+#elif defined CONFIG_MSTEAMROLLER
+#define MODULE_PROC_FAMILY "STEAMROLLER "
+#elif defined CONFIG_MJAGUAR
+#define MODULE_PROC_FAMILY "JAGUAR "
+#elif defined CONFIG_MEXCAVATOR
+#define MODULE_PROC_FAMILY "EXCAVATOR "
+#elif defined CONFIG_MZEN
+#define MODULE_PROC_FAMILY "ZEN "
#elif defined CONFIG_MELAN
#define MODULE_PROC_FAMILY "ELAN "
#elif defined CONFIG_MCRUSOE
--- a/arch/x86/Kconfig.cpu 2016-12-11 14:17:54.000000000 -0500
+++ b/arch/x86/Kconfig.cpu 2017-01-06 20:46:14.004109597 -0500
@@ -115,6 +115,7 @@ config MPENTIUMM
config MPENTIUM4
bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
depends on X86_32
+ select X86_P6_NOP
---help---
Select this for Intel Pentium 4 chips. This includes the
Pentium 4, Pentium D, P4-based Celeron and Xeon, and
@@ -147,9 +148,8 @@ config MPENTIUM4
-Paxville
-Dempsey
@ -101,7 +141,7 @@ gcc version >=4.9
depends on X86_32
---help---
Select this for an AMD K6-family processor. Enables use of
@@ -157,7 +157,7 @@ config MK6
flags to GCC.
config MK7
@ -110,7 +150,7 @@ gcc version >=4.9
depends on X86_32
---help---
Select this for an AMD Athlon K7-family processor. Enables use of
@@ -165,12 +165,83 @@ config MK7
flags to GCC.
config MK8
@ -139,54 +179,77 @@ gcc version >=4.9
+config MBARCELONA
+ bool "AMD Barcelona"
+ ---help---
+ Select this for AMD Family 10h Barcelona processors.
+
+ Enables -march=barcelona
+
+config MBOBCAT
+ bool "AMD Bobcat"
+ ---help---
+ Select this for AMD Family 14h Bobcat processors.
+
+ Enables -march=btver1
+
+config MJAGUAR
+ bool "AMD Jaguar"
+ ---help---
+ Select this for AMD Family 16h Jaguar processors.
+
+ Enables -march=btver2
+
+config MBULLDOZER
+ bool "AMD Bulldozer"
+ ---help---
+ Select this for AMD Family 15h Bulldozer processors.
+
+ Enables -march=bdver1
+
+config MPILEDRIVER
+ bool "AMD Piledriver"
+ ---help---
+ Select this for AMD Family 15h Piledriver processors.
+
+ Enables -march=bdver2
+
+config MSTEAMROLLER
+ bool "AMD Steamroller"
+ ---help---
+ Select this for AMD Family 15h Steamroller processors.
+
+ Enables -march=bdver3
+
+config MEXCAVATOR
+ bool "AMD Excavator"
+ ---help---
+ Select this for AMD Family 15h Excavator processors.
+
+ Enables -march=bdver4
+
+config MZEN
+ bool "AMD Zen"
+ ---help---
+ Select this for AMD Family 17h Zen processors.
+
+ Enables -march=znver1
+
config MCRUSOE
bool "Crusoe"
depends on X86_32
@@ -252,6 +323,7 @@ config MVIAC7
config MPSC
bool "Intel P4 / older Netburst based Xeon"
+ select X86_P6_NOP
depends on X86_64
---help---
Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
@@ -261,8 +333,19 @@
using the cpu family field
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
+config MATOM
+ bool "Intel Atom"
+ select X86_P6_NOP
+ ---help---
+
+ Select this for the Intel Atom platform. Intel Atom CPUs have an
@ -197,10 +260,11 @@ gcc version >=4.9
config MCORE2
- bool "Core 2/newer Xeon"
+ bool "Intel Core 2"
+ select X86_P6_NOP
---help---
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
@@ -270,14 +353,79 @@ config MCORE2
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
(not a typo)
@ -210,6 +274,7 @@ gcc version >=4.9
+
+config MNEHALEM
+ bool "Intel Nehalem"
+ select X86_P6_NOP
---help---
- Select this for the Intel Atom platform. Intel Atom CPUs have an
@ -222,6 +287,7 @@ gcc version >=4.9
+
+config MWESTMERE
+ bool "Intel Westmere"
+ select X86_P6_NOP
+ ---help---
+
+ Select this for the Intel Westmere formerly Nehalem-C family.
@ -230,6 +296,7 @@ gcc version >=4.9
+
+config MSILVERMONT
+ bool "Intel Silvermont"
+ select X86_P6_NOP
+ ---help---
+
+ Select this for the Intel Silvermont platform.
@ -238,6 +305,7 @@ gcc version >=4.9
+
+config MSANDYBRIDGE
+ bool "Intel Sandy Bridge"
+ select X86_P6_NOP
+ ---help---
+
+ Select this for 2nd Gen Core processors in the Sandy Bridge family.
@ -246,6 +314,7 @@ gcc version >=4.9
+
+config MIVYBRIDGE
+ bool "Intel Ivy Bridge"
+ select X86_P6_NOP
+ ---help---
+
+ Select this for 3rd Gen Core processors in the Ivy Bridge family.
@ -254,6 +323,7 @@ gcc version >=4.9
+
+config MHASWELL
+ bool "Intel Haswell"
+ select X86_P6_NOP
+ ---help---
+
+ Select this for 4th Gen Core processors in the Haswell family.
@ -262,6 +332,7 @@ gcc version >=4.9
+
+config MBROADWELL
+ bool "Intel Broadwell"
+ select X86_P6_NOP
+ ---help---
+
+ Select this for 5th Gen Core processors in the Broadwell family.
@ -270,6 +341,7 @@ gcc version >=4.9
+
+config MSKYLAKE
+ bool "Intel Skylake"
+ select X86_P6_NOP
+ ---help---
+
+ Select this for 6th Gen Core processors in the Skylake family.
@ -278,7 +350,7 @@ gcc version >=4.9
config GENERIC_CPU
@@ -286,6 +434,19 @@ config GENERIC_CPU
Generic x86-64 CPU.
Run equally well on all x86-64 CPUs.
@ -298,16 +370,16 @@ gcc version >=4.9
endchoice
config X86_GENERIC
@@ -310,7 +471,7 @@ config X86_INTERNODE_CACHE_SHIFT
config X86_L1_CACHE_SHIFT
int
default "7" if MPENTIUM4 || MPSC
- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
+ default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
default "4" if MELAN || M486 || MGEODEGX1
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
@@ -341,45 +502,46 @@ config X86_ALIGNMENT_16
config X86_INTEL_USERCOPY
def_bool y
@ -321,7 +393,38 @@ gcc version >=4.9
config X86_USE_3DNOW
def_bool y
depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
-#
-# P6_NOPs are a relatively minor optimization that require a family >=
-# 6 processor, except that it is broken on certain VIA chips.
-# Furthermore, AMD chips prefer a totally different sequence of NOPs
-# (which work on all CPUs). In addition, it looks like Virtual PC
-# does not understand them.
-#
-# As a result, disallow these if we're not compiling for X86_64 (these
-# NOPs do work on all x86-64 capable chips); the list of processors in
-# the right-hand clause are the cores that benefit from this optimization.
-#
config X86_P6_NOP
- def_bool y
- depends on X86_64
- depends on (MCORE2 || MPENTIUM4 || MPSC)
+ default n
+ bool "Support for P6_NOPs on Intel chips"
+ depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE)
+ ---help---
+ P6_NOPs are a relatively minor optimization that require a family >=
+ 6 processor, except that it is broken on certain VIA chips.
+ Furthermore, AMD chips prefer a totally different sequence of NOPs
+ (which work on all CPUs). In addition, it looks like Virtual PC
+ does not understand them.
+
+ As a result, disallow these if we're not compiling for X86_64 (these
+ NOPs do work on all x86-64 capable chips); the list of processors in
+ the right-hand clause are the cores that benefit from this optimization.
+
+ Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
config X86_TSC
def_bool y
@ -338,13 +441,13 @@ gcc version >=4.9
config X86_CMOV
def_bool y
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
+ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
config X86_MINIMUM_CPU_FAMILY
int
--- a/arch/x86/Makefile 2016-12-11 14:17:54.000000000 -0500
+++ b/arch/x86/Makefile 2017-01-06 20:44:36.603227283 -0500
@@ -104,13 +104,40 @@ else
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
@ -354,10 +457,12 @@ gcc version >=4.9
+ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
+ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
+ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
+ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
+ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
+ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
+ cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
+ cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
+ cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
cflags-$(CONFIG_MCORE2) += \
@ -386,9 +491,9 @@ gcc version >=4.9
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
KBUILD_CFLAGS += $(cflags-y)
--- a/arch/x86/Makefile_32.cpu 2016-12-11 14:17:54.000000000 -0500
+++ b/arch/x86/Makefile_32.cpu 2017-01-06 20:44:36.603227283 -0500
@@ -23,7 +23,18 @@ cflags-$(CONFIG_MK6) += -march=k6
# Please note, that patches that add -march=athlon-xp and friends are pointless.
# They make zero difference whatsosever to performance at this time.
cflags-$(CONFIG_MK7) += -march=athlon
@ -398,14 +503,16 @@ gcc version >=4.9
+cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon)
+cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
+cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon)
+cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon)
+cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
+cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon)
+cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3,-march=athlon)
+cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon)
+cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1,-march=athlon)
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
@@ -32,8 +43,16 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
cflags-$(CONFIG_MVIAC7) += -march=i686
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
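The 'native' option described above keys off the host CPU in the same way as GCC's CPU detection builtins (GCC 4.8+ assumed), which gives a quick way to sanity-check what -march=native will target before picking a Processor family:

#include <stdio.h>

int main(void)
{
	__builtin_cpu_init();
	if (__builtin_cpu_supports("sse4.2"))
		printf("SSE4.2 present: Nehalem class or newer\n");
	if (__builtin_cpu_supports("avx2"))
		printf("AVX2 present: Haswell class or newer\n");
	return 0;
}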

File diff suppressed because it is too large


@ -0,0 +1,148 @@
--- linux-4.8/drivers/cpufreq/intel_pstate.c.orig 2016-10-02 19:24:33.000000000 -0400
+++ linux-4.8/drivers/cpufreq/intel_pstate.c 2016-10-09 19:32:01.073141319 -0400
@@ -181,6 +181,8 @@
* @cpu: CPU number for this instance data
* @update_util: CPUFreq utility callback information
* @update_util_set: CPUFreq utility callback is set
+ * @iowait_boost: iowait-related boost fraction
+ * @last_update: Time of the last update.
* @pstate: Stores P state limits for this CPU
* @vid: Stores VID limits for this CPU
* @pid: Stores PID parameters for this CPU
@@ -206,6 +208,7 @@
struct vid_data vid;
struct _pid pid;
+ u64 last_update;
u64 last_sample_time;
u64 prev_aperf;
u64 prev_mperf;
@@ -216,6 +219,7 @@
struct acpi_processor_performance acpi_perf_data;
bool valid_pss_table;
#endif
+ unsigned int iowait_boost;
};
static struct cpudata **all_cpu_data;
@@ -229,6 +233,7 @@
* @p_gain_pct: PID proportional gain
* @i_gain_pct: PID integral gain
* @d_gain_pct: PID derivative gain
+ * @boost_iowait: Whether or not to use iowait boosting.
*
* Stores per CPU model static PID configuration data.
*/
@@ -240,6 +245,7 @@
int p_gain_pct;
int d_gain_pct;
int i_gain_pct;
+ bool boost_iowait;
};
/**
@@ -1029,7 +1035,7 @@
},
};
-static struct cpu_defaults silvermont_params = {
+static const struct cpu_defaults silvermont_params = {
.pid_policy = {
.sample_rate_ms = 10,
.deadband = 0,
@@ -1037,6 +1043,7 @@
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
+ .boost_iowait = true,
},
.funcs = {
.get_max = atom_get_max_pstate,
@@ -1050,7 +1057,7 @@
},
};
-static struct cpu_defaults airmont_params = {
+static const struct cpu_defaults airmont_params = {
.pid_policy = {
.sample_rate_ms = 10,
.deadband = 0,
@@ -1058,6 +1065,7 @@
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
+ .boost_iowait = true,
},
.funcs = {
.get_max = atom_get_max_pstate,
@@ -1071,7 +1079,7 @@
},
};
-static struct cpu_defaults knl_params = {
+static const struct cpu_defaults knl_params = {
.pid_policy = {
.sample_rate_ms = 10,
.deadband = 0,
@@ -1091,7 +1099,7 @@
},
};
-static struct cpu_defaults bxt_params = {
+static const struct cpu_defaults bxt_params = {
.pid_policy = {
.sample_rate_ms = 10,
.deadband = 0,
@@ -1099,6 +1107,7 @@
.p_gain_pct = 14,
.d_gain_pct = 0,
.i_gain_pct = 4,
+ .boost_iowait = true,
},
.funcs = {
.get_max = core_get_max_pstate,
@@ -1222,36 +1231,18 @@
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
{
struct sample *sample = &cpu->sample;
- u64 cummulative_iowait, delta_iowait_us;
- u64 delta_iowait_mperf;
- u64 mperf, now;
- int32_t cpu_load;
+ int32_t busy_frac, boost;
- cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);
+ busy_frac = div_fp(sample->mperf, sample->tsc);
- /*
- * Convert iowait time into number of IO cycles spent at max_freq.
- * IO is considered as busy only for the cpu_load algorithm. For
- * performance this is not needed since we always try to reach the
- * maximum P-State, so we are already boosting the IOs.
- */
- delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
- delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
- cpu->pstate.max_pstate, MSEC_PER_SEC);
-
- mperf = cpu->sample.mperf + delta_iowait_mperf;
- cpu->prev_cummulative_iowait = cummulative_iowait;
+ boost = cpu->iowait_boost;
+ cpu->iowait_boost >>= 1;
- /*
- * The load can be estimated as the ratio of the mperf counter
- * running at a constant frequency during active periods
- * (C0) and the time stamp counter running at the same frequency
- * also during C-states.
- */
- cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
- cpu->sample.busy_scaled = cpu_load;
+ if (busy_frac < boost)
+ busy_frac = boost;
- return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
+ sample->busy_scaled = busy_frac * 100;
+ return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
}
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
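The rewritten cpu_load path above floors the measured busy fraction at a per-CPU boost that halves on every sample (cpu->iowait_boost >>= 1), so a CPU coming off IO ramps down over a few samples instead of collapsing to its true near-zero load at once. A toy model (illustrative values, not kernel code):

#include <stdio.h>

int main(void)
{
	int busy_frac = 20, boost = 256;	/* hypothetical fixed-point, /256 */

	for (int sample = 0; sample < 6; sample++) {
		int effective = busy_frac > boost ? busy_frac : boost;

		printf("sample %d: boost=%3d effective_busy=%3d\n",
		       sample, boost, effective);
		boost >>= 1;	/* mirrors cpu->iowait_boost >>= 1 */
	}
	return 0;
}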


@ -0,0 +1,94 @@
From 19be0eaffa3ac7d8eb6784ad9bdbc7d67ed8e619 Mon Sep 17 00:00:00 2001
From: Linus Torvalds <torvalds@linux-foundation.org>
Date: Thu, 13 Oct 2016 13:07:36 -0700
Subject: mm: remove gup_flags FOLL_WRITE games from __get_user_pages()
This is an ancient bug that was actually attempted to be fixed once
(badly) by me eleven years ago in commit 4ceb5db9757a ("Fix
get_user_pages() race for write access") but that was then undone due to
problems on s390 by commit f33ea7f404e5 ("fix get_user_pages bug").
In the meantime, the s390 situation has long been fixed, and we can now
fix it by checking the pte_dirty() bit properly (and do it better). The
s390 dirty bit was implemented in abf09bed3cce ("s390/mm: implement
software dirty bits") which made it into v3.9. Earlier kernels will
have to look at the page state itself.
Also, the VM has become more scalable, and what used a purely
theoretical race back then has become easier to trigger.
To fix it, we introduce a new internal FOLL_COW flag to mark the "yes,
we already did a COW" rather than play racy games with FOLL_WRITE that
is very fundamental, and then use the pte dirty flag to validate that
the FOLL_COW flag is still valid.
Reported-and-tested-by: Phil "not Paul" Oester <kernel@linuxace.com>
Acked-by: Hugh Dickins <hughd@google.com>
Reviewed-by: Michal Hocko <mhocko@suse.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Kees Cook <keescook@chromium.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Willy Tarreau <w@1wt.eu>
Cc: Nick Piggin <npiggin@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
include/linux/mm.h | 1 +
mm/gup.c | 14 ++++++++++++--
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e9caec6..ed85879 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2232,6 +2232,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
#define FOLL_MLOCK 0x1000 /* lock present pages */
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
+#define FOLL_COW 0x4000 /* internal GUP flag */
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
void *data);
diff --git a/mm/gup.c b/mm/gup.c
index 96b2b2f..22cc22e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
return -EEXIST;
}
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+ return pte_write(pte) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
static struct page *follow_page_pte(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, unsigned int flags)
{
@@ -95,7 +105,7 @@ retry:
}
if ((flags & FOLL_NUMA) && pte_protnone(pte))
goto no_page;
- if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
pte_unmap_unlock(ptep, ptl);
return NULL;
}
@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
* reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
- *flags &= ~FOLL_WRITE;
+ *flags |= FOLL_COW;
return 0;
}
--

File diff suppressed because it is too large


@ -0,0 +1,708 @@
diff --git a/init/Kconfig b/init/Kconfig
index 0dfd09d..8d704e5 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -36,6 +36,15 @@ config BROKEN_ON_SMP
depends on BROKEN || !SMP
default y
+config BLD
+ bool "An alternate CPU load distribution technique for task scheduler"
+ depends on SMP
+ default y
+ help
+ This is an alternate CPU load distribution technique for the task
+ scheduler, based on the Barbershop Load Distribution algorithm. Not
+ suitable for NUMA; should work well on SMP.
+
config INIT_ENV_ARG_LIMIT
int
default 32 if !UML
diff --git a/kernel/sched/bld.h b/kernel/sched/bld.h
new file mode 100644
index 0000000..f1f9fba
--- /dev/null
+++ b/kernel/sched/bld.h
@@ -0,0 +1,215 @@
+#ifdef CONFIG_BLD
+
+static DEFINE_RWLOCK(rt_list_lock);
+static LIST_HEAD(rt_rq_head);
+static LIST_HEAD(cfs_rq_head);
+static DEFINE_RWLOCK(cfs_list_lock);
+
+#ifdef CONFIG_FAIR_GROUP_SCHED
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
+{
+ return cfs_rq->rq;
+}
+#else
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
+{
+ return container_of(cfs_rq, struct rq, cfs);
+}
+#endif
+
+#ifdef CONFIG_RT_GROUP_SCHED
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
+{
+ return rt_rq->rq;
+}
+#else
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
+{
+ return container_of(rt_rq, struct rq, rt);
+}
+#endif
+
+static int select_cpu_for_wakeup(int task_type, struct cpumask *mask)
+{
+ int cpu = smp_processor_id(), i;
+ unsigned long load, varload;
+ struct rq *rq;
+
+ if (task_type) {
+ varload = ULONG_MAX;
+ for_each_cpu(i, mask) {
+ rq = cpu_rq(i);
+ load = rq->cfs.load.weight;
+ if (load < varload) {
+ varload = load;
+ cpu = i;
+ }
+ }
+ } else {
+ /* Here's an attempt to get a CPU within the mask where
+ * we can preempt easily. To achieve this we tried to
+ * maintain a lowbit, which indicates the lowest bit set in
+ * the array bitmap. Since all CPUs contain high priority
+ * kernel threads, we eliminate 0, so it might not
+ * be right every time, but it's just an indicator.
+ */
+ varload = 1;
+
+ for_each_cpu(i, mask) {
+ rq = cpu_rq(i);
+ load = rq->rt.lowbit;
+ if (load >= varload) {
+ varload = load;
+ cpu = i;
+ }
+ }
+ }
+
+ return cpu;
+}
+
+static int bld_pick_cpu_cfs(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ struct cfs_rq *cfs;
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+
+ read_lock_irqsave(&cfs_list_lock, flags);
+ list_for_each_entry(cfs, &cfs_rq_head, bld_cfs_list) {
+ cpu = cpu_of(rq_of_cfs(cfs));
+ if (cpu_online(cpu))
+ break;
+ }
+ read_unlock_irqrestore(&cfs_list_lock, flags);
+ return cpu;
+}
+
+static int bld_pick_cpu_rt(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ struct rt_rq *rt;
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+
+ read_lock_irqsave(&rt_list_lock, flags);
+ list_for_each_entry(rt, &rt_rq_head, bld_rt_list) {
+ cpu = cpu_of(rq_of_rt(rt));
+ if (cpu_online(cpu))
+ break;
+ }
+ read_unlock_irqrestore(&rt_list_lock, flags);
+ return cpu;
+}
+
+static int bld_pick_cpu_domain(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ unsigned int cpu = smp_processor_id(), want_affine = 0;
+ struct cpumask *tmpmask;
+
+ if (p->nr_cpus_allowed == 1)
+ return task_cpu(p);
+
+ if (sd_flags & SD_BALANCE_WAKE) {
+ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
+ want_affine = 1;
+ }
+ }
+
+ if (want_affine)
+ tmpmask = tsk_cpus_allowed(p);
+ else
+ tmpmask = sched_domain_span(cpu_rq(task_cpu(p))->sd);
+
+ if (rt_task(p))
+ cpu = select_cpu_for_wakeup(0, tmpmask);
+ else
+ cpu = select_cpu_for_wakeup(1, tmpmask);
+
+ return cpu;
+}
+
+static void track_load_rt(struct rq *rq, struct task_struct *p)
+{
+ unsigned long flag;
+ int firstbit;
+ struct rt_rq *first;
+ struct rt_prio_array *array = &rq->rt.active;
+
+ first = list_entry(rt_rq_head.next, struct rt_rq, bld_rt_list);
+ firstbit = sched_find_first_bit(array->bitmap);
+
+ /* Maintaining rt.lowbit */
+ if (firstbit > 0 && firstbit <= rq->rt.lowbit)
+ rq->rt.lowbit = firstbit;
+
+ if (rq->rt.lowbit < first->lowbit) {
+ write_lock_irqsave(&rt_list_lock, flag);
+ list_del(&rq->rt.bld_rt_list);
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
+ write_unlock_irqrestore(&rt_list_lock, flag);
+ }
+}
+
+static int bld_get_cpu(struct task_struct *p, int sd_flags, int wake_flags)
+{
+ unsigned int cpu;
+
+ if (sd_flags == SD_BALANCE_WAKE || (sd_flags == SD_BALANCE_EXEC && (get_nr_threads(p) > 1)))
+ cpu = bld_pick_cpu_domain(p, sd_flags, wake_flags);
+ else {
+ if (rt_task(p))
+ cpu = bld_pick_cpu_rt(p, sd_flags, wake_flags);
+ else
+ cpu = bld_pick_cpu_cfs(p, sd_flags, wake_flags);
+ }
+
+ return cpu;
+}
+
+static void bld_track_load_activate(struct rq *rq, struct task_struct *p)
+{
+ unsigned long flag;
+ if (rt_task(p)) {
+ track_load_rt(rq, p);
+ } else {
+ if (rq->cfs.pos != 2) {
+ struct cfs_rq *last;
+ last = list_entry(cfs_rq_head.prev, struct cfs_rq, bld_cfs_list);
+ if (rq->cfs.load.weight >= last->load.weight) {
+ write_lock_irqsave(&cfs_list_lock, flag);
+ list_del(&rq->cfs.bld_cfs_list);
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
+ rq->cfs.pos = 2; last->pos = 1;
+ write_unlock_irqrestore(&cfs_list_lock, flag);
+ }
+ }
+ }
+}
+
+static void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
+{
+ unsigned long flag;
+ if (rt_task(p)) {
+ track_load_rt(rq, p);
+ } else {
+ if (rq->cfs.pos != 0) {
+ struct cfs_rq *first;
+ first = list_entry(cfs_rq_head.next, struct cfs_rq, bld_cfs_list);
+ if (rq->cfs.load.weight <= first->load.weight) {
+ write_lock_irqsave(&cfs_list_lock, flag);
+ list_del(&rq->cfs.bld_cfs_list);
+ list_add(&rq->cfs.bld_cfs_list, &cfs_rq_head);
+ rq->cfs.pos = 0; first->pos = 1;
+ write_unlock_irqrestore(&cfs_list_lock, flag);
+ }
+ }
+ }
+}
+#else
+static inline void bld_track_load_activate(struct rq *rq, struct task_struct *p)
+{
+}
+
+static inline void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
+{
+}
+#endif /* CONFIG_BLD */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d1f7149..c3236de 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -24,6 +24,8 @@
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
* 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
* Thomas Gleixner, Mike Kravetz
+ * 2012-Feb The Barbershop Load Distribution (BLD) algorithm - an alternate
+ * CPU load distribution technique for kernel scheduler by Rakib Mullick.
*/
#include <linux/kasan.h>
@@ -86,6 +88,7 @@
#include "sched.h"
#include "../workqueue_internal.h"
#include "../smpboot.h"
+#include "bld.h"
#define CREATE_TRACE_POINTS
#include <trace/events/sched.h>
@@ -713,6 +716,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & ENQUEUE_RESTORE))
sched_info_queued(rq, p);
p->sched_class->enqueue_task(rq, p, flags);
+ if (!dl_task(p))
+ bld_track_load_activate(rq, p);
}
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -721,6 +726,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & DEQUEUE_SAVE))
sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags);
+ if (!dl_task(p))
+ bld_track_load_deactivate(rq, p);
}
void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1515,8 +1522,16 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{
lockdep_assert_held(&p->pi_lock);
- if (p->nr_cpus_allowed > 1)
+ if (p->nr_cpus_allowed > 1) {
+#ifndef CONFIG_BLD
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
+#else
+ if(dl_task(p))
+ cpu = dl_sched_class.select_task_rq(p, cpu, sd_flags, wake_flags);
+ else
+ cpu = bld_get_cpu(p, sd_flags, wake_flags);
+#endif
+ }
/*
* In order not to call set_task_cpu() on a blocking task we need
@@ -1706,7 +1721,11 @@ void scheduler_ipi(void)
*/
preempt_fold_need_resched();
+#ifndef CONFIG_BLD
if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
+#else
+ if (llist_empty(&this_rq()->wake_list))
+#endif
return;
/*
@@ -1728,13 +1747,16 @@ void scheduler_ipi(void)
/*
* Check if someone kicked us for doing the nohz idle load balance.
*/
+#ifndef CONFIG_BLD
if (unlikely(got_nohz_idle_kick())) {
this_rq()->idle_balance = 1;
raise_softirq_irqoff(SCHED_SOFTIRQ);
}
+#endif
irq_exit();
}
+#ifndef CONFIG_BLD
static void ttwu_queue_remote(struct task_struct *p, int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -1747,6 +1769,13 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
}
}
+#endif
+
+bool cpus_share_cache(int this_cpu, int that_cpu)
+{
+ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
+}
+
void wake_up_if_idle(int cpu)
{
struct rq *rq = cpu_rq(cpu);
@@ -1770,18 +1799,13 @@ void wake_up_if_idle(int cpu)
out:
rcu_read_unlock();
}
-
-bool cpus_share_cache(int this_cpu, int that_cpu)
-{
- return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
-}
#endif /* CONFIG_SMP */
static void ttwu_queue(struct task_struct *p, int cpu)
{
struct rq *rq = cpu_rq(cpu);
-#if defined(CONFIG_SMP)
+#if defined(CONFIG_SMP) && !defined(CONFIG_BLD)
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
sched_clock_cpu(cpu); /* sync clocks x-cpu */
ttwu_queue_remote(p, cpu);
@@ -2292,7 +2316,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
* Silence PROVE_RCU.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
- set_task_cpu(p, cpu);
+ __set_task_cpu(p, cpu);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#ifdef CONFIG_SCHED_INFO
@@ -2837,7 +2861,14 @@ void sched_exec(void)
int dest_cpu;
raw_spin_lock_irqsave(&p->pi_lock, flags);
+#ifndef CONFIG_BLD
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
+#else
+ if (dl_task(p))
+ dest_cpu = task_cpu(p);
+ else
+ dest_cpu = bld_get_cpu(p, SD_BALANCE_EXEC, 0);
+#endif
if (dest_cpu == smp_processor_id())
goto unlock;
@@ -2926,8 +2957,10 @@ void scheduler_tick(void)
#ifdef CONFIG_SMP
rq->idle_balance = idle_cpu(cpu);
+#ifndef CONFIG_BLD
trigger_load_balance(rq);
#endif
+#endif
rq_last_tick_reset(rq);
}
@@ -7359,6 +7392,15 @@ void __init sched_init(void)
#endif
init_rq_hrtick(rq);
atomic_set(&rq->nr_iowait, 0);
+#ifdef CONFIG_BLD
+ INIT_LIST_HEAD(&rq->cfs.bld_cfs_list);
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
+ rq->cfs.pos = 0;
+
+ INIT_LIST_HEAD(&rq->rt.bld_rt_list);
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
+ rq->rt.lowbit = INT_MAX;
+#endif
}
set_load_weight(&init_task);
@@ -7399,6 +7441,9 @@ void __init sched_init(void)
init_sched_fair_class();
scheduler_running = 1;
+#ifdef CONFIG_BLD
+ printk(KERN_INFO "BLD: An Alternate CPU load distributor activated.\n");
+#endif
}
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e7dd0ec..555572f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4746,6 +4746,7 @@ static void task_waking_fair(struct task_struct *p)
record_wakee(p);
}
+#ifndef CONFIG_BLD
#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* effective_load() calculates the load change as seen from the root_task_group
@@ -5248,6 +5249,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
return new_cpu;
}
+#endif /* CONFIG_BLD */
/*
* Called immediately before a task is migrated to a new cpu; task_cpu(p) and
@@ -5552,6 +5554,7 @@ idle:
* further scheduler activity on it and we're being very careful to
* re-start the picking loop.
*/
+#ifndef CONFIG_BLD
lockdep_unpin_lock(&rq->lock);
new_tasks = idle_balance(rq);
lockdep_pin_lock(&rq->lock);
@@ -5565,7 +5568,7 @@ idle:
if (new_tasks > 0)
goto again;
-
+#endif
return NULL;
}
@@ -6226,8 +6229,9 @@ static unsigned long task_h_load(struct task_struct *p)
}
#endif
-/********** Helpers for find_busiest_group ************************/
+#ifndef CONFIG_BLD
+/********** Helpers for find_busiest_group ************************/
enum group_type {
group_other = 0,
group_imbalanced,
@@ -6318,6 +6322,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
return load_idx;
}
+#endif /* CONFIG_BLD */
static unsigned long scale_rt_capacity(int cpu)
{
@@ -6426,6 +6431,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
sdg->sgc->capacity = capacity;
}
+#ifndef CONFIG_BLD
/*
* Check whether the capacity of the rq has been noticeably reduced by side
* activity. The imbalance_pct is used for the threshold.
@@ -7659,6 +7665,8 @@ static inline int on_null_domain(struct rq *rq)
return unlikely(!rcu_dereference_sched(rq->sd));
}
+#endif /* CONFIG_BLD */
+
#ifdef CONFIG_NO_HZ_COMMON
/*
* idle load balancing details
@@ -7666,12 +7674,39 @@ static inline int on_null_domain(struct rq *rq)
* needed, they will kick the idle load balancer, which then does idle
* load balancing for all the idle CPUs.
*/
+#ifndef CONFIG_BLD
static struct {
cpumask_var_t idle_cpus_mask;
atomic_t nr_cpus;
unsigned long next_balance; /* in jiffy units */
} nohz ____cacheline_aligned;
+static inline void nohz_balance_exit_idle(int cpu)
+{
+ if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
+ /*
+ * Completely isolated CPUs don't ever set, so we must test.
+ */
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
+ atomic_dec(&nohz.nr_cpus);
+ }
+ clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
+ }
+}
+
+static int sched_ilb_notifier(struct notifier_block *nfb,
+ unsigned long action, void *hcpu)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DYING:
+ nohz_balance_exit_idle(smp_processor_id());
+ return NOTIFY_OK;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
static inline int find_new_ilb(void)
{
int ilb = cpumask_first(nohz.idle_cpus_mask);
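
Note on the block above: nohz_balance_exit_idle() and sched_ilb_notifier() are hoisted ahead of their first use so they survive when the later balancer code is compiled out under CONFIG_BLD. The notifier itself follows the pre-4.10 hotplug pattern; a hedged sketch of that shape (the demo_* name is illustrative):

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    /* Pre-4.10 hotplug callback: strip the _FROZEN variants used during
     * suspend, handle CPU_DYING, and ignore everything else. */
    static int demo_cpu_callback(struct notifier_block *nfb,
                                 unsigned long action, void *hcpu)
    {
        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_DYING:
            /* last chance to drop per-cpu state for the outgoing CPU */
            return NOTIFY_OK;
        default:
            return NOTIFY_DONE;
        }
    }

Registration happens once at init time, e.g. cpu_notifier(demo_cpu_callback, 0), exactly as init_sched_fair_class() does for sched_ilb_notifier further down.
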
@@ -7709,20 +7744,7 @@ static void nohz_balancer_kick(void)
smp_send_reschedule(ilb_cpu);
return;
}
-
-static inline void nohz_balance_exit_idle(int cpu)
-{
- if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
- /*
- * Completely isolated CPUs don't ever set, so we must test.
- */
- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
- atomic_dec(&nohz.nr_cpus);
- }
- clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
- }
-}
+#endif /* CONFIG_BLD */
static inline void set_cpu_sd_state_busy(void)
{
@@ -7764,6 +7786,7 @@ unlock:
*/
void nohz_balance_enter_idle(int cpu)
{
+#ifndef CONFIG_BLD
/*
* If this cpu is going down, then nothing needs to be done.
*/
@@ -7782,23 +7805,10 @@ void nohz_balance_enter_idle(int cpu)
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
atomic_inc(&nohz.nr_cpus);
set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
-}
-
-static int sched_ilb_notifier(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
-{
- switch (action & ~CPU_TASKS_FROZEN) {
- case CPU_DYING:
- nohz_balance_exit_idle(smp_processor_id());
- return NOTIFY_OK;
- default:
- return NOTIFY_DONE;
- }
+#endif
}
#endif
-static DEFINE_SPINLOCK(balancing);
-
/*
* Scale the max load_balance interval with the number of CPUs in the system.
* This trades load-balance latency on larger machines for less cross talk.
@@ -7808,6 +7818,9 @@ void update_max_interval(void)
max_load_balance_interval = HZ*num_online_cpus()/10;
}
+#ifndef CONFIG_BLD
+static DEFINE_SPINLOCK(balancing);
+
/*
* It checks each scheduling domain to see if it is due to be balanced,
* and initiates a balancing operation if so.
@@ -8095,6 +8108,7 @@ void trigger_load_balance(struct rq *rq)
nohz_balancer_kick();
#endif
}
+#endif /* CONFIG_BLD */
static void rq_online_fair(struct rq *rq)
{
@@ -8531,7 +8545,9 @@ const struct sched_class fair_sched_class = {
.put_prev_task = put_prev_task_fair,
#ifdef CONFIG_SMP
+#ifndef CONFIG_BLD
.select_task_rq = select_task_rq_fair,
+#endif
.migrate_task_rq = migrate_task_rq_fair,
.rq_online = rq_online_fair,
@@ -8593,6 +8609,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
__init void init_sched_fair_class(void)
{
+#ifndef CONFIG_BLD
#ifdef CONFIG_SMP
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
@@ -8602,5 +8619,5 @@ __init void init_sched_fair_class(void)
cpu_notifier(sched_ilb_notifier, 0);
#endif
#endif /* SMP */
-
+#endif /* BLD */
}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ec4f538d..4462bed 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1375,6 +1375,7 @@ static void yield_task_rt(struct rq *rq)
#ifdef CONFIG_SMP
static int find_lowest_rq(struct task_struct *task);
+#ifndef CONFIG_BLD
static int
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
{
@@ -1430,6 +1431,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
out:
return cpu;
}
+#endif /* CONFIG_BLD */
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
{
@@ -2335,7 +2337,9 @@ const struct sched_class rt_sched_class = {
.put_prev_task = put_prev_task_rt,
#ifdef CONFIG_SMP
+#ifndef CONFIG_BLD
.select_task_rq = select_task_rq_rt,
+#endif
.set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ec2e8d2..aaab735 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -408,9 +408,8 @@ struct cfs_rq {
#endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */
-#ifdef CONFIG_FAIR_GROUP_SCHED
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
-
+#ifdef CONFIG_FAIR_GROUP_SCHED
/*
* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
@@ -434,6 +433,11 @@ struct cfs_rq {
struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */
+
+#ifdef CONFIG_BLD
+ struct list_head bld_cfs_list;
+ char pos;
+#endif
};
static inline int rt_bandwidth_enabled(void)
@@ -479,12 +483,16 @@ struct rt_rq {
/* Nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
+ struct rq *rq;
#ifdef CONFIG_RT_GROUP_SCHED
unsigned long rt_nr_boosted;
- struct rq *rq;
struct task_group *tg;
#endif
+#ifdef CONFIG_BLD
+ struct list_head bld_rt_list;
+ int lowbit;
+#endif
};
/* Deadline class' related fields in a runqueue */
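
Summing up this patch: select_task_rq_fair()/select_task_rq_rt(), trigger_load_balance() and the idle_balance() call are all compiled out under CONFIG_BLD, and placement instead happens at enqueue/exec time by consulting the global runqueue lists, keyed by the cfs load position (pos) and lowest RT priority (lowbit) fields added above. A rough sketch of that selection step, with illustrative names rather than the patch's exact internals:

    /* Pick the target CPU at wakeup by scanning the runqueue chain for
     * the smallest load. BLD keeps its lists ordered so the real pick is
     * cheaper; the linear scan here is for illustration only. */
    struct rq_stub {
        int cpu;
        unsigned long load;          /* stand-in for the cfs load metric */
        struct rq_stub *next;
    };

    static int bld_pick_cpu(const struct rq_stub *head)
    {
        const struct rq_stub *p, *best = head;

        for (p = head->next; p; p = p->next)
            if (p->load < best->load)
                best = p;
        return best->cpu;
    }
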


@ -1,9 +1,5 @@
BLD changes for Linux kernel version 4.7
---
diff --git a/init/Kconfig b/init/Kconfig diff --git a/init/Kconfig b/init/Kconfig
index c02d897..edf8697 100644 index cac3f09..4e49d16 100644
--- a/init/Kconfig --- a/init/Kconfig
+++ b/init/Kconfig +++ b/init/Kconfig
@@ -36,6 +36,15 @@ config BROKEN_ON_SMP @@ -36,6 +36,15 @@ config BROKEN_ON_SMP
@ -244,7 +240,7 @@ index 0000000..f1f9fba
+} +}
+#endif /* CONFIG_BLD */ +#endif /* CONFIG_BLD */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 97ee9ac..b2ddabc 100644 index 44817c6..f0f3321 100644
--- a/kernel/sched/core.c --- a/kernel/sched/core.c
+++ b/kernel/sched/core.c +++ b/kernel/sched/core.c
@@ -24,6 +24,8 @@ @@ -24,6 +24,8 @@
@ -256,7 +252,7 @@ index 97ee9ac..b2ddabc 100644
*/ */
#include <linux/kasan.h> #include <linux/kasan.h>
@@ -86,6 +88,7 @@ @@ -87,6 +89,7 @@
#include "sched.h" #include "sched.h"
#include "../workqueue_internal.h" #include "../workqueue_internal.h"
#include "../smpboot.h" #include "../smpboot.h"
@ -264,7 +260,7 @@ index 97ee9ac..b2ddabc 100644
#define CREATE_TRACE_POINTS #define CREATE_TRACE_POINTS
#include <trace/events/sched.h> #include <trace/events/sched.h>
@@ -750,6 +753,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags) @@ -751,6 +754,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & ENQUEUE_RESTORE)) if (!(flags & ENQUEUE_RESTORE))
sched_info_queued(rq, p); sched_info_queued(rq, p);
p->sched_class->enqueue_task(rq, p, flags); p->sched_class->enqueue_task(rq, p, flags);
@ -273,7 +269,7 @@ index 97ee9ac..b2ddabc 100644
} }
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
@@ -758,6 +763,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags) @@ -759,6 +764,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
if (!(flags & DEQUEUE_SAVE)) if (!(flags & DEQUEUE_SAVE))
sched_info_dequeued(rq, p); sched_info_dequeued(rq, p);
p->sched_class->dequeue_task(rq, p, flags); p->sched_class->dequeue_task(rq, p, flags);
@ -282,7 +278,7 @@ index 97ee9ac..b2ddabc 100644
} }
void activate_task(struct rq *rq, struct task_struct *p, int flags) void activate_task(struct rq *rq, struct task_struct *p, int flags)
@@ -1587,11 +1594,17 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags) @@ -1588,11 +1595,17 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
{ {
lockdep_assert_held(&p->pi_lock); lockdep_assert_held(&p->pi_lock);
@ -301,7 +297,7 @@ index 97ee9ac..b2ddabc 100644
/* /*
* In order not to call set_task_cpu() on a blocking task we need * In order not to call set_task_cpu() on a blocking task we need
* to rely on ttwu() to place the task on a valid ->cpus_allowed * to rely on ttwu() to place the task on a valid ->cpus_allowed
@@ -1794,7 +1807,11 @@ void scheduler_ipi(void) @@ -1795,7 +1808,11 @@ void scheduler_ipi(void)
*/ */
preempt_fold_need_resched(); preempt_fold_need_resched();
@ -313,7 +309,7 @@ index 97ee9ac..b2ddabc 100644
return; return;
/* /*
@@ -1816,13 +1833,16 @@ void scheduler_ipi(void) @@ -1817,13 +1834,16 @@ void scheduler_ipi(void)
/* /*
* Check if someone kicked us for doing the nohz idle load balance. * Check if someone kicked us for doing the nohz idle load balance.
*/ */
@ -330,7 +326,7 @@ index 97ee9ac..b2ddabc 100644
static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
{ {
struct rq *rq = cpu_rq(cpu); struct rq *rq = cpu_rq(cpu);
@@ -1836,6 +1856,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags) @@ -1837,6 +1857,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
trace_sched_wake_idle_without_ipi(cpu); trace_sched_wake_idle_without_ipi(cpu);
} }
} }
@ -338,7 +334,7 @@ index 97ee9ac..b2ddabc 100644
void wake_up_if_idle(int cpu) void wake_up_if_idle(int cpu)
{ {
@@ -1872,7 +1893,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags) @@ -1873,7 +1894,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
struct rq *rq = cpu_rq(cpu); struct rq *rq = cpu_rq(cpu);
struct pin_cookie cookie; struct pin_cookie cookie;
@ -347,16 +343,7 @@ index 97ee9ac..b2ddabc 100644
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) { if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
sched_clock_cpu(cpu); /* sync clocks x-cpu */ sched_clock_cpu(cpu); /* sync clocks x-cpu */
ttwu_queue_remote(p, cpu, wake_flags); ttwu_queue_remote(p, cpu, wake_flags);
@@ -2394,7 +2415,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p) @@ -2971,7 +2992,14 @@ void sched_exec(void)
* Silence PROVE_RCU.
*/
raw_spin_lock_irqsave(&p->pi_lock, flags);
- set_task_cpu(p, cpu);
+ __set_task_cpu(p, cpu);
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#ifdef CONFIG_SCHED_INFO
@@ -2941,7 +2962,14 @@ void sched_exec(void)
int dest_cpu; int dest_cpu;
raw_spin_lock_irqsave(&p->pi_lock, flags); raw_spin_lock_irqsave(&p->pi_lock, flags);
@ -371,7 +358,7 @@ index 97ee9ac..b2ddabc 100644
if (dest_cpu == smp_processor_id()) if (dest_cpu == smp_processor_id())
goto unlock; goto unlock;
@@ -3030,8 +3058,10 @@ void scheduler_tick(void) @@ -3078,8 +3106,10 @@ void scheduler_tick(void)
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
rq->idle_balance = idle_cpu(cpu); rq->idle_balance = idle_cpu(cpu);
@ -382,7 +369,7 @@ index 97ee9ac..b2ddabc 100644
rq_last_tick_reset(rq); rq_last_tick_reset(rq);
} }
@@ -7262,7 +7292,9 @@ int sched_cpu_dying(unsigned int cpu) @@ -7313,7 +7343,9 @@ int sched_cpu_dying(unsigned int cpu)
raw_spin_unlock_irqrestore(&rq->lock, flags); raw_spin_unlock_irqrestore(&rq->lock, flags);
calc_load_migrate(rq); calc_load_migrate(rq);
update_max_interval(); update_max_interval();
@ -392,7 +379,7 @@ index 97ee9ac..b2ddabc 100644
hrtick_clear(rq); hrtick_clear(rq);
return 0; return 0;
} }
@@ -7468,6 +7500,15 @@ void __init sched_init(void) @@ -7519,6 +7551,15 @@ void __init sched_init(void)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
init_rq_hrtick(rq); init_rq_hrtick(rq);
atomic_set(&rq->nr_iowait, 0); atomic_set(&rq->nr_iowait, 0);
@ -408,7 +395,7 @@ index 97ee9ac..b2ddabc 100644
} }
set_load_weight(&init_task); set_load_weight(&init_task);
@@ -7510,6 +7551,9 @@ void __init sched_init(void) @@ -7561,6 +7602,9 @@ void __init sched_init(void)
init_schedstats(); init_schedstats();
scheduler_running = 1; scheduler_running = 1;
@ -419,10 +406,10 @@ index 97ee9ac..b2ddabc 100644
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c8c5d2d..5b694b3 100644 index 039de34..f823e5b 100644
--- a/kernel/sched/fair.c --- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c +++ b/kernel/sched/fair.c
@@ -4880,6 +4880,7 @@ static unsigned long cpu_avg_load_per_task(int cpu) @@ -4924,6 +4924,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
return 0; return 0;
} }
@ -430,7 +417,7 @@ index c8c5d2d..5b694b3 100644
#ifdef CONFIG_FAIR_GROUP_SCHED #ifdef CONFIG_FAIR_GROUP_SCHED
/* /*
* effective_load() calculates the load change as seen from the root_task_group * effective_load() calculates the load change as seen from the root_task_group
@@ -5411,6 +5412,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f @@ -5455,6 +5456,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
return new_cpu; return new_cpu;
} }
@ -438,7 +425,7 @@ index c8c5d2d..5b694b3 100644
/* /*
* Called immediately before a task is migrated to a new cpu; task_cpu(p) and * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
@@ -5741,6 +5743,7 @@ idle: @@ -5785,6 +5787,7 @@ idle:
* further scheduler activity on it and we're being very careful to * further scheduler activity on it and we're being very careful to
* re-start the picking loop. * re-start the picking loop.
*/ */
@ -446,7 +433,7 @@ index c8c5d2d..5b694b3 100644
lockdep_unpin_lock(&rq->lock, cookie); lockdep_unpin_lock(&rq->lock, cookie);
new_tasks = idle_balance(rq); new_tasks = idle_balance(rq);
lockdep_repin_lock(&rq->lock, cookie); lockdep_repin_lock(&rq->lock, cookie);
@@ -5754,7 +5757,7 @@ idle: @@ -5798,7 +5801,7 @@ idle:
if (new_tasks > 0) if (new_tasks > 0)
goto again; goto again;
@ -455,7 +442,7 @@ index c8c5d2d..5b694b3 100644
return NULL; return NULL;
} }
@@ -6415,8 +6418,9 @@ static unsigned long task_h_load(struct task_struct *p) @@ -6459,8 +6462,9 @@ static unsigned long task_h_load(struct task_struct *p)
} }
#endif #endif
@ -466,7 +453,7 @@ index c8c5d2d..5b694b3 100644
enum group_type { enum group_type {
group_other = 0, group_other = 0,
group_imbalanced, group_imbalanced,
@@ -6507,6 +6511,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd, @@ -6551,6 +6555,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
return load_idx; return load_idx;
} }
@ -474,7 +461,7 @@ index c8c5d2d..5b694b3 100644
static unsigned long scale_rt_capacity(int cpu) static unsigned long scale_rt_capacity(int cpu)
{ {
@@ -6615,6 +6620,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu) @@ -6659,6 +6664,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
sdg->sgc->capacity = capacity; sdg->sgc->capacity = capacity;
} }
@ -482,7 +469,7 @@ index c8c5d2d..5b694b3 100644
/* /*
* Check whether the capacity of the rq has been noticeably reduced by side * Check whether the capacity of the rq has been noticeably reduced by side
* activity. The imbalance_pct is used for the threshold. * activity. The imbalance_pct is used for the threshold.
@@ -7848,6 +7854,7 @@ static inline int on_null_domain(struct rq *rq) @@ -7892,6 +7898,7 @@ static inline int on_null_domain(struct rq *rq)
{ {
return unlikely(!rcu_dereference_sched(rq->sd)); return unlikely(!rcu_dereference_sched(rq->sd));
} }
@ -490,7 +477,7 @@ index c8c5d2d..5b694b3 100644
#ifdef CONFIG_NO_HZ_COMMON #ifdef CONFIG_NO_HZ_COMMON
/* /*
@@ -7856,12 +7863,39 @@ static inline int on_null_domain(struct rq *rq) @@ -7900,12 +7907,39 @@ static inline int on_null_domain(struct rq *rq)
* needed, they will kick the idle load balancer, which then does idle * needed, they will kick the idle load balancer, which then does idle
* load balancing for all the idle CPUs. * load balancing for all the idle CPUs.
*/ */
@ -530,7 +517,7 @@ index c8c5d2d..5b694b3 100644
static inline int find_new_ilb(void) static inline int find_new_ilb(void)
{ {
int ilb = cpumask_first(nohz.idle_cpus_mask); int ilb = cpumask_first(nohz.idle_cpus_mask);
@@ -7900,20 +7934,6 @@ static void nohz_balancer_kick(void) @@ -7944,20 +7978,6 @@ static void nohz_balancer_kick(void)
return; return;
} }
@ -551,7 +538,7 @@ index c8c5d2d..5b694b3 100644
static inline void set_cpu_sd_state_busy(void) static inline void set_cpu_sd_state_busy(void)
{ {
struct sched_domain *sd; struct sched_domain *sd;
@@ -7930,6 +7950,8 @@ static inline void set_cpu_sd_state_busy(void) @@ -7974,6 +7994,8 @@ static inline void set_cpu_sd_state_busy(void)
unlock: unlock:
rcu_read_unlock(); rcu_read_unlock();
} }
@ -560,7 +547,7 @@ index c8c5d2d..5b694b3 100644
void set_cpu_sd_state_idle(void) void set_cpu_sd_state_idle(void)
{ {
@@ -7954,6 +7976,7 @@ unlock: @@ -7998,6 +8020,7 @@ unlock:
*/ */
void nohz_balance_enter_idle(int cpu) void nohz_balance_enter_idle(int cpu)
{ {
@ -568,7 +555,7 @@ index c8c5d2d..5b694b3 100644
/* /*
* If this cpu is going down, then nothing needs to be done. * If this cpu is going down, then nothing needs to be done.
*/ */
@@ -7972,10 +7995,8 @@ void nohz_balance_enter_idle(int cpu) @@ -8016,10 +8039,8 @@ void nohz_balance_enter_idle(int cpu)
cpumask_set_cpu(cpu, nohz.idle_cpus_mask); cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
atomic_inc(&nohz.nr_cpus); atomic_inc(&nohz.nr_cpus);
set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)); set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
@ -580,7 +567,7 @@ index c8c5d2d..5b694b3 100644
/* /*
* Scale the max load_balance interval with the number of CPUs in the system. * Scale the max load_balance interval with the number of CPUs in the system.
@@ -7986,6 +8007,9 @@ void update_max_interval(void) @@ -8030,6 +8051,9 @@ void update_max_interval(void)
max_load_balance_interval = HZ*num_online_cpus()/10; max_load_balance_interval = HZ*num_online_cpus()/10;
} }
@ -590,7 +577,7 @@ index c8c5d2d..5b694b3 100644
/* /*
* It checks each scheduling domain to see if it is due to be balanced, * It checks each scheduling domain to see if it is due to be balanced,
* and initiates a balancing operation if so. * and initiates a balancing operation if so.
@@ -8273,6 +8297,7 @@ void trigger_load_balance(struct rq *rq) @@ -8317,6 +8341,7 @@ void trigger_load_balance(struct rq *rq)
nohz_balancer_kick(); nohz_balancer_kick();
#endif #endif
} }
@ -598,7 +585,7 @@ index c8c5d2d..5b694b3 100644
static void rq_online_fair(struct rq *rq) static void rq_online_fair(struct rq *rq)
{ {
@@ -8288,7 +8313,6 @@ static void rq_offline_fair(struct rq *rq) @@ -8332,7 +8357,6 @@ static void rq_offline_fair(struct rq *rq)
/* Ensure any throttled groups are reachable by pick_next_task */ /* Ensure any throttled groups are reachable by pick_next_task */
unthrottle_offline_cfs_rqs(rq); unthrottle_offline_cfs_rqs(rq);
} }
@ -606,7 +593,7 @@ index c8c5d2d..5b694b3 100644
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
/* /*
@@ -8716,7 +8740,9 @@ const struct sched_class fair_sched_class = { @@ -8791,7 +8815,9 @@ const struct sched_class fair_sched_class = {
.put_prev_task = put_prev_task_fair, .put_prev_task = put_prev_task_fair,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
@ -616,7 +603,7 @@ index c8c5d2d..5b694b3 100644
.migrate_task_rq = migrate_task_rq_fair, .migrate_task_rq = migrate_task_rq_fair,
.rq_online = rq_online_fair, .rq_online = rq_online_fair,
@@ -8777,6 +8803,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m) @@ -8852,6 +8878,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
__init void init_sched_fair_class(void) __init void init_sched_fair_class(void)
{ {
@ -624,7 +611,7 @@ index c8c5d2d..5b694b3 100644
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains); open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
@@ -8785,5 +8812,5 @@ __init void init_sched_fair_class(void) @@ -8860,5 +8887,5 @@ __init void init_sched_fair_class(void)
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT); zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
#endif #endif
#endif /* SMP */ #endif /* SMP */
@ -662,10 +649,10 @@ index d5690b7..6f3589e 100644
.set_cpus_allowed = set_cpus_allowed_common, .set_cpus_allowed = set_cpus_allowed_common,
.rq_online = rq_online_rt, .rq_online = rq_online_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 898c0d2..720d524 100644 index c64fc51..a1d329b 100644
--- a/kernel/sched/sched.h --- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h +++ b/kernel/sched/sched.h
@@ -415,9 +415,8 @@ struct cfs_rq { @@ -416,9 +416,8 @@ struct cfs_rq {
#endif /* CONFIG_FAIR_GROUP_SCHED */ #endif /* CONFIG_FAIR_GROUP_SCHED */
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
@ -676,7 +663,7 @@ index 898c0d2..720d524 100644
/* /*
* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in * leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities * a hierarchy). Non-leaf lrqs hold other higher schedulable entities
@@ -441,6 +440,11 @@ struct cfs_rq { @@ -442,6 +441,11 @@ struct cfs_rq {
struct list_head throttled_list; struct list_head throttled_list;
#endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_CFS_BANDWIDTH */
#endif /* CONFIG_FAIR_GROUP_SCHED */ #endif /* CONFIG_FAIR_GROUP_SCHED */
@ -688,7 +675,7 @@ index 898c0d2..720d524 100644
}; };
static inline int rt_bandwidth_enabled(void) static inline int rt_bandwidth_enabled(void)
@@ -486,12 +490,16 @@ struct rt_rq { @@ -487,12 +491,16 @@ struct rt_rq {
/* Nests inside the rq lock: */ /* Nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock; raw_spinlock_t rt_runtime_lock;


@ -78,7 +78,7 @@ index 0000000..8fce86f
+2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger annoying warnings. +2015-04-22 UKSM 0.1.2.4 Fix a race condition that can sometimes trigger annoying warnings.
+2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation. +2016-09-10 UKSM 0.1.2.5 Fix a bug in dedup ratio calculation.
diff --git a/fs/exec.c b/fs/exec.c diff --git a/fs/exec.c b/fs/exec.c
index 887c1c9..2bee16e 100644 index 6fcfb3f..ef87e0f 100644
--- a/fs/exec.c --- a/fs/exec.c
+++ b/fs/exec.c +++ b/fs/exec.c
@@ -19,7 +19,7 @@ @@ -19,7 +19,7 @@
@ -98,7 +98,7 @@ index 887c1c9..2bee16e 100644
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
@@ -1273,6 +1274,7 @@ void setup_new_exec(struct linux_binprm * bprm) @@ -1309,6 +1310,7 @@ void setup_new_exec(struct linux_binprm * bprm)
/* An exec changes our domain. We are no longer part of the thread /* An exec changes our domain. We are no longer part of the thread
group */ group */
current->self_exec_id++; current->self_exec_id++;
@ -107,7 +107,7 @@ index 887c1c9..2bee16e 100644
do_close_on_exec(current->files); do_close_on_exec(current->files);
} }
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 8372046..82aa2f4 100644 index b9a8c81..9765269 100644
--- a/fs/proc/meminfo.c --- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c +++ b/fs/proc/meminfo.c
@@ -89,6 +89,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) @@ -89,6 +89,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
@ -120,9 +120,9 @@ index 8372046..82aa2f4 100644
#ifdef CONFIG_QUICKLIST #ifdef CONFIG_QUICKLIST
"Quicklists: %8lu kB\n" "Quicklists: %8lu kB\n"
#endif #endif
@@ -147,6 +150,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v) @@ -149,6 +152,9 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
K(global_page_state(NR_SLAB_UNRECLAIMABLE)), K(global_page_state(NR_SLAB_UNRECLAIMABLE)),
global_page_state(NR_KERNEL_STACK) * THREAD_SIZE / 1024, global_page_state(NR_KERNEL_STACK_KB),
K(global_page_state(NR_PAGETABLE)), K(global_page_state(NR_PAGETABLE)),
+#ifdef CONFIG_UKSM +#ifdef CONFIG_UKSM
+ K(global_page_state(NR_UKSM_ZERO_PAGES)), + K(global_page_state(NR_UKSM_ZERO_PAGES)),
@ -171,7 +171,7 @@ index d4458b6..172ceb9 100644
static inline unsigned long my_zero_pfn(unsigned long addr) static inline unsigned long my_zero_pfn(unsigned long addr)
diff --git a/include/linux/ksm.h b/include/linux/ksm.h diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 7ae216a..06861d8 100644 index 481c8c4..5329b23 100644
--- a/include/linux/ksm.h --- a/include/linux/ksm.h
+++ b/include/linux/ksm.h +++ b/include/linux/ksm.h
@@ -19,21 +19,6 @@ struct mem_cgroup; @@ -19,21 +19,6 @@ struct mem_cgroup;
@ -196,7 +196,7 @@ index 7ae216a..06861d8 100644
static inline struct stable_node *page_stable_node(struct page *page) static inline struct stable_node *page_stable_node(struct page *page)
{ {
@@ -64,6 +49,33 @@ struct page *ksm_might_need_to_copy(struct page *page, @@ -63,6 +48,33 @@ struct page *ksm_might_need_to_copy(struct page *page,
int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc); int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage); void ksm_migrate_page(struct page *newpage, struct page *oldpage);
@ -230,7 +230,7 @@ index 7ae216a..06861d8 100644
#else /* !CONFIG_KSM */ #else /* !CONFIG_KSM */
static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm) static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
@@ -106,4 +118,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage) @@ -105,4 +117,6 @@ static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
#endif /* !CONFIG_KSM */ #endif /* !CONFIG_KSM */
@ -238,10 +238,10 @@ index 7ae216a..06861d8 100644
+ +
#endif /* __LINUX_KSM_H */ #endif /* __LINUX_KSM_H */
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index ca3e517..ae62e7d1 100644 index 903200f..6c7d900 100644
--- a/include/linux/mm_types.h --- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h +++ b/include/linux/mm_types.h
@@ -357,6 +357,9 @@ struct vm_area_struct { @@ -358,6 +358,9 @@ struct vm_area_struct {
struct mempolicy *vm_policy; /* NUMA policy for the VMA */ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
#endif #endif
struct vm_userfaultfd_ctx vm_userfaultfd_ctx; struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
@ -252,20 +252,20 @@ index ca3e517..ae62e7d1 100644
struct core_thread { struct core_thread {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 02069c2..f7cce50 100644 index 7f2ae99..89f7dd8 100644
--- a/include/linux/mmzone.h --- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h +++ b/include/linux/mmzone.h
@@ -153,6 +153,9 @@ enum zone_stat_item { @@ -138,6 +138,9 @@ enum zone_stat_item {
WORKINGSET_NODERECLAIM, NUMA_OTHER, /* allocation from other node */
NR_ANON_TRANSPARENT_HUGEPAGES, #endif
NR_FREE_CMA_PAGES, NR_FREE_CMA_PAGES,
+#ifdef CONFIG_UKSM +#ifdef CONFIG_UKSM
+ NR_UKSM_ZERO_PAGES, + NR_UKSM_ZERO_PAGES,
+#endif +#endif
NR_VM_ZONE_STAT_ITEMS }; NR_VM_ZONE_STAT_ITEMS };
/* enum node_stat_item {
@@ -817,7 +820,7 @@ static inline int is_highmem_idx(enum zone_type idx) @@ -869,7 +872,7 @@ static inline int is_highmem_idx(enum zone_type idx)
} }
/** /**
@ -513,10 +513,10 @@ index 0000000..825f05e
+#endif /* !CONFIG_UKSM */ +#endif /* !CONFIG_UKSM */
+#endif /* __LINUX_UKSM_H */ +#endif /* __LINUX_UKSM_H */
diff --git a/kernel/fork.c b/kernel/fork.c diff --git a/kernel/fork.c b/kernel/fork.c
index aea4f4d..f93e114 100644 index beb3172..569893a 100644
--- a/kernel/fork.c --- a/kernel/fork.c
+++ b/kernel/fork.c +++ b/kernel/fork.c
@@ -459,7 +459,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) @@ -457,7 +457,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
goto fail_nomem; goto fail_nomem;
charge = len; charge = len;
} }
@ -525,7 +525,7 @@ index aea4f4d..f93e114 100644
if (!tmp) if (!tmp)
goto fail_nomem; goto fail_nomem;
*tmp = *mpnt; *tmp = *mpnt;
@@ -512,7 +512,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) @@ -510,7 +510,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
__vma_link_rb(mm, tmp, rb_link, rb_parent); __vma_link_rb(mm, tmp, rb_link, rb_parent);
rb_link = &tmp->vm_rb.rb_right; rb_link = &tmp->vm_rb.rb_right;
rb_parent = &tmp->vm_rb; rb_parent = &tmp->vm_rb;
@ -535,17 +535,17 @@ index aea4f4d..f93e114 100644
retval = copy_page_range(mm, oldmm, mpnt); retval = copy_page_range(mm, oldmm, mpnt);
diff --git a/lib/Makefile b/lib/Makefile diff --git a/lib/Makefile b/lib/Makefile
index ff6a7a6..ac0bb55 100644 index 5dc77a8..b63a823 100644
--- a/lib/Makefile --- a/lib/Makefile
+++ b/lib/Makefile +++ b/lib/Makefile
@@ -20,7 +20,7 @@ KCOV_INSTRUMENT_dynamic_debug.o := n @@ -17,7 +17,7 @@ KCOV_INSTRUMENT_debugobjects.o := n
KCOV_INSTRUMENT_hweight.o := n KCOV_INSTRUMENT_dynamic_debug.o := n
lib-y := ctype.o string.o vsprintf.o cmdline.o \ lib-y := ctype.o string.o vsprintf.o cmdline.o \
- rbtree.o radix-tree.o dump_stack.o timerqueue.o\ - rbtree.o radix-tree.o dump_stack.o timerqueue.o\
+ rbtree.o radix-tree.o sradix-tree.o dump_stack.o timerqueue.o\ + rbtree.o radix-tree.o sradix-tree.o dump_stack.o timerqueue.o\
idr.o int_sqrt.o extable.o \ idr.o int_sqrt.o extable.o \
sha1.o md5.o irq_regs.o argv_split.o \ sha1.o chacha20.o md5.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \ flex_proportions.o ratelimit.o show_mem.o \
diff --git a/lib/sradix-tree.c b/lib/sradix-tree.c diff --git a/lib/sradix-tree.c b/lib/sradix-tree.c
new file mode 100644 new file mode 100644
@ -1030,10 +1030,10 @@ index 0000000..8d06329
+ return 0; + return 0;
+} +}
diff --git a/mm/Kconfig b/mm/Kconfig diff --git a/mm/Kconfig b/mm/Kconfig
index 3e2daef..165b60e 100644 index be0ee11..64fd3bc 100644
--- a/mm/Kconfig --- a/mm/Kconfig
+++ b/mm/Kconfig +++ b/mm/Kconfig
@@ -332,6 +332,32 @@ config KSM @@ -340,6 +340,32 @@ config KSM
See Documentation/vm/ksm.txt for more information: KSM is inactive See Documentation/vm/ksm.txt for more information: KSM is inactive
until a program has madvised that an area is MADV_MERGEABLE, and until a program has madvised that an area is MADV_MERGEABLE, and
root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set). root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
@ -1067,10 +1067,10 @@ index 3e2daef..165b60e 100644
config DEFAULT_MMAP_MIN_ADDR config DEFAULT_MMAP_MIN_ADDR
int "Low address space to protect from user allocation" int "Low address space to protect from user allocation"
diff --git a/mm/Makefile b/mm/Makefile diff --git a/mm/Makefile b/mm/Makefile
index 78c6f7d..7e7cd8a 100644 index 2ca1faf..980c8dd 100644
--- a/mm/Makefile --- a/mm/Makefile
+++ b/mm/Makefile +++ b/mm/Makefile
@@ -63,7 +63,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o @@ -66,7 +66,8 @@ obj-$(CONFIG_SPARSEMEM) += sparse.o
obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
obj-$(CONFIG_SLOB) += slob.o obj-$(CONFIG_SLOB) += slob.o
obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
@ -1081,10 +1081,10 @@ index 78c6f7d..7e7cd8a 100644
obj-$(CONFIG_SLAB) += slab.o obj-$(CONFIG_SLAB) += slab.o
obj-$(CONFIG_SLUB) += slub.o obj-$(CONFIG_SLUB) += slub.o
diff --git a/mm/memory.c b/mm/memory.c diff --git a/mm/memory.c b/mm/memory.c
index 9e04681..02200d3 100644 index 793fe0f..0464507 100644
--- a/mm/memory.c --- a/mm/memory.c
+++ b/mm/memory.c +++ b/mm/memory.c
@@ -124,6 +124,28 @@ unsigned long highest_memmap_pfn __read_mostly; @@ -124,6 +124,25 @@ unsigned long highest_memmap_pfn __read_mostly;
EXPORT_SYMBOL(zero_pfn); EXPORT_SYMBOL(zero_pfn);
@ -1095,14 +1095,11 @@ index 9e04681..02200d3 100644
+ +
+static int __init setup_uksm_zero_page(void) +static int __init setup_uksm_zero_page(void)
+{ +{
+ unsigned long addr; + empty_uksm_zero_page = alloc_pages(__GFP_ZERO & ~__GFP_MOVABLE, 0);
+ addr = __get_free_pages(GFP_KERNEL | __GFP_ZERO, 0); + if (!empty_uksm_zero_page)
+ if (!addr)
+ panic("Oh boy, that early out of memory?"); + panic("Oh boy, that early out of memory?");
+ +
+ empty_uksm_zero_page = virt_to_page((void *) addr);
+ SetPageReserved(empty_uksm_zero_page); + SetPageReserved(empty_uksm_zero_page);
+
+ uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page); + uksm_zero_pfn = page_to_pfn(empty_uksm_zero_page);
+ +
+ return 0; + return 0;
@ -1113,7 +1110,7 @@ index 9e04681..02200d3 100644
/* /*
* CONFIG_MMU architectures set up ZERO_PAGE in their paging_init() * CONFIG_MMU architectures set up ZERO_PAGE in their paging_init()
*/ */
@@ -135,6 +157,7 @@ static int __init init_zero_pfn(void) @@ -135,6 +154,7 @@ static int __init init_zero_pfn(void)
core_initcall(init_zero_pfn); core_initcall(init_zero_pfn);
@ -1121,7 +1118,7 @@ index 9e04681..02200d3 100644
#if defined(SPLIT_RSS_COUNTING) #if defined(SPLIT_RSS_COUNTING)
void sync_mm_rss(struct mm_struct *mm) void sync_mm_rss(struct mm_struct *mm)
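
The hunk above gives UKSM a dedicated zero page: allocate one zeroed order-0 page at boot, mark it reserved so reclaim and migration leave it alone, and cache its PFN for cheap comparisons. The same pattern as a standalone kernel sketch; it spells out GFP_KERNEL | __GFP_ZERO for clarity, whereas the patched code masks __GFP_MOVABLE out of the request instead.

    #include <linux/gfp.h>
    #include <linux/init.h>
    #include <linux/mm.h>

    static struct page *demo_zero_page;
    static unsigned long demo_zero_pfn;

    static int __init setup_demo_zero_page(void)
    {
        /* one zeroed, non-movable page that lives for the whole uptime */
        demo_zero_page = alloc_pages(GFP_KERNEL | __GFP_ZERO, 0);
        if (!demo_zero_page)
            return -ENOMEM;

        SetPageReserved(demo_zero_page); /* opt out of reclaim and migration */
        demo_zero_pfn = page_to_pfn(demo_zero_page);
        return 0;
    }
    core_initcall(setup_demo_zero_page);
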
@@ -905,6 +928,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, @@ -914,6 +934,11 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
get_page(page); get_page(page);
page_dup_rmap(page, false); page_dup_rmap(page, false);
rss[mm_counter(page)]++; rss[mm_counter(page)]++;
@ -1133,7 +1130,7 @@ index 9e04681..02200d3 100644
} }
out_set_pte: out_set_pte:
@@ -1138,8 +1166,10 @@ again: @@ -1148,8 +1173,10 @@ again:
ptent = ptep_get_and_clear_full(mm, addr, pte, ptent = ptep_get_and_clear_full(mm, addr, pte,
tlb->fullmm); tlb->fullmm);
tlb_remove_tlb_entry(tlb, pte, addr); tlb_remove_tlb_entry(tlb, pte, addr);
@ -1145,7 +1142,7 @@ index 9e04681..02200d3 100644
if (!PageAnon(page)) { if (!PageAnon(page)) {
if (pte_dirty(ptent)) { if (pte_dirty(ptent)) {
@@ -1995,8 +2025,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo @@ -2010,8 +2037,10 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
clear_page(kaddr); clear_page(kaddr);
kunmap_atomic(kaddr); kunmap_atomic(kaddr);
flush_dcache_page(dst); flush_dcache_page(dst);
@ -1157,15 +1154,15 @@ index 9e04681..02200d3 100644
} }
static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma) static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
@@ -2141,6 +2173,7 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, @@ -2154,6 +2183,7 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
new_page = alloc_zeroed_user_highpage_movable(vma, address); new_page = alloc_zeroed_user_highpage_movable(vma, fe->address);
if (!new_page) if (!new_page)
goto oom; goto oom;
+ uksm_cow_pte(vma, orig_pte); + uksm_cow_pte(vma, orig_pte);
} else { } else {
new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
if (!new_page) fe->address);
@@ -2166,7 +2199,9 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma, @@ -2180,7 +2210,9 @@ static int wp_page_copy(struct fault_env *fe, pte_t orig_pte,
mm_counter_file(old_page)); mm_counter_file(old_page));
inc_mm_counter_fast(mm, MM_ANONPAGES); inc_mm_counter_fast(mm, MM_ANONPAGES);
} }
@ -1174,12 +1171,12 @@ index 9e04681..02200d3 100644
+ uksm_unmap_zero_page(orig_pte); + uksm_unmap_zero_page(orig_pte);
inc_mm_counter_fast(mm, MM_ANONPAGES); inc_mm_counter_fast(mm, MM_ANONPAGES);
} }
flush_cache_page(vma, address, pte_pfn(orig_pte)); flush_cache_page(vma, fe->address, pte_pfn(orig_pte));
diff --git a/mm/mmap.c b/mm/mmap.c diff --git a/mm/mmap.c b/mm/mmap.c
index de2c176..ce60715 100644 index ca9d91b..cf565b7 100644
--- a/mm/mmap.c --- a/mm/mmap.c
+++ b/mm/mmap.c +++ b/mm/mmap.c
@@ -43,6 +43,7 @@ @@ -44,6 +44,7 @@
#include <linux/userfaultfd_k.h> #include <linux/userfaultfd_k.h>
#include <linux/moduleparam.h> #include <linux/moduleparam.h>
#include <linux/pkeys.h> #include <linux/pkeys.h>
@ -1187,7 +1184,7 @@ index de2c176..ce60715 100644
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
@@ -164,6 +165,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) @@ -165,6 +166,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_file) if (vma->vm_file)
fput(vma->vm_file); fput(vma->vm_file);
mpol_put(vma_policy(vma)); mpol_put(vma_policy(vma));
@ -1206,13 +1203,13 @@ index de2c176..ce60715 100644
+ uksm_remove_vma(vma); + uksm_remove_vma(vma);
+ +
if (next && !insert) { if (next && !insert) {
struct vm_area_struct *exporter = NULL; struct vm_area_struct *exporter = NULL, *importer = NULL;
+ uksm_remove_vma(next); + uksm_remove_vma(next);
if (end >= next->vm_end) { if (end >= next->vm_end) {
/* /*
* vma expands, overlapping all the next, and * vma expands, overlapping all the next, and
@@ -725,6 +734,7 @@ again: remove_next = 1 + (end > next->vm_end); @@ -733,6 +742,7 @@ again:
end_changed = true; end_changed = true;
} }
vma->vm_pgoff = pgoff; vma->vm_pgoff = pgoff;
@ -1220,14 +1217,13 @@ index de2c176..ce60715 100644
if (adjust_next) { if (adjust_next) {
next->vm_start += adjust_next << PAGE_SHIFT; next->vm_start += adjust_next << PAGE_SHIFT;
next->vm_pgoff += adjust_next; next->vm_pgoff += adjust_next;
@@ -795,16 +805,22 @@ again: remove_next = 1 + (end > next->vm_end); @@ -806,16 +816,21 @@ again:
* up the code too much to do both in one go. if (remove_next == 2) {
*/ remove_next = 1;
next = vma->vm_next; end = next->vm_end;
- if (remove_next == 2)
+ if (remove_next == 2) {
+ uksm_remove_vma(next); + uksm_remove_vma(next);
goto again; goto again;
- }
- else if (next) - else if (next)
+ } else if (next) { + } else if (next) {
vma_gap_update(next); vma_gap_update(next);
@ -1246,7 +1242,7 @@ index de2c176..ce60715 100644
validate_mm(mm); validate_mm(mm);
return 0; return 0;
@@ -1196,6 +1212,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr, @@ -1207,6 +1222,9 @@ unsigned long do_mmap(struct file *file, unsigned long addr,
vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) | vm_flags |= calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
@ -1256,7 +1252,7 @@ index de2c176..ce60715 100644
if (flags & MAP_LOCKED) if (flags & MAP_LOCKED)
if (!can_do_mlock()) if (!can_do_mlock())
return -EPERM; return -EPERM;
@@ -1534,6 +1553,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr, @@ -1545,6 +1563,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
allow_write_access(file); allow_write_access(file);
} }
file = vma->vm_file; file = vma->vm_file;
@ -1264,7 +1260,7 @@ index de2c176..ce60715 100644
out: out:
perf_event_mmap(vma); perf_event_mmap(vma);
@@ -1575,6 +1595,7 @@ allow_write_and_free_vma: @@ -1586,6 +1605,7 @@ allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE) if (vm_flags & VM_DENYWRITE)
allow_write_access(file); allow_write_access(file);
free_vma: free_vma:
@ -1272,7 +1268,7 @@ index de2c176..ce60715 100644
kmem_cache_free(vm_area_cachep, vma); kmem_cache_free(vm_area_cachep, vma);
unacct_error: unacct_error:
if (charged) if (charged)
@@ -2369,6 +2390,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, @@ -2391,6 +2411,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
else else
err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
@ -1281,7 +1277,7 @@ index de2c176..ce60715 100644
/* Success. */ /* Success. */
if (!err) if (!err)
return 0; return 0;
@@ -2639,6 +2662,7 @@ static int do_brk(unsigned long addr, unsigned long len) @@ -2669,6 +2691,7 @@ static int do_brk(unsigned long addr, unsigned long request)
return 0; return 0;
flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
@ -1289,7 +1285,7 @@ index de2c176..ce60715 100644
error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
if (offset_in_page(error)) if (offset_in_page(error))
@@ -2696,6 +2720,7 @@ static int do_brk(unsigned long addr, unsigned long len) @@ -2726,6 +2749,7 @@ static int do_brk(unsigned long addr, unsigned long request)
vma->vm_flags = flags; vma->vm_flags = flags;
vma->vm_page_prot = vm_get_page_prot(flags); vma->vm_page_prot = vm_get_page_prot(flags);
vma_link(mm, vma, prev, rb_link, rb_parent); vma_link(mm, vma, prev, rb_link, rb_parent);
@ -1297,7 +1293,7 @@ index de2c176..ce60715 100644
out: out:
perf_event_mmap(vma); perf_event_mmap(vma);
mm->total_vm += len >> PAGE_SHIFT; mm->total_vm += len >> PAGE_SHIFT;
@@ -2734,6 +2759,12 @@ void exit_mmap(struct mm_struct *mm) @@ -2764,6 +2788,12 @@ void exit_mmap(struct mm_struct *mm)
/* mm's last user has gone, and its about to be pulled down */ /* mm's last user has gone, and its about to be pulled down */
mmu_notifier_release(mm); mmu_notifier_release(mm);
@ -1310,7 +1306,7 @@ index de2c176..ce60715 100644
if (mm->locked_vm) { if (mm->locked_vm) {
vma = mm->mmap; vma = mm->mmap;
while (vma) { while (vma) {
@@ -2769,6 +2800,11 @@ void exit_mmap(struct mm_struct *mm) @@ -2799,6 +2829,11 @@ void exit_mmap(struct mm_struct *mm)
vma = remove_vma(vma); vma = remove_vma(vma);
} }
vm_unacct_memory(nr_accounted); vm_unacct_memory(nr_accounted);
@ -1322,7 +1318,7 @@ index de2c176..ce60715 100644
} }
/* Insert vm structure into process list sorted by address /* Insert vm structure into process list sorted by address
@@ -2878,6 +2914,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, @@ -2908,6 +2943,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
new_vma->vm_ops->open(new_vma); new_vma->vm_ops->open(new_vma);
vma_link(mm, new_vma, prev, rb_link, rb_parent); vma_link(mm, new_vma, prev, rb_link, rb_parent);
*need_rmap_locks = false; *need_rmap_locks = false;
@ -1330,7 +1326,7 @@ index de2c176..ce60715 100644
} }
return new_vma; return new_vma;
@@ -3015,6 +3052,7 @@ static struct vm_area_struct *__install_special_mapping( @@ -3055,6 +3091,7 @@ static struct vm_area_struct *__install_special_mapping(
vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT); vm_stat_account(mm, vma->vm_flags, len >> PAGE_SHIFT);
perf_event_mmap(vma); perf_event_mmap(vma);
@ -1339,7 +1335,7 @@ index de2c176..ce60715 100644
return vma; return vma;
diff --git a/mm/rmap.c b/mm/rmap.c diff --git a/mm/rmap.c b/mm/rmap.c
index 701b93f..64ba784 100644 index 1ef3640..1c40463 100644
--- a/mm/rmap.c --- a/mm/rmap.c
+++ b/mm/rmap.c +++ b/mm/rmap.c
@@ -1110,9 +1110,9 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) @@ -1110,9 +1110,9 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
@ -1356,10 +1352,10 @@ index 701b93f..64ba784 100644
static void __page_set_anon_rmap(struct page *page, static void __page_set_anon_rmap(struct page *page,
diff --git a/mm/uksm.c b/mm/uksm.c diff --git a/mm/uksm.c b/mm/uksm.c
new file mode 100644 new file mode 100644
index 0000000..039192f index 0000000..56852a5
--- /dev/null --- /dev/null
+++ b/mm/uksm.c +++ b/mm/uksm.c
@@ -0,0 +1,5518 @@ @@ -0,0 +1,5524 @@
+/* +/*
+ * Ultra KSM. Copyright (C) 2011-2012 Nai Xia + * Ultra KSM. Copyright (C) 2011-2012 Nai Xia
+ * + *
@ -1558,7 +1554,8 @@ index 0000000..039192f
+static struct sradix_tree_node *slot_tree_node_alloc(void) +static struct sradix_tree_node *slot_tree_node_alloc(void)
+{ +{
+ struct slot_tree_node *p; + struct slot_tree_node *p;
+ p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL); + p = kmem_cache_zalloc(slot_tree_node_cachep, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (!p) + if (!p)
+ return NULL; + return NULL;
+ +
@ -2044,7 +2041,8 @@ index 0000000..039192f
+static inline struct node_vma *alloc_node_vma(void) +static inline struct node_vma *alloc_node_vma(void)
+{ +{
+ struct node_vma *node_vma; + struct node_vma *node_vma;
+ node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL); + node_vma = kmem_cache_zalloc(node_vma_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (node_vma) { + if (node_vma) {
+ INIT_HLIST_HEAD(&node_vma->rmap_hlist); + INIT_HLIST_HEAD(&node_vma->rmap_hlist);
+ INIT_HLIST_NODE(&node_vma->hlist); + INIT_HLIST_NODE(&node_vma->hlist);
@ -2069,7 +2067,8 @@ index 0000000..039192f
+ if (!vma_slot_cache) + if (!vma_slot_cache)
+ return NULL; + return NULL;
+ +
+ slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL); + slot = kmem_cache_zalloc(vma_slot_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (slot) { + if (slot) {
+ INIT_LIST_HEAD(&slot->slot_list); + INIT_LIST_HEAD(&slot->slot_list);
+ INIT_LIST_HEAD(&slot->dedup_list); + INIT_LIST_HEAD(&slot->dedup_list);
@ -2089,7 +2088,8 @@ index 0000000..039192f
+{ +{
+ struct rmap_item *rmap_item; + struct rmap_item *rmap_item;
+ +
+ rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); + rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (rmap_item) { + if (rmap_item) {
+ /* bug on lowest bit is not clear for flag use */ + /* bug on lowest bit is not clear for flag use */
+ BUG_ON(is_addr(rmap_item)); + BUG_ON(is_addr(rmap_item));
@ -2106,7 +2106,8 @@ index 0000000..039192f
+static inline struct stable_node *alloc_stable_node(void) +static inline struct stable_node *alloc_stable_node(void)
+{ +{
+ struct stable_node *node; + struct stable_node *node;
+ node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL | GFP_ATOMIC); + node = kmem_cache_alloc(stable_node_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (!node) + if (!node)
+ return NULL; + return NULL;
+ +
@ -2124,7 +2125,8 @@ index 0000000..039192f
+static inline struct tree_node *alloc_tree_node(struct list_head *list) +static inline struct tree_node *alloc_tree_node(struct list_head *list)
+{ +{
+ struct tree_node *node; + struct tree_node *node;
+ node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL | GFP_ATOMIC); + node = kmem_cache_zalloc(tree_node_cache, GFP_KERNEL |
+ __GFP_NORETRY | __GFP_NOWARN);
+ if (!node) + if (!node)
+ return NULL; + return NULL;
+ +
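
A recurring change in the uksm.c hunks above: every metadata allocation now passes GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN, so under memory pressure the allocator gives up quickly and silently and the scanner simply skips the candidate page. It also retires the earlier GFP_KERNEL | GFP_ATOMIC combination, which OR'ed two standalone context masks instead of adding a modifier bit. A minimal sketch of the idiom (demo_ name is illustrative):

    #include <linux/slab.h>

    /* Opportunistic metadata allocation: fail fast (__GFP_NORETRY) and
     * quietly (__GFP_NOWARN); callers treat NULL as "skip for now". */
    static void *demo_try_zalloc(struct kmem_cache *cachep)
    {
        return kmem_cache_zalloc(cachep, GFP_KERNEL |
                                         __GFP_NORETRY | __GFP_NOWARN);
    }
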
@ -2241,8 +2243,8 @@ index 0000000..039192f
+ void *expected_mapping; + void *expected_mapping;
+ +
+ page = pfn_to_page(stable_node->kpfn); + page = pfn_to_page(stable_node->kpfn);
+ expected_mapping = (void *)stable_node + + expected_mapping = (void *)((unsigned long)stable_node |
+ (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); + PAGE_MAPPING_KSM);
+ rcu_read_lock(); + rcu_read_lock();
+ if (page->mapping != expected_mapping) + if (page->mapping != expected_mapping)
+ goto stale; + goto stale;
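
The expected_mapping hunk above switches from adding the KSM flag bits to the stable_node pointer to OR-ing them into its low bits, matching the upstream page->mapping encoding; both forms rely on kmem_cache objects being at least word-aligned so those bits start out zero. A standalone sketch of the low-bit pointer-tagging idiom (the tag values here are illustrative, not the kernel's):

    #include <stdint.h>
    #include <stdio.h>

    #define TAG_MASK 3UL    /* bits 0-1 are free on >= 4-byte alignment */

    static void *tag_ptr(void *p, unsigned long tag)
    {
        return (void *)((uintptr_t)p | (tag & TAG_MASK));
    }

    static void *untag_ptr(void *p)
    {
        return (void *)((uintptr_t)p & ~(uintptr_t)TAG_MASK);
    }

    int main(void)
    {
        static long obj;    /* stand-in for a slab-allocated object */
        void *tagged = tag_ptr(&obj, 2);

        printf("tag=%lu untagged=%p original=%p\n",
               (unsigned long)((uintptr_t)tagged & TAG_MASK),
               untag_ptr(tagged), (void *)&obj);
        return 0;
    }
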
@ -2919,6 +2921,7 @@ index 0000000..039192f
+ (page_to_pfn(kpage) == zero_pfn)) { + (page_to_pfn(kpage) == zero_pfn)) {
+ entry = pte_mkspecial(entry); + entry = pte_mkspecial(entry);
+ dec_mm_counter(mm, MM_ANONPAGES); + dec_mm_counter(mm, MM_ANONPAGES);
+ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
+ } else { + } else {
+ get_page(kpage); + get_page(kpage);
+ page_add_anon_rmap(kpage, vma, addr, false); + page_add_anon_rmap(kpage, vma, addr, false);
@ -3986,7 +3989,7 @@ index 0000000..039192f
+ if (IS_ERR_OR_NULL(page)) + if (IS_ERR_OR_NULL(page))
+ break; + break;
+ if (PageKsm(page)) { + if (PageKsm(page)) {
+ ret = handle_mm_fault(vma->vm_mm, vma, addr, + ret = handle_mm_fault(vma, addr,
+ FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE); + FAULT_FLAG_WRITE | FAULT_FLAG_REMOTE);
+ } else + } else
+ ret = VM_FAULT_WRITE; + ret = VM_FAULT_WRITE;
@ -4634,7 +4637,6 @@ index 0000000..039192f
+ if (find_zero_page_hash(hash_strength, *hash)) { + if (find_zero_page_hash(hash_strength, *hash)) {
+ if (!cmp_and_merge_zero_page(slot->vma, page)) { + if (!cmp_and_merge_zero_page(slot->vma, page)) {
+ slot->pages_merged++; + slot->pages_merged++;
+ inc_zone_page_state(page, NR_UKSM_ZERO_PAGES);
+ +
+ /* For full-zero pages, no need to create rmap item */ + /* For full-zero pages, no need to create rmap item */
+ goto putpage; + goto putpage;
@ -6879,12 +6881,12 @@ index 0000000..039192f
+#endif +#endif
+ +
diff --git a/mm/vmstat.c b/mm/vmstat.c diff --git a/mm/vmstat.c b/mm/vmstat.c
index cb2a67b..912b86f 100644 index 89cec42..188ce43 100644
--- a/mm/vmstat.c --- a/mm/vmstat.c
+++ b/mm/vmstat.c +++ b/mm/vmstat.c
@@ -733,6 +733,9 @@ const char * const vmstat_text[] = { @@ -974,6 +974,9 @@ const char * const vmstat_text[] = {
"nr_anon_transparent_hugepages", "nr_dirtied",
"nr_free_cma", "nr_written",
+#ifdef CONFIG_UKSM +#ifdef CONFIG_UKSM
+ "nr_uksm_zero_pages", + "nr_uksm_zero_pages",