initial commit
This commit is contained in:
commit
a3fbee018e
|
@ -0,0 +1,47 @@
|
|||
|
||||
# Created by https://www.gitignore.io/api/rchlinuxpackages,archives,linux
|
||||
|
||||
#!! ERROR: rchlinuxpackages is undefined. Use list command to see defined gitignore types !!#
|
||||
|
||||
### Archives ###
|
||||
# It's better to unpack these files and commit the raw source because
|
||||
# git has its own built in compression methods.
|
||||
*.7z
|
||||
*.jar
|
||||
*.rar
|
||||
*.zip
|
||||
*.gz
|
||||
*.bzip
|
||||
*.bz2
|
||||
*.xz
|
||||
*.lzma
|
||||
*.cab
|
||||
|
||||
#packing-only formats
|
||||
*.iso
|
||||
*.tar
|
||||
|
||||
#package management formats
|
||||
*.dmg
|
||||
*.xpi
|
||||
*.gem
|
||||
*.egg
|
||||
*.deb
|
||||
*.rpm
|
||||
*.msi
|
||||
*.msm
|
||||
*.msp
|
||||
|
||||
|
||||
### Linux ###
|
||||
*~
|
||||
|
||||
# temporary files which can be created if a process still has a handle open of a deleted file
|
||||
.fuse_hidden*
|
||||
|
||||
# KDE directory preferences
|
||||
.directory
|
||||
|
||||
# Linux trash folder which might appear on any partition or disk
|
||||
.Trash-*
|
||||
|
|
@ -0,0 +1,556 @@
|
|||
# Maintainer: xduugu
|
||||
_pkgext=-spica
|
||||
pkgbase=linux$_pkgext
|
||||
pkgname=$pkgbase
|
||||
|
||||
# required by AUR
|
||||
# comment the following line to build a single package containing the kernel and the headers
|
||||
(( 1 )) && pkgname=("$pkgbase" "$pkgbase-headers" "$pkgbase-docs")
|
||||
pkgdesc="The Linux Kernel and modules from Linus' git tree"
|
||||
depends=('coreutils' 'linux-firmware' 'module-init-tools' 'mkinitcpio')
|
||||
|
||||
pkgver=4.11.rc5
|
||||
pkgrel=1
|
||||
url="http://www.kernel.org/"
|
||||
arch=(i686 x86_64)
|
||||
license=('GPL2')
|
||||
makedepends=(git bc)
|
||||
options=(!strip)
|
||||
source=($pkgname::git://git.kernel.org/pub/scm/linux/kernel/git/stable/linux-stable.git)
|
||||
#source=($pkgname::git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git)
|
||||
md5sums=('SKIP')
|
||||
sha256sums=('SKIP')
|
||||
|
||||
# set _gitrev to a git revision (man gitrevisions) like a tag, a commit sha1
|
||||
# hash or a branch name to build from this tree instead of master
|
||||
|
||||
_gitrev="v4.10.8"
|
||||
|
||||
####################################################################
|
||||
# KERNEL CONFIG FILES
|
||||
#
|
||||
# This PKGBUILD searches for config files in the current directory
|
||||
# and will use the first one it finds from the following
|
||||
# list as base configuration:
|
||||
# config.local
|
||||
# config.saved.$CARCH
|
||||
# config.$CARCH
|
||||
#
|
||||
####################################################################
|
||||
|
||||
|
||||
#############################################################
|
||||
# PATCHES
|
||||
#
|
||||
# This package builds the vanilla git kernel by default,
|
||||
# but it is possible to patch the source without modifying
|
||||
# this PKGBUILD.
|
||||
#
|
||||
# Simply create a directory 'patches' in your PKGBUILD
|
||||
# directory and _any_ file (dotfiles excluded) in this
|
||||
# folder will be applied to the kernel source.
|
||||
#
|
||||
# Prefixing the patch file names with dots will obviously
|
||||
# excluded them from the patching process.
|
||||
#
|
||||
#############################################################
|
||||
|
||||
|
||||
#############################
|
||||
# CONFIGURATION
|
||||
#
|
||||
# Uncomment desired options
|
||||
#############################
|
||||
#_make_modules=1
|
||||
|
||||
|
||||
#######
|
||||
# Skip the merge of Linus's kernel tree
|
||||
#
|
||||
# _skip_merge=1
|
||||
|
||||
|
||||
|
||||
MAKEFLAGS="-j $(expr $(cat /proc/cpuinfo |grep processor |wc -l) \* 2)"
|
||||
#######
|
||||
# Set to e.g. menuconfig, xconfig or gconfig
|
||||
#
|
||||
# For a full list of supported commands, please have a look
|
||||
# at "Configuration targets" section of `make help`'s output
|
||||
# or the help target in scripts/kconfig/Makefile
|
||||
# http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob;f=scripts/kconfig/Makefile
|
||||
#
|
||||
# If unset or set to an empty or space-only string, the
|
||||
# (manual) kernel configuration step will be skipped.
|
||||
#
|
||||
_config_cmd="${_config_cmd:-menuconfig}"
|
||||
#_config_cmd="${_config_cmd:-oldconfig}"
|
||||
|
||||
|
||||
#######
|
||||
# Stop build process after kernel configuration
|
||||
#
|
||||
# This option enables _save_config implicitly.
|
||||
#
|
||||
# _configure_only=1
|
||||
|
||||
|
||||
#######
|
||||
# The directory where the kernel should be built
|
||||
#
|
||||
# Can be useful, for example, if you want to compile on a
|
||||
# tmpfs mount, which can speed up the compilation process
|
||||
#
|
||||
#_build_dir="${_build_dir:-$srcdir}"
|
||||
|
||||
|
||||
#######
|
||||
# Append the date to the localversion
|
||||
#
|
||||
# e.g. -ARCH -> -ARCH-20090422
|
||||
#
|
||||
#_date_localversion=0
|
||||
|
||||
|
||||
|
||||
#######
|
||||
# Set the pkgver to the kernel version
|
||||
# rather than the build date
|
||||
#
|
||||
# _kernel_pkgver=1
|
||||
|
||||
|
||||
#######
|
||||
# Save the .config file to package directory
|
||||
# as config.saved.$CARCH
|
||||
#
|
||||
_save_config=1
|
||||
|
||||
|
||||
#######
|
||||
# Do not compress kernel modules
|
||||
#
|
||||
_no_modules_compression=0
|
||||
|
||||
|
||||
#######
|
||||
# Make the kernel build process verbose
|
||||
#
|
||||
# _verbose=1
|
||||
|
||||
|
||||
# internal variables
|
||||
(( 1 )) && _kernel_src="$pkgname"
|
||||
#(( 1 )) && _kernel_src="$BUILDDIR/$(find . -maxdepth 1 -type d -name "linux-*" -printf "%f\n" | head -1)"
|
||||
#(( 1 )) && _kernel_src="$_build_dir/$pkgname_$"
|
||||
|
||||
|
||||
#######
|
||||
# define required functions
|
||||
|
||||
pkgver() {
|
||||
cd "$_kernel_src"
|
||||
git describe --always | sed 's/^v//;s/-/./g'
|
||||
}
|
||||
|
||||
# single package
|
||||
package() {
|
||||
eval package_$pkgbase-headers
|
||||
eval package_$pkgbase
|
||||
}
|
||||
|
||||
# split package functions
|
||||
eval "package_$pkgbase() { _generic_package_kernel; }"
|
||||
eval "package_$pkgbase-headers() { _generic_package_kernel-headers; }"
|
||||
eval "package_$pkgbase-docs() { _generic_package_kernel-docs; }"
|
||||
|
||||
|
||||
##############################
|
||||
# where the magic happens...
|
||||
##############################
|
||||
build() {
|
||||
cd "$_kernel_src"
|
||||
msg "Sanitizing source tree.."
|
||||
[[ -n $_gitrev ]] && git reset --hard "$_gitrev"
|
||||
# cleaning source trees
|
||||
git clean -f
|
||||
|
||||
#################
|
||||
# Apply patches
|
||||
#################
|
||||
msg "Applying patches..."
|
||||
local i patches
|
||||
for i in "${source[@]}"; do
|
||||
i=${i##*/}
|
||||
[[ $i =~ .*\.patch$ ]] && patches=("${patches[@]}" "$srcdir/$i")
|
||||
[[ ${i%.*} =~ .*\.patch$ ]] && patches=("${patches[@]}" "$srcdir/${i%.*}")
|
||||
done
|
||||
|
||||
shopt -s nullglob
|
||||
for i in "${patches[@]}" "$startdir/patches/"*; do
|
||||
msg2 "Applying ${i##*/}..."
|
||||
patch -Np1 -i "$i" || (error "Applying ${i##*/} failed" && return 1)
|
||||
done
|
||||
shopt -u nullglob
|
||||
|
||||
|
||||
#################
|
||||
# CONFIGURATION
|
||||
#################
|
||||
|
||||
#########################
|
||||
# Loading configuration
|
||||
#########################
|
||||
msg "Loading configuration..."
|
||||
for i in local "saved.$CARCH" "$CARCH"; do
|
||||
if [[ -e $startdir/config.$i ]]; then
|
||||
msg2 "Using kernel config file config.$i..."
|
||||
cp -f "$startdir/config.$i" .config
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
[[ ! -e .config ]] &&
|
||||
warning "No suitable kernel config file was found. You'll have to configure the kernel from scratch."
|
||||
|
||||
|
||||
###########################
|
||||
# Start the configuration
|
||||
###########################
|
||||
msg "Updating configuration..."
|
||||
yes "" | make config > /dev/null
|
||||
|
||||
if [[ -f "$startdir/config.saved.$CARCH" ]]; then
|
||||
msg2 "migrating previous config..."
|
||||
cp "$startdir/config.saved.$CARCH" .config
|
||||
make oldconfig
|
||||
else
|
||||
msg2 "migrating default config..."
|
||||
cp "$startdir/config.$CARCH" .config
|
||||
make oldconfig
|
||||
fi
|
||||
if [[ -n ${_config_cmd// /} ]]; then
|
||||
msg2 "Running make $_config_cmd..."
|
||||
make $_config_cmd
|
||||
else
|
||||
warning "Unknown config command: $_config_cmd"
|
||||
fi
|
||||
|
||||
##############################################
|
||||
# Save the config file the package directory
|
||||
##############################################
|
||||
if [[ -n $_save_config || -n $_configure_only ]]; then
|
||||
msg "Saving configuration..."
|
||||
msg2 "Saving $_kernel_src/.config as $startdir/config.saved.$CARCH"
|
||||
cp .config "$startdir/config.saved.$CARCH"
|
||||
fi
|
||||
|
||||
|
||||
#######################################
|
||||
# Stop after configuration if desired
|
||||
#######################################
|
||||
if [[ -n $_configure_only ]]; then
|
||||
rm -rf "$_kernel_src" "$srcdir" "$pkgdir"
|
||||
return 1
|
||||
fi
|
||||
|
||||
|
||||
###############################
|
||||
# Append date to localversion
|
||||
###############################
|
||||
if [[ -n $_date_localversion ]]; then
|
||||
local _localversion="$(sed -rn 's/^CONFIG_LOCALVERSION="([^"]*)"$/\1/p' .config)"
|
||||
[[ -n $_localversion ]] && msg2 "CONFIG_LOCALVERSION is set to: $_localversion"
|
||||
|
||||
# since this is a git package, the $pkgver is equal to $(date +%Y%m%d)
|
||||
msg2 "Appending $pkgver to CONFIG_LOCALVERSION..."
|
||||
sed -ri "s/^(CONFIG_LOCALVERSION=).*$/\1\"$_localversion-$pkgver\"/" .config
|
||||
fi
|
||||
|
||||
|
||||
|
||||
#################
|
||||
# BUILD PROCESS
|
||||
#################
|
||||
|
||||
################################
|
||||
# Build the kernel and modules
|
||||
################################
|
||||
msg "Building kernel and modules..."
|
||||
if [[ -n $_make_modules ]]; then
|
||||
make $MAKEFLAGS V="$_verbose" bzImage modules
|
||||
else
|
||||
make $MAKEFLAGS V="$_verbose" bzImage
|
||||
fi
|
||||
|
||||
############
|
||||
# CLEANUP
|
||||
############
|
||||
|
||||
###################################
|
||||
# Copy files from build directory
|
||||
####################################
|
||||
# if (( ! CLEANUP )) && [[ $_build_dir != $srcdir ]]; then
|
||||
# msg "Saving $_kernel_src to $srcdir/${_kernel_src##*/}..."
|
||||
# mv "$_kernel_src" "$srcdir"
|
||||
# rm -rf "$_kernel_src"
|
||||
# fi
|
||||
}
|
||||
|
||||
|
||||
_generic_package_initialization() {
|
||||
cd "$_kernel_src"
|
||||
|
||||
_karch="x86"
|
||||
|
||||
######################
|
||||
# Get kernel version
|
||||
######################
|
||||
_kernver=$(make kernelrelease)
|
||||
_basekernel=${_kernver%%-*}
|
||||
|
||||
############################################################
|
||||
# Use kernel version instead of the current date as pkgver
|
||||
############################################################
|
||||
if [[ -n $_kernel_pkgver ]]; then
|
||||
pkgver=${_kernver//-/_}
|
||||
msg "Setting pkgver to kernel version: $pkgver"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
_generic_package_kernel() {
|
||||
pkgdesc="The Linux Kernel and modules from Linus' git tree"
|
||||
depends=('coreutils' 'linux-firmware' 'module-init-tools' 'mkinitcpio')
|
||||
backup=(etc/mkinitcpio.d/$pkgname.preset)
|
||||
install=$pkgname.install
|
||||
changelog=$pkgname.changelog
|
||||
|
||||
# set required variables
|
||||
_generic_package_initialization
|
||||
|
||||
|
||||
#############################################################
|
||||
# Provide linux
|
||||
# (probably someone wants to use this kernel exclusively?)
|
||||
#############################################################
|
||||
provides=("${provides[@]}" "linux=${_kernver//-/_}")
|
||||
|
||||
################
|
||||
# INSTALLATION
|
||||
################
|
||||
|
||||
#####################
|
||||
# Install the image
|
||||
#####################
|
||||
msg "Installing kernel image..."
|
||||
install -Dm644 arch/$_karch/boot/bzImage "$pkgdir/boot/vmlinuz-$pkgname"
|
||||
|
||||
|
||||
##########################
|
||||
# Install kernel modules
|
||||
##########################
|
||||
if [[ -n $_make_modules ]]; then
|
||||
msg "Installing kernel modules..."
|
||||
make INSTALL_MOD_PATH="$pkgdir" modules_install
|
||||
[[ -z $_no_modules_compression ]] && find "$pkgdir" -name "*.ko"
|
||||
-exec gzip -9 {} +
|
||||
|
||||
|
||||
##################################
|
||||
# Create important symlinks
|
||||
##################################
|
||||
msg "Creating important symlinks..."
|
||||
|
||||
# Create generic modules symlink
|
||||
|
||||
if [[ $_kernver != ${_basekernel}${_pkgext} ]]; then
|
||||
cd "$pkgdir/lib/modules"
|
||||
ln -s "$_kernver" "${_basekernel}${_pkgext}"
|
||||
cd "$OLDPWD"
|
||||
|
||||
|
||||
# remove header symlinks
|
||||
cd "$pkgdir/lib/modules/$_kernver"
|
||||
rm -rf source build
|
||||
cd "$OLDPWD"
|
||||
fi
|
||||
fi
|
||||
|
||||
############################
|
||||
# Install mkinitcpio files
|
||||
############################
|
||||
install -d "$pkgdir/etc/mkinitcpio.d"
|
||||
|
||||
msg "Generating $pkgname.preset..."
|
||||
cat > "$pkgdir/etc/mkinitcpio.d/$pkgname.preset" <<EOF
|
||||
# mkinitcpio preset file for $pkgname
|
||||
|
||||
|
||||
ALL_config="/etc/mkinitcpio.conf"
|
||||
ALL_kver="/boot/vmlinuz-$pkgname"
|
||||
|
||||
PRESETS=('default')
|
||||
|
||||
#default_config="/etc/mkinitcpio.conf"
|
||||
default_image="/boot/initramfs-$pkgname.img"
|
||||
COMPRESSION="lz4" # since kernel 2.6.34
|
||||
|
||||
EOF
|
||||
|
||||
msg "Generating $pkgname.kver..."
|
||||
echo -e "# DO NOT EDIT THIS FILE\nALL_kver='$_kernver'" \
|
||||
> "$pkgdir/etc/mkinitcpio.d/$pkgname.kver"
|
||||
|
||||
|
||||
|
||||
#######################
|
||||
# Update install file
|
||||
#######################
|
||||
msg "Updating install file..."
|
||||
sed -ri "s/^(pkgname=).*$/\1$pkgname/" "$startdir/$pkgname.install"
|
||||
sed -ri "s/^(kernver=).*$/\1$_kernver/" "$startdir/$pkgname.install"
|
||||
|
||||
|
||||
|
||||
#######################
|
||||
# Remove the firmware
|
||||
#######################
|
||||
|
||||
# remove the firmware
|
||||
rm -rf "${pkgdir}/lib/firmware"
|
||||
if [[ -n $_make_modules ]]; then
|
||||
# Now we call depmod...
|
||||
depmod -b "${pkgdir}" -F System.map "${_kernver}"
|
||||
|
||||
# move module tree /lib -> /usr/lib
|
||||
mkdir -p "${pkgdir}/usr"
|
||||
mv "${pkgdir}/lib" "${pkgdir}/usr/"
|
||||
fi
|
||||
|
||||
}
|
||||
|
||||
|
||||
_generic_package_kernel-headers() {
|
||||
pkgdesc="Header files and scripts for building modules for $pkgbase"
|
||||
depends=("$pkgbase")
|
||||
|
||||
# set required variables
|
||||
_generic_package_initialization
|
||||
|
||||
#############################################################
|
||||
# Provide linux-headers
|
||||
# (probably someone wants to use this kernel exclusively?)
|
||||
#############################################################
|
||||
provides=("${provides[@]}" "linux-headers=${_kernver//-/_}")
|
||||
|
||||
|
||||
##############################
|
||||
# Install fake kernel source
|
||||
##############################
|
||||
install -Dm644 Module.symvers "$pkgdir/usr/src/linux-$_kernver/Module.symvers"
|
||||
install -Dm644 Makefile "$pkgdir/usr/src/linux-$_kernver/Makefile"
|
||||
install -Dm644 kernel/Makefile "$pkgdir/usr/src/linux-$_kernver/kernel/Makefile"
|
||||
install -Dm644 .config "$pkgdir/usr/lib/modules/$_kernver/.config"
|
||||
|
||||
|
||||
|
||||
#######################################################
|
||||
# Install scripts directory and fix permissions on it
|
||||
#######################################################
|
||||
cp -a scripts "$pkgdir/usr/src/linux-$_kernver"
|
||||
|
||||
|
||||
##########################
|
||||
# Install header files
|
||||
##########################
|
||||
msg "Installing header files..."
|
||||
|
||||
for i in net/ipv4/netfilter/ipt_CLUSTERIP.c \
|
||||
$(find include/ net/mac80211/ drivers/{md,media/video/} -iname "*.h") \
|
||||
$(find include/config/ -type f) \
|
||||
$(find . -name "Kconfig*")
|
||||
do
|
||||
mkdir -p "$pkgdir/usr/src/linux-$_kernver/${i%/*}"
|
||||
cp -af "$i" "$pkgdir/usr/src/linux-$_kernver/$i"
|
||||
done
|
||||
|
||||
# required by virtualbox and probably others
|
||||
ln -s "../generated/autoconf.h" "$pkgdir/usr/src/linux-$_kernver/include/linux/"
|
||||
|
||||
|
||||
########################################
|
||||
# Install architecture dependent files
|
||||
########################################
|
||||
msg "Installing architecture files..."
|
||||
mkdir -p "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/kernel"
|
||||
cp -a arch/$_karch/kernel/asm-offsets.s "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/kernel/"
|
||||
|
||||
cp -a arch/$_karch/Makefile* "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/"
|
||||
cp -a arch/$_karch/configs "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/"
|
||||
|
||||
# copy arch includes for external modules and fix the nVidia issue
|
||||
mkdir -p "$pkgdir/usr/src/linux-$_kernver/arch/$_karch"
|
||||
cp -a "arch/$_karch/include" "$pkgdir/usr/src/linux-$_kernver/arch/$_karch/"
|
||||
|
||||
# create a necessary symlink to the arch folder
|
||||
cd "$pkgdir/usr/src/linux-$_kernver/arch"
|
||||
|
||||
if [[ $CARCH = "x86_64" ]]; then
|
||||
ln -s $_karch x86_64
|
||||
else
|
||||
ln -s $_karch i386
|
||||
fi
|
||||
|
||||
cd "$OLDPWD"
|
||||
|
||||
|
||||
################################
|
||||
# Remove unneeded architecures
|
||||
################################
|
||||
msg "Removing unneeded architectures..."
|
||||
for i in "$pkgdir/usr/src/linux-$_kernver/arch/"*; do
|
||||
[[ ${i##*/} =~ ($_karch|Kconfig) ]] || rm -rf "$i"
|
||||
done
|
||||
|
||||
|
||||
############################
|
||||
# Remove .gitignore files
|
||||
############################
|
||||
msg "Removing .gitignore files from kernel source..."
|
||||
find "$pkgdir/usr/src/linux-$_kernver/" -name ".gitignore" -delete
|
||||
|
||||
|
||||
##################################
|
||||
# Create important symlinks
|
||||
##################################
|
||||
msg "Creating important symlinks..."
|
||||
|
||||
# the build symlink needs to be relative
|
||||
if [[ -n $_make_modules ]]; then
|
||||
cd "$pkgdir/usr/lib/modules/$_kernver"
|
||||
rm -rf source build
|
||||
ln -s "/usr/src/linux-$_kernver" build
|
||||
cd "$OLDPWD"
|
||||
fi
|
||||
|
||||
if [[ $_kernver != ${_basekernver}${_pkgext} ]]; then
|
||||
cd "$pkgdir/usr/src"
|
||||
ln -s "linux-$_kernver" "linux-${_basekernel}${_pkgext}"
|
||||
cd "$OLDPWD"
|
||||
fi
|
||||
}
|
||||
|
||||
_generic_package_kernel-docs() {
|
||||
pkgdesc="Kernel hackers manual - HTML documentation that comes with the Linux kernel."
|
||||
depends=("$pkgbase")
|
||||
|
||||
# set required variables
|
||||
_generic_package_initialization
|
||||
|
||||
mkdir -p "$pkgdir/usr/src/linux-$_kernver"
|
||||
cp -a Documentation "$pkgdir/usr/src/linux-$_kernver/"
|
||||
}
|
||||
|
||||
# vim: set fenc=utf-8 ts=2 sw=2 noet:
|
||||
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,163 @@
|
|||
2013-07-20
|
||||
* added CONFIG_SND_HDA_I915=y to kernel config
|
||||
* updated kernel config files to latest linux configs (3.10.1-1)
|
||||
2013-04-15
|
||||
* adjusted PKGBUILD to make use of pacman 4.1's vcs functionality
|
||||
* updated kernel config files to latest linux configs (3.8.7-1)
|
||||
2013-03-28
|
||||
* added bc to makedepends
|
||||
* updated kernel config files to latest linux configs (3.8.4-1)
|
||||
2012-12-12
|
||||
* updated kernel config files to latest linux configs (3.7-1)
|
||||
2012-09-12
|
||||
* added support for arch's kmod 10-1 package (thanks to gun26)
|
||||
* updated kernel config files to latest linux configs (3.5.3-1)
|
||||
2012-07-22
|
||||
* unpatched kernels do not get the -dirty suffix appended to their version anymore
|
||||
* updated kernel config files to latest linux configs (3.5.0-1)
|
||||
2012-07-04
|
||||
* updated kernel config files to latest linux configs (3.4.1-1)
|
||||
* modules are now installed to /usr/lib instead of /lib
|
||||
2012-03-19
|
||||
* updated kernel config files to latest linux configs (3.3-1)
|
||||
2012-01-05
|
||||
* updated kernel config files to latest linux configs (3.2-1)
|
||||
2011-11-05
|
||||
* fixed kernel version in extramodules directory name (thanks to gun26)
|
||||
2011-10-31
|
||||
* removed System.map file (https://bugs.archlinux.org/task/25247)
|
||||
* added extramodules directory (might not be very useful for git kernels,
|
||||
but for stable ones)
|
||||
* append pkgrel to kernel version for stable kernels
|
||||
* updated kernel config files to latest linux configs (3.1-1)
|
||||
2011-07-24
|
||||
* new variable _gitrev that can be used to build a sepcific tag, commit or branch
|
||||
* reworked mkinitcpio preset file
|
||||
* removed kernel26{,-headers} from provides array because arch's linux
|
||||
package conflicts with these
|
||||
2011-07-23
|
||||
* disabled file stripping
|
||||
* renamed initramfs file from /boot/$pkgname.img to /boot/initramfs-$pkgname.img
|
||||
* updated kernel config files to latest linux configs (3.0-1)
|
||||
2011-07-22
|
||||
* make sure the src directory is moved to $srcdir after finishing the build
|
||||
step when the _build_dir config option is in use
|
||||
* changed repository source url to linux.git (was linux-2.6.git) as a
|
||||
result of the 3.0 release
|
||||
2011-07-13
|
||||
* fixed issue with mkinitcpio that led to broken images because the kernel
|
||||
modules could not be found (solved by running depmod before mkinitcpio)
|
||||
* removed kver file
|
||||
* removed arch linux logo
|
||||
* updated kernel config files to latest kernel26 configs (2.6.39.3-1)
|
||||
2011-05-30
|
||||
* renamed kernel26-git to linux-git
|
||||
* worked around make 3.82 bug
|
||||
2011-05-25
|
||||
* make kernel26$_pkgext-{headers,docs} depend on $pkgbase and not on itself
|
||||
2011-05-24
|
||||
* updated kernel config files to latest kernel26 configs (2.6.39-1)
|
||||
2011-01-31
|
||||
* reworked kernel header package generation
|
||||
* support for split (default) and single package kernels
|
||||
* added -docs package
|
||||
* honour MAKEFLAGS
|
||||
2011-01-30
|
||||
* compress kernel modules by default
|
||||
* updated kernel config files to latest kernel26 configs (2.6.37-5)
|
||||
* added crypto and xen headers
|
||||
2010-10-05
|
||||
* Kconfig file was unintentionally removed from /usr/src/linux-$kver/arch (thanks to Kariddi)
|
||||
* updated kernel config files to latest kernel26 configs (2.6.35.7)
|
||||
2010-08-18
|
||||
* removed zc0301 from header copy list (driver was removed: 0d58cef664e01fb1848833455bfdbe1a3d91044c)
|
||||
* updated kernel config files to latest kernel26 configs (2.6.35.2)
|
||||
2010-06-17
|
||||
* updated kernel config files to latest kernel26 configs (2.6.34)
|
||||
* replaced kernel26-firmware-git dependency with linux-firmware-git
|
||||
* introduced new variable _kernel_src that contains the directory where the
|
||||
kernel is acutally built
|
||||
2010-04-07
|
||||
* make config options configurable via command line
|
||||
* updated kernel config files to latest kernel26 configs (2.6.33.2)
|
||||
* removed /boot/kconfig26$_pkgext
|
||||
* replaced dynamic kernel version detection with a static string which is
|
||||
updated at build time
|
||||
* generate the *.preset file from the PKGBUILD
|
||||
* introduced new variable _pkgext to make it easier to adapt this PKGBUILD
|
||||
to other kernel sources
|
||||
2010-01-12
|
||||
* fixed abort when patches directory exists but is empty
|
||||
2009-12-24
|
||||
* included some new required kernel header files
|
||||
* save build directory to $srcdir when building without makepkg's -c flag
|
||||
and if $_build_dir != $srcdir
|
||||
* fixed kernel version detection in install file (again... replaced `\s`
|
||||
with `[:space:]`)
|
||||
2009-12-22
|
||||
* fixed kernel version detection in install file
|
||||
* removed asm-$_karch references
|
||||
* some code cleanup
|
||||
2009-12-05
|
||||
* added kernel26-headers to provides
|
||||
2009-12-04
|
||||
* updated kernel config files to latest kernel26 configs (2.6.32)
|
||||
* reworked install scriptlet
|
||||
2009-11-13
|
||||
* added support for all available configuration commands of the kernel
|
||||
* cleaned up the code
|
||||
2009-10-25
|
||||
* added changelog variable for pacman's next major release
|
||||
* make use of new package function to reduce fakeroot usage
|
||||
* added a warning about initial download size
|
||||
2009-10-09
|
||||
* removed .gitignore files from source tree
|
||||
* updated kernel config files to latest kernel26 configs (2.6.31.3)
|
||||
2009-09-10
|
||||
* updated kernel config files to latest kernel26 configs (2.6.31)
|
||||
2009-08-15
|
||||
* added CONFIG_MEDIA_SUPPORT=m to config files
|
||||
2009-08-14
|
||||
* Since pacman 3.3.0, makepkg starts in $srcdir and not in $startdir
|
||||
anymore. In order to get the kernel sources into $startdir again, it
|
||||
is required to change to this directory
|
||||
* updated kernel config files to latest kernel26 configs (2.6.30.4)
|
||||
2009-06-11
|
||||
* updated kernel config files to latest kernel26 configs (2.6.30)
|
||||
2009-04-26
|
||||
* added new configure option _build_dir
|
||||
* moved the 'patches' folder from $srcdir to $startdir
|
||||
* moved the git repository clone from $srcdir to $startdir
|
||||
* made the linux-2.6 directory a bare repository, which reduces the folder
|
||||
size by more than the half
|
||||
* renamed cloned git repository from kernel26-git to linux-2.6.git
|
||||
2009-04-24
|
||||
* documented configure option _verbose which makes the kernel building
|
||||
process verbose
|
||||
* added a check for an existing package when using _kernel_pkgver to avoid
|
||||
silent overwrites
|
||||
* cleaned up the PKGBUILD
|
||||
* remove the git changelog generation functionality
|
||||
* replace dashes in kernel version with underscores instead of periods
|
||||
* separate date from localversion by a dash when using _date_localversion
|
||||
* provide kernel26
|
||||
* some more documentation
|
||||
2009-04-22
|
||||
* use arch's kernel config files as default
|
||||
* removed patches
|
||||
* added possibility to patch the source without modifying the PKGBUILD
|
||||
* started to make PKGBUILD more readable
|
||||
* documented PKGBUILD configure options
|
||||
* introduced new configure option '_configure_only' which stops the building
|
||||
process after the kernel configuration
|
||||
2009-04-21
|
||||
* moved firmware from package into new dependency kernel26-firmware-git,
|
||||
which makes it possible to install kernel26-git beside arch's stock kernel
|
||||
* fetch latest git changes when there is already a local repository
|
||||
* added some "|| return 1"
|
||||
* kernel26-git.install: extract required kernel version from package files
|
||||
rather than specify it explicitly
|
||||
* renamed config to config.i686
|
||||
* replaced $startdir with $srcdir and $pkgdir
|
||||
* added quotes where necessary
|
||||
* cleaned up the checkout part
|
|
@ -0,0 +1,37 @@
|
|||
pkgname=linux-spica
|
||||
kernver=4.10.8spica-dirty
|
||||
#bootdevice="BOOT_IMAGE=/boot/vmlinuz-$pkgname root=UUID=d670564f-2cb3-4981-9d51-6ed9c1327d47"
|
||||
#option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd intel_iommu=on pci-stub.ids=1002:683f,1002:aab0 vfio_iommu_type1.allow_unsafe_interrupts=1,kvm.ignore_msrs=1"
|
||||
#option="rw quiet clocksource=hpet initrd=EFI/spi-ca/initrd quiet intremap=no_x2apic_optout zswap.enabled=1 zswap.max_pool_percent=25 zswap.compressor=lz4"
|
||||
post_install () {
|
||||
echo ">"
|
||||
echo "> Updating module dependencies. Please wait ..."
|
||||
depmod $kernver
|
||||
|
||||
echo ">"
|
||||
echo "> Generating initramfs, using mkinitcpio. Please wait..."
|
||||
echo ">"
|
||||
mkinitcpio -p $pkgname
|
||||
# echo "> Modifing efibootmgr..."
|
||||
# efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){printf "efibootmgr -b %s -B;echo \">> remove entry : %s\";",m[1],m[2]}'|sh
|
||||
# echo "> Copy efistub from boot"
|
||||
# cp -fv "boot/vmlinuz-$pkgname" "boot/efi/EFI/spi-ca/kernel.efi"
|
||||
# cp -fv "boot/initramfs-$pkgname.img" "boot/efi/EFI/spi-ca/initrd"
|
||||
# echo "> Registering efistub "
|
||||
#echo 'efibootmgr -c -g -d /dev/sda -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel.efi" #-u "$bootdevice $option"'
|
||||
# efibootmgr -c -g -d /dev/sde -p 1 -L "spi-ca_v$kernver" -l "\EFI\spi-ca\kernel" # -u "$bootdevice $option"
|
||||
# echo "> Reordering Bootorder..."
|
||||
# newentry=`efibootmgr|awk 'match($0,/^Boot([0-9a-fA-F]{4})\* spi-ca_v(.+)$/,m){print m[1]}'`
|
||||
# prebootorder=`efibootmgr |grep BootOrder |cut -d : -f 2 |tr -d ' '`
|
||||
# efibootmgr -O
|
||||
# efibootmgr -o ${newentry},${prebootorder}
|
||||
echo "> OK!"
|
||||
}
|
||||
|
||||
post_upgrade() {
|
||||
post_install
|
||||
}
|
||||
|
||||
post_remove() {
|
||||
rm -f -- "boot/initramfs-$pkgname.img"
|
||||
}
|
|
@ -0,0 +1,103 @@
|
|||
From 22ee35ec82fa543b65c1b6d516a086a21f723846 Mon Sep 17 00:00:00 2001
|
||||
From: Paolo Valente <paolo.valente@unimore.it>
|
||||
Date: Tue, 7 Apr 2015 13:39:12 +0200
|
||||
Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.7.0
|
||||
|
||||
Update Kconfig.iosched and do the related Makefile changes to include
|
||||
kernel configuration options for BFQ. Also increase the number of
|
||||
policies supported by the blkio controller so that BFQ can add its
|
||||
own.
|
||||
|
||||
Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
|
||||
Signed-off-by: Arianna Avanzini <avanzini@google.com>
|
||||
---
|
||||
block/Kconfig.iosched | 32 ++++++++++++++++++++++++++++++++
|
||||
block/Makefile | 1 +
|
||||
include/linux/blkdev.h | 2 +-
|
||||
3 files changed, 34 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
|
||||
index 421bef9..0ee5f0f 100644
|
||||
--- a/block/Kconfig.iosched
|
||||
+++ b/block/Kconfig.iosched
|
||||
@@ -39,6 +39,27 @@ config CFQ_GROUP_IOSCHED
|
||||
---help---
|
||||
Enable group IO scheduling in CFQ.
|
||||
|
||||
+config IOSCHED_BFQ
|
||||
+ tristate "BFQ I/O scheduler"
|
||||
+ default n
|
||||
+ ---help---
|
||||
+ The BFQ I/O scheduler tries to distribute bandwidth among
|
||||
+ all processes according to their weights.
|
||||
+ It aims at distributing the bandwidth as desired, independently of
|
||||
+ the disk parameters and with any workload. It also tries to
|
||||
+ guarantee low latency to interactive and soft real-time
|
||||
+ applications. If compiled built-in (saying Y here), BFQ can
|
||||
+ be configured to support hierarchical scheduling.
|
||||
+
|
||||
+config CGROUP_BFQIO
|
||||
+ bool "BFQ hierarchical scheduling support"
|
||||
+ depends on CGROUPS && IOSCHED_BFQ=y
|
||||
+ default n
|
||||
+ ---help---
|
||||
+ Enable hierarchical scheduling in BFQ, using the cgroups
|
||||
+ filesystem interface. The name of the subsystem will be
|
||||
+ bfqio.
|
||||
+
|
||||
choice
|
||||
prompt "Default I/O scheduler"
|
||||
default DEFAULT_CFQ
|
||||
@@ -52,6 +73,16 @@ choice
|
||||
config DEFAULT_CFQ
|
||||
bool "CFQ" if IOSCHED_CFQ=y
|
||||
|
||||
+ config DEFAULT_BFQ
|
||||
+ bool "BFQ" if IOSCHED_BFQ=y
|
||||
+ help
|
||||
+ Selects BFQ as the default I/O scheduler which will be
|
||||
+ used by default for all block devices.
|
||||
+ The BFQ I/O scheduler aims at distributing the bandwidth
|
||||
+ as desired, independently of the disk parameters and with
|
||||
+ any workload. It also tries to guarantee low latency to
|
||||
+ interactive and soft real-time applications.
|
||||
+
|
||||
config DEFAULT_NOOP
|
||||
bool "No-op"
|
||||
|
||||
@@ -61,6 +92,7 @@ config DEFAULT_IOSCHED
|
||||
string
|
||||
default "deadline" if DEFAULT_DEADLINE
|
||||
default "cfq" if DEFAULT_CFQ
|
||||
+ default "bfq" if DEFAULT_BFQ
|
||||
default "noop" if DEFAULT_NOOP
|
||||
|
||||
endmenu
|
||||
diff --git a/block/Makefile b/block/Makefile
|
||||
index 9eda232..4a36683 100644
|
||||
--- a/block/Makefile
|
||||
+++ b/block/Makefile
|
||||
@@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
|
||||
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
|
||||
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
|
||||
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
|
||||
+obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
|
||||
|
||||
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
|
||||
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
|
||||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
|
||||
index 3d9cf32..8d862a0 100644
|
||||
--- a/include/linux/blkdev.h
|
||||
+++ b/include/linux/blkdev.h
|
||||
@@ -45,7 +45,7 @@ struct pr_ops;
|
||||
* Maximum number of blkcg policies allowed to be registered concurrently.
|
||||
* Defined here to simplify include dependency.
|
||||
*/
|
||||
-#define BLKCG_MAX_POLS 2
|
||||
+#define BLKCG_MAX_POLS 3
|
||||
|
||||
struct request;
|
||||
typedef void (rq_end_io_fn)(struct request *, int);
|
||||
--
|
||||
1.9.1
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,708 @@
|
|||
BLD changes for Linux kernel version 4.7
|
||||
|
||||
---
|
||||
|
||||
diff --git a/init/Kconfig b/init/Kconfig
|
||||
index c02d897..edf8697 100644
|
||||
--- a/init/Kconfig
|
||||
+++ b/init/Kconfig
|
||||
@@ -36,6 +36,15 @@ config BROKEN_ON_SMP
|
||||
depends on BROKEN || !SMP
|
||||
default y
|
||||
|
||||
+config BLD
|
||||
+ bool "An alternate CPU load distribution technique for task scheduler"
|
||||
+ depends on SMP
|
||||
+ default y
|
||||
+ help
|
||||
+ This is an alternate CPU load distribution technique based for task
|
||||
+ scheduler based on The Barbershop Load Distribution algorithm. Not
|
||||
+ suitable for NUMA, should work well on SMP.
|
||||
+
|
||||
config INIT_ENV_ARG_LIMIT
|
||||
int
|
||||
default 32 if !UML
|
||||
diff --git a/kernel/sched/bld.h b/kernel/sched/bld.h
|
||||
new file mode 100644
|
||||
index 0000000..f1f9fba
|
||||
--- /dev/null
|
||||
+++ b/kernel/sched/bld.h
|
||||
@@ -0,0 +1,215 @@
|
||||
+#ifdef CONFIG_BLD
|
||||
+
|
||||
+static DEFINE_RWLOCK(rt_list_lock);
|
||||
+static LIST_HEAD(rt_rq_head);
|
||||
+static LIST_HEAD(cfs_rq_head);
|
||||
+static DEFINE_RWLOCK(cfs_list_lock);
|
||||
+
|
||||
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
|
||||
+{
|
||||
+ return cfs_rq->rq;
|
||||
+}
|
||||
+#else
|
||||
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
|
||||
+{
|
||||
+ return container_of(cfs_rq, struct rq, cfs);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
+#ifdef CONFIG_RT_GROUP_SCHED
|
||||
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
|
||||
+{
|
||||
+ return rt_rq->rq;
|
||||
+}
|
||||
+#else
|
||||
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
|
||||
+{
|
||||
+ return container_of(rt_rq, struct rq, rt);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
+static int select_cpu_for_wakeup(int task_type, struct cpumask *mask)
|
||||
+{
|
||||
+ int cpu = smp_processor_id(), i;
|
||||
+ unsigned long load, varload;
|
||||
+ struct rq *rq;
|
||||
+
|
||||
+ if (task_type) {
|
||||
+ varload = ULONG_MAX;
|
||||
+ for_each_cpu(i, mask) {
|
||||
+ rq = cpu_rq(i);
|
||||
+ load = rq->cfs.load.weight;
|
||||
+ if (load < varload) {
|
||||
+ varload = load;
|
||||
+ cpu = i;
|
||||
+ }
|
||||
+ }
|
||||
+ } else {
|
||||
+ /* Here's an attempt to get a CPU within the mask where
|
||||
+ * we can preempt easily. To achieve this we tried to
|
||||
+ * maintain a lowbit, which indicate the lowest bit set on
|
||||
+ * array bitmap. Since all CPUs contains high priority
|
||||
+ * kernel threads therefore we eliminate 0, so it might not
|
||||
+ * be right every time, but it's just an indicator.
|
||||
+ */
|
||||
+ varload = 1;
|
||||
+
|
||||
+ for_each_cpu(i, mask) {
|
||||
+ rq = cpu_rq(i);
|
||||
+ load = rq->rt.lowbit;
|
||||
+ if (load >= varload) {
|
||||
+ varload = load;
|
||||
+ cpu = i;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_cfs(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ struct cfs_rq *cfs;
|
||||
+ unsigned long flags;
|
||||
+ unsigned int cpu = smp_processor_id();
|
||||
+
|
||||
+ read_lock_irqsave(&cfs_list_lock, flags);
|
||||
+ list_for_each_entry(cfs, &cfs_rq_head, bld_cfs_list) {
|
||||
+ cpu = cpu_of(rq_of_cfs(cfs));
|
||||
+ if (cpu_online(cpu))
|
||||
+ break;
|
||||
+ }
|
||||
+ read_unlock_irqrestore(&cfs_list_lock, flags);
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_rt(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ struct rt_rq *rt;
|
||||
+ unsigned long flags;
|
||||
+ unsigned int cpu = smp_processor_id();
|
||||
+
|
||||
+ read_lock_irqsave(&rt_list_lock, flags);
|
||||
+ list_for_each_entry(rt, &rt_rq_head, bld_rt_list) {
|
||||
+ cpu = cpu_of(rq_of_rt(rt));
|
||||
+ if (cpu_online(cpu))
|
||||
+ break;
|
||||
+ }
|
||||
+ read_unlock_irqrestore(&rt_list_lock, flags);
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_domain(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ unsigned int cpu = smp_processor_id(), want_affine = 0;
|
||||
+ struct cpumask *tmpmask;
|
||||
+
|
||||
+ if (p->nr_cpus_allowed == 1)
|
||||
+ return task_cpu(p);
|
||||
+
|
||||
+ if (sd_flags & SD_BALANCE_WAKE) {
|
||||
+ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
|
||||
+ want_affine = 1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (want_affine)
|
||||
+ tmpmask = tsk_cpus_allowed(p);
|
||||
+ else
|
||||
+ tmpmask = sched_domain_span(cpu_rq(task_cpu(p))->sd);
|
||||
+
|
||||
+ if (rt_task(p))
|
||||
+ cpu = select_cpu_for_wakeup(0, tmpmask);
|
||||
+ else
|
||||
+ cpu = select_cpu_for_wakeup(1, tmpmask);
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static void track_load_rt(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ int firstbit;
|
||||
+ struct rt_rq *first;
|
||||
+ struct rt_prio_array *array = &rq->rt.active;
|
||||
+
|
||||
+ first = list_entry(rt_rq_head.next, struct rt_rq, bld_rt_list);
|
||||
+ firstbit = sched_find_first_bit(array->bitmap);
|
||||
+
|
||||
+ /* Maintaining rt.lowbit */
|
||||
+ if (firstbit > 0 && firstbit <= rq->rt.lowbit)
|
||||
+ rq->rt.lowbit = firstbit;
|
||||
+
|
||||
+ if (rq->rt.lowbit < first->lowbit) {
|
||||
+ write_lock_irqsave(&rt_list_lock, flag);
|
||||
+ list_del(&rq->rt.bld_rt_list);
|
||||
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
|
||||
+ write_unlock_irqrestore(&rt_list_lock, flag);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static int bld_get_cpu(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ unsigned int cpu;
|
||||
+
|
||||
+ if (sd_flags == SD_BALANCE_WAKE || (sd_flags == SD_BALANCE_EXEC && (get_nr_threads(p) > 1)))
|
||||
+ cpu = bld_pick_cpu_domain(p, sd_flags, wake_flags);
|
||||
+ else {
|
||||
+ if (rt_task(p))
|
||||
+ cpu = bld_pick_cpu_rt(p, sd_flags, wake_flags);
|
||||
+ else
|
||||
+ cpu = bld_pick_cpu_cfs(p, sd_flags, wake_flags);
|
||||
+ }
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static void bld_track_load_activate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ if (rt_task(p)) {
|
||||
+ track_load_rt(rq, p);
|
||||
+ } else {
|
||||
+ if (rq->cfs.pos != 2) {
|
||||
+ struct cfs_rq *last;
|
||||
+ last = list_entry(cfs_rq_head.prev, struct cfs_rq, bld_cfs_list);
|
||||
+ if (rq->cfs.load.weight >= last->load.weight) {
|
||||
+ write_lock_irqsave(&cfs_list_lock, flag);
|
||||
+ list_del(&rq->cfs.bld_cfs_list);
|
||||
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 2; last->pos = 1;
|
||||
+ write_unlock_irqrestore(&cfs_list_lock, flag);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ if (rt_task(p)) {
|
||||
+ track_load_rt(rq, p);
|
||||
+ } else {
|
||||
+ if (rq->cfs.pos != 0) {
|
||||
+ struct cfs_rq *first;
|
||||
+ first = list_entry(cfs_rq_head.next, struct cfs_rq, bld_cfs_list);
|
||||
+ if (rq->cfs.load.weight <= first->load.weight) {
|
||||
+ write_lock_irqsave(&cfs_list_lock, flag);
|
||||
+ list_del(&rq->cfs.bld_cfs_list);
|
||||
+ list_add(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 0; first->pos = 1;
|
||||
+ write_unlock_irqrestore(&cfs_list_lock, flag);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+#else
|
||||
+static inline void bld_track_load_activate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+}
|
||||
+
|
||||
+static inline void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+}
|
||||
+#endif /* CONFIG_BLD */
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index 97ee9ac..b2ddabc 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -24,6 +24,8 @@
|
||||
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
|
||||
* 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
|
||||
* Thomas Gleixner, Mike Kravetz
|
||||
+ * 2012-Feb The Barbershop Load Distribution (BLD) algorithm - an alternate
|
||||
+ * CPU load distribution technique for kernel scheduler by Rakib Mullick.
|
||||
*/
|
||||
|
||||
#include <linux/kasan.h>
|
||||
@@ -86,6 +88,7 @@
|
||||
#include "sched.h"
|
||||
#include "../workqueue_internal.h"
|
||||
#include "../smpboot.h"
|
||||
+#include "bld.h"
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/sched.h>
|
||||
@@ -750,6 +753,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
if (!(flags & ENQUEUE_RESTORE))
|
||||
sched_info_queued(rq, p);
|
||||
p->sched_class->enqueue_task(rq, p, flags);
|
||||
+ if (!dl_task(p))
|
||||
+ bld_track_load_activate(rq, p);
|
||||
}
|
||||
|
||||
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
@@ -758,6 +763,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
if (!(flags & DEQUEUE_SAVE))
|
||||
sched_info_dequeued(rq, p);
|
||||
p->sched_class->dequeue_task(rq, p, flags);
|
||||
+ if (!dl_task(p))
|
||||
+ bld_track_load_deactivate(rq, p);
|
||||
}
|
||||
|
||||
void activate_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
@@ -1587,11 +1594,17 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
|
||||
{
|
||||
lockdep_assert_held(&p->pi_lock);
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
if (tsk_nr_cpus_allowed(p) > 1)
|
||||
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
|
||||
else
|
||||
cpu = cpumask_any(tsk_cpus_allowed(p));
|
||||
-
|
||||
+#else
|
||||
+ if (dl_task(p))
|
||||
+ cpu = dl_sched_class.select_task_rq(p, cpu, sd_flags, wake_flags);
|
||||
+ else
|
||||
+ cpu = bld_get_cpu(p, sd_flags, wake_flags);
|
||||
+#endif
|
||||
/*
|
||||
* In order not to call set_task_cpu() on a blocking task we need
|
||||
* to rely on ttwu() to place the task on a valid ->cpus_allowed
|
||||
@@ -1794,7 +1807,11 @@ void scheduler_ipi(void)
|
||||
*/
|
||||
preempt_fold_need_resched();
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
|
||||
+#else
|
||||
+ if (llist_empty(&this_rq()->wake_list))
|
||||
+#endif
|
||||
return;
|
||||
|
||||
/*
|
||||
@@ -1816,13 +1833,16 @@ void scheduler_ipi(void)
|
||||
/*
|
||||
* Check if someone kicked us for doing the nohz idle load balance.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
if (unlikely(got_nohz_idle_kick())) {
|
||||
this_rq()->idle_balance = 1;
|
||||
raise_softirq_irqoff(SCHED_SOFTIRQ);
|
||||
}
|
||||
+#endif
|
||||
irq_exit();
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
|
||||
{
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
@@ -1836,6 +1856,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
|
||||
trace_sched_wake_idle_without_ipi(cpu);
|
||||
}
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
void wake_up_if_idle(int cpu)
|
||||
{
|
||||
@@ -1872,7 +1893,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
struct pin_cookie cookie;
|
||||
|
||||
-#if defined(CONFIG_SMP)
|
||||
+#if defined(CONFIG_SMP) && !defined(CONFIG_BLD)
|
||||
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
|
||||
sched_clock_cpu(cpu); /* sync clocks x-cpu */
|
||||
ttwu_queue_remote(p, cpu, wake_flags);
|
||||
@@ -2394,7 +2415,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
|
||||
* Silence PROVE_RCU.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
- set_task_cpu(p, cpu);
|
||||
+ __set_task_cpu(p, cpu);
|
||||
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
|
||||
|
||||
#ifdef CONFIG_SCHED_INFO
|
||||
@@ -2941,7 +2962,14 @@ void sched_exec(void)
|
||||
int dest_cpu;
|
||||
|
||||
raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
+#ifndef CONFIG_BLD
|
||||
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
|
||||
+#else
|
||||
+ if (dl_task(p))
|
||||
+ dest_cpu = task_cpu(p);
|
||||
+ else
|
||||
+ dest_cpu = bld_get_cpu(p, SD_BALANCE_EXEC, 0);
|
||||
+#endif
|
||||
if (dest_cpu == smp_processor_id())
|
||||
goto unlock;
|
||||
|
||||
@@ -3030,8 +3058,10 @@ void scheduler_tick(void)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
rq->idle_balance = idle_cpu(cpu);
|
||||
+#ifndef CONFIG_BLD
|
||||
trigger_load_balance(rq);
|
||||
#endif
|
||||
+#endif
|
||||
rq_last_tick_reset(rq);
|
||||
}
|
||||
|
||||
@@ -7262,7 +7292,9 @@ int sched_cpu_dying(unsigned int cpu)
|
||||
raw_spin_unlock_irqrestore(&rq->lock, flags);
|
||||
calc_load_migrate(rq);
|
||||
update_max_interval();
|
||||
+#ifndef CONFIG_BLD
|
||||
nohz_balance_exit_idle(cpu);
|
||||
+#endif
|
||||
hrtick_clear(rq);
|
||||
return 0;
|
||||
}
|
||||
@@ -7468,6 +7500,15 @@ void __init sched_init(void)
|
||||
#endif /* CONFIG_SMP */
|
||||
init_rq_hrtick(rq);
|
||||
atomic_set(&rq->nr_iowait, 0);
|
||||
+#ifdef CONFIG_BLD
|
||||
+ INIT_LIST_HEAD(&rq->cfs.bld_cfs_list);
|
||||
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 0;
|
||||
+
|
||||
+ INIT_LIST_HEAD(&rq->rt.bld_rt_list);
|
||||
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
|
||||
+ rq->rt.lowbit = INT_MAX;
|
||||
+#endif
|
||||
}
|
||||
|
||||
set_load_weight(&init_task);
|
||||
@@ -7510,6 +7551,9 @@ void __init sched_init(void)
|
||||
init_schedstats();
|
||||
|
||||
scheduler_running = 1;
|
||||
+#ifdef CONFIG_BLD
|
||||
+ printk(KERN_INFO "BLD: An Alternate CPU load distributor activated.\n");
|
||||
+#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
|
||||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
||||
index c8c5d2d..5b694b3 100644
|
||||
--- a/kernel/sched/fair.c
|
||||
+++ b/kernel/sched/fair.c
|
||||
@@ -4880,6 +4880,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
/*
|
||||
* effective_load() calculates the load change as seen from the root_task_group
|
||||
@@ -5411,6 +5412,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
|
||||
|
||||
return new_cpu;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
/*
|
||||
* Called immediately before a task is migrated to a new cpu; task_cpu(p) and
|
||||
@@ -5741,6 +5743,7 @@ idle:
|
||||
* further scheduler activity on it and we're being very careful to
|
||||
* re-start the picking loop.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
lockdep_unpin_lock(&rq->lock, cookie);
|
||||
new_tasks = idle_balance(rq);
|
||||
lockdep_repin_lock(&rq->lock, cookie);
|
||||
@@ -5754,7 +5757,7 @@ idle:
|
||||
|
||||
if (new_tasks > 0)
|
||||
goto again;
|
||||
-
|
||||
+#endif /* CONFIG_BLD */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -6415,8 +6418,9 @@ static unsigned long task_h_load(struct task_struct *p)
|
||||
}
|
||||
#endif
|
||||
|
||||
-/********** Helpers for find_busiest_group ************************/
|
||||
+#ifndef CONFIG_BLD
|
||||
|
||||
+/********** Helpers for find_busiest_group ************************/
|
||||
enum group_type {
|
||||
group_other = 0,
|
||||
group_imbalanced,
|
||||
@@ -6507,6 +6511,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
|
||||
|
||||
return load_idx;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static unsigned long scale_rt_capacity(int cpu)
|
||||
{
|
||||
@@ -6615,6 +6620,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
|
||||
sdg->sgc->capacity = capacity;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
/*
|
||||
* Check whether the capacity of the rq has been noticeably reduced by side
|
||||
* activity. The imbalance_pct is used for the threshold.
|
||||
@@ -7848,6 +7854,7 @@ static inline int on_null_domain(struct rq *rq)
|
||||
{
|
||||
return unlikely(!rcu_dereference_sched(rq->sd));
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
#ifdef CONFIG_NO_HZ_COMMON
|
||||
/*
|
||||
@@ -7856,12 +7863,39 @@ static inline int on_null_domain(struct rq *rq)
|
||||
* needed, they will kick the idle load balancer, which then does idle
|
||||
* load balancing for all the idle CPUs.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
static struct {
|
||||
cpumask_var_t idle_cpus_mask;
|
||||
atomic_t nr_cpus;
|
||||
unsigned long next_balance; /* in jiffy units */
|
||||
} nohz ____cacheline_aligned;
|
||||
|
||||
+void nohz_balance_exit_idle(unsigned int cpu)
|
||||
+{
|
||||
+ if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
|
||||
+ /*
|
||||
+ * Completely isolated CPUs don't ever set, so we must test.
|
||||
+ */
|
||||
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
|
||||
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
|
||||
+ atomic_dec(&nohz.nr_cpus);
|
||||
+ }
|
||||
+ clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static int sched_ilb_notifier(struct notifier_block *nfb,
|
||||
+ unsigned long action, void *hcpu)
|
||||
+{
|
||||
+ switch (action & ~CPU_TASKS_FROZEN) {
|
||||
+ case CPU_DYING:
|
||||
+ nohz_balance_exit_idle(smp_processor_id());
|
||||
+ return NOTIFY_OK;
|
||||
+ default:
|
||||
+ return NOTIFY_DONE;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
static inline int find_new_ilb(void)
|
||||
{
|
||||
int ilb = cpumask_first(nohz.idle_cpus_mask);
|
||||
@@ -7900,20 +7934,6 @@ static void nohz_balancer_kick(void)
|
||||
return;
|
||||
}
|
||||
|
||||
-void nohz_balance_exit_idle(unsigned int cpu)
|
||||
-{
|
||||
- if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
|
||||
- /*
|
||||
- * Completely isolated CPUs don't ever set, so we must test.
|
||||
- */
|
||||
- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
|
||||
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
|
||||
- atomic_dec(&nohz.nr_cpus);
|
||||
- }
|
||||
- clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
- }
|
||||
-}
|
||||
-
|
||||
static inline void set_cpu_sd_state_busy(void)
|
||||
{
|
||||
struct sched_domain *sd;
|
||||
@@ -7930,6 +7950,8 @@ static inline void set_cpu_sd_state_busy(void)
|
||||
unlock:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
+#endif /* NO_HZ_COMMON */
|
||||
|
||||
void set_cpu_sd_state_idle(void)
|
||||
{
|
||||
@@ -7954,6 +7976,7 @@ unlock:
|
||||
*/
|
||||
void nohz_balance_enter_idle(int cpu)
|
||||
{
|
||||
+#ifndef CONFIG_BLD
|
||||
/*
|
||||
* If this cpu is going down, then nothing needs to be done.
|
||||
*/
|
||||
@@ -7972,10 +7995,8 @@ void nohz_balance_enter_idle(int cpu)
|
||||
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
|
||||
atomic_inc(&nohz.nr_cpus);
|
||||
set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
-}
|
||||
#endif
|
||||
-
|
||||
-static DEFINE_SPINLOCK(balancing);
|
||||
+}
|
||||
|
||||
/*
|
||||
* Scale the max load_balance interval with the number of CPUs in the system.
|
||||
@@ -7986,6 +8007,9 @@ void update_max_interval(void)
|
||||
max_load_balance_interval = HZ*num_online_cpus()/10;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
+static DEFINE_SPINLOCK(balancing);
|
||||
+
|
||||
/*
|
||||
* It checks each scheduling domain to see if it is due to be balanced,
|
||||
* and initiates a balancing operation if so.
|
||||
@@ -8273,6 +8297,7 @@ void trigger_load_balance(struct rq *rq)
|
||||
nohz_balancer_kick();
|
||||
#endif
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static void rq_online_fair(struct rq *rq)
|
||||
{
|
||||
@@ -8288,7 +8313,6 @@ static void rq_offline_fair(struct rq *rq)
|
||||
/* Ensure any throttled groups are reachable by pick_next_task */
|
||||
unthrottle_offline_cfs_rqs(rq);
|
||||
}
|
||||
-
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/*
|
||||
@@ -8716,7 +8740,9 @@ const struct sched_class fair_sched_class = {
|
||||
.put_prev_task = put_prev_task_fair,
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
+#ifndef CONFIG_BLD
|
||||
.select_task_rq = select_task_rq_fair,
|
||||
+#endif
|
||||
.migrate_task_rq = migrate_task_rq_fair,
|
||||
|
||||
.rq_online = rq_online_fair,
|
||||
@@ -8777,6 +8803,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
|
||||
|
||||
__init void init_sched_fair_class(void)
|
||||
{
|
||||
+#ifndef CONFIG_BLD
|
||||
#ifdef CONFIG_SMP
|
||||
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
|
||||
|
||||
@@ -8785,5 +8812,5 @@ __init void init_sched_fair_class(void)
|
||||
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
|
||||
#endif
|
||||
#endif /* SMP */
|
||||
-
|
||||
+#endif /* BLD */
|
||||
}
|
||||
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
|
||||
index d5690b7..6f3589e 100644
|
||||
--- a/kernel/sched/rt.c
|
||||
+++ b/kernel/sched/rt.c
|
||||
@@ -1375,6 +1375,7 @@ static void yield_task_rt(struct rq *rq)
|
||||
#ifdef CONFIG_SMP
|
||||
static int find_lowest_rq(struct task_struct *task);
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
static int
|
||||
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
|
||||
{
|
||||
@@ -1430,6 +1431,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
|
||||
out:
|
||||
return cpu;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
@@ -2335,7 +2337,9 @@ const struct sched_class rt_sched_class = {
|
||||
.put_prev_task = put_prev_task_rt,
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
+#ifndef CONFIG_BLD
|
||||
.select_task_rq = select_task_rq_rt,
|
||||
+#endif
|
||||
|
||||
.set_cpus_allowed = set_cpus_allowed_common,
|
||||
.rq_online = rq_online_rt,
|
||||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
|
||||
index 898c0d2..720d524 100644
|
||||
--- a/kernel/sched/sched.h
|
||||
+++ b/kernel/sched/sched.h
|
||||
@@ -415,9 +415,8 @@ struct cfs_rq {
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
-#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
|
||||
-
|
||||
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
/*
|
||||
* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
|
||||
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
|
||||
@@ -441,6 +440,11 @@ struct cfs_rq {
|
||||
struct list_head throttled_list;
|
||||
#endif /* CONFIG_CFS_BANDWIDTH */
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
+
|
||||
+#ifdef CONFIG_BLD
|
||||
+ struct list_head bld_cfs_list;
|
||||
+ char pos;
|
||||
+#endif
|
||||
};
|
||||
|
||||
static inline int rt_bandwidth_enabled(void)
|
||||
@@ -486,12 +490,16 @@ struct rt_rq {
|
||||
/* Nests inside the rq lock: */
|
||||
raw_spinlock_t rt_runtime_lock;
|
||||
|
||||
+ struct rq *rq;
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
unsigned long rt_nr_boosted;
|
||||
|
||||
- struct rq *rq;
|
||||
struct task_group *tg;
|
||||
#endif
|
||||
+#ifdef CONFIG_BLD
|
||||
+ struct list_head bld_rt_list;
|
||||
+ int lowbit;
|
||||
+#endif
|
||||
};
|
||||
|
||||
/* Deadline class' related fields in a runqueue */
|
|
@ -0,0 +1,77 @@
|
|||
From 33ceb32e7abc13e6a5f27916f49fa7188a13978f Mon Sep 17 00:00:00 2001
|
||||
From: Corinna Vinschen <vinschen@redhat.com>
|
||||
Date: Fri, 3 Jun 2016 09:40:11 -0400
|
||||
Subject: [PATCH] igb: Allow to remove administratively set MAC on VFs
|
||||
|
||||
Before libvirt modifies the MAC address and vlan tag for an SRIOV VF
|
||||
for use by a virtual machine (either using vfio device assignment or
|
||||
macvtap passthru mode), it saves the current MAC address and vlan tag
|
||||
so that it can reset them to their original value when the guest is
|
||||
done. (libvirt can't leave the VF MAC set to the value used by the
|
||||
now-defunct guest (since it may be started again later using a
|
||||
different VF), but it certainly shouldn't just pick any random value,
|
||||
either. So it saves the state of everything prior to using the VF, and
|
||||
resets it to that.)
|
||||
|
||||
The igb driver initializes the MAC addresses of all VFs to
|
||||
00:00:00:00:00:00, and reports that when asked (via an RTM_GETLINK
|
||||
netlink message, also visible in the list of VFs in the output of "ip
|
||||
link show"). But when libvirt attempts to restore the MAC address back
|
||||
to 00:00:00:00:00:00 (using an RTM_SETLINK netlink message) the kernel
|
||||
responds with "Invalid argument".
|
||||
|
||||
However, to allow libvirt to revert to the original state, we need a
|
||||
way to remove the administrative set MAC on a VF, to allow normal host
|
||||
operation again, and to reset/overwrite the VF MAC via VF netdev.
|
||||
|
||||
This patch implements the aforementioned scenario by allowing to set
|
||||
the VF MAC to 00:00:00:00:00:00 via RTM_SETLINK on the PF.
|
||||
igb_ndo_set_vf_mac resets the IGB_VF_FLAG_PF_SET_MAC flag to 0,
|
||||
so it's possible to reset the VF MAC back to the original value via
|
||||
the VF netdev.
|
||||
|
||||
Signed-off-by: Corinna Vinschen <vinschen@redhat.com>
|
||||
---
|
||||
drivers/net/ethernet/intel/igb/igb_main.c | 25 ++++++++++++++++++++-----
|
||||
1 file changed, 20 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
|
||||
index 8e96c35..7be9279 100644
|
||||
--- a/drivers/net/ethernet/intel/igb/igb_main.c
|
||||
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
|
||||
@@ -7890,12 +7890,27 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
|
||||
static int igb_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
|
||||
{
|
||||
struct igb_adapter *adapter = netdev_priv(netdev);
|
||||
- if (!is_valid_ether_addr(mac) || (vf >= adapter->vfs_allocated_count))
|
||||
+
|
||||
+ if (vf >= adapter->vfs_allocated_count)
|
||||
+ return -EINVAL;
|
||||
+ /* Setting the VF MAC to 0 reverts the IGB_VF_FLAG_PF_SET_MAC
|
||||
+ flag and allows to overwrite the MAC via VF netdev. This
|
||||
+ is necessary to allow libvirt a way to restore the original
|
||||
+ MAC after unbinding vfio-pci and reloading igbvf after shutting
|
||||
+ down a VM. */
|
||||
+ if (is_zero_ether_addr(mac)) {
|
||||
+ adapter->vf_data[vf].flags &= ~IGB_VF_FLAG_PF_SET_MAC;
|
||||
+ dev_info(&adapter->pdev->dev,
|
||||
+ "remove administratively set MAC on VF %d\n",
|
||||
+ vf);
|
||||
+ } else if (is_valid_ether_addr (mac)) {
|
||||
+ adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
|
||||
+ dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n",
|
||||
+ mac, vf);
|
||||
+ dev_info(&adapter->pdev->dev,
|
||||
+ "Reload the VF driver to make this change effective.");
|
||||
+ } else
|
||||
return -EINVAL;
|
||||
- adapter->vf_data[vf].flags |= IGB_VF_FLAG_PF_SET_MAC;
|
||||
- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf);
|
||||
- dev_info(&adapter->pdev->dev,
|
||||
- "Reload the VF driver to make this change effective.");
|
||||
if (test_bit(__IGB_DOWN, &adapter->state)) {
|
||||
dev_warn(&adapter->pdev->dev,
|
||||
"The VF MAC address has been set, but the PF device is not up.\n");
|
||||
--
|
||||
1.8.3.1
|
||||
|
|
@ -0,0 +1,525 @@
|
|||
WARNING
|
||||
This patch works with gcc versions 4.9+ and with kernel version 3.15+ and should
|
||||
NOT be applied when compiling on older versions of gcc due to key name changes
|
||||
of the march flags introduced with the version 4.9 release of gcc.[1]
|
||||
|
||||
Use the older version of this patch hosted on the same github for older
|
||||
versions of gcc.
|
||||
|
||||
FEATURES
|
||||
This patch adds additional CPU options to the Linux kernel accessible under:
|
||||
Processor type and features --->
|
||||
Processor family --->
|
||||
|
||||
The expanded microarchitectures include:
|
||||
* AMD Improved K8-family
|
||||
* AMD K10-family
|
||||
* AMD Family 10h (Barcelona)
|
||||
* AMD Family 14h (Bobcat)
|
||||
* AMD Family 16h (Jaguar)
|
||||
* AMD Family 15h (Bulldozer)
|
||||
* AMD Family 15h (Piledriver)
|
||||
* AMD Family 15h (Steamroller)
|
||||
* AMD Family 15h (Excavator)
|
||||
* AMD Family 17h (Zen)
|
||||
* Intel Silvermont low-power processors
|
||||
* Intel 1st Gen Core i3/i5/i7 (Nehalem)
|
||||
* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
|
||||
* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
|
||||
* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
|
||||
* Intel 4th Gen Core i3/i5/i7 (Haswell)
|
||||
* Intel 5th Gen Core i3/i5/i7 (Broadwell)
|
||||
* Intel 6th Gen Core i3/i5.i7 (Skylake)
|
||||
|
||||
It also offers to compile passing the 'native' option which, "selects the CPU
|
||||
to generate code for at compilation time by determining the processor type of
|
||||
the compiling machine. Using -march=native enables all instruction subsets
|
||||
supported by the local machine and will produce code optimized for the local
|
||||
machine under the constraints of the selected instruction set."[3]
|
||||
|
||||
MINOR NOTES
|
||||
This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
|
||||
changes. Note that upstream is using the deprecated 'match=atom' flags when I
|
||||
believe it should use the newer 'march=bonnell' flag for atom processors.[2]
|
||||
|
||||
It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The
|
||||
recommendation is to the 'atom' option instead.
|
||||
|
||||
BENEFITS
|
||||
Small but real speed increases are measurable using a make endpoint comparing
|
||||
a generic kernel to one built with one of the respective microarchs.
|
||||
|
||||
See the following experimental evidence supporting this statement:
|
||||
https://github.com/graysky2/kernel_gcc_patch
|
||||
|
||||
REQUIREMENTS
|
||||
linux version >=3.15
|
||||
gcc version >=4.9
|
||||
|
||||
ACKNOWLEDGMENTS
|
||||
This patch builds on the seminal work by Jeroen.[5]
|
||||
|
||||
REFERENCES
|
||||
1. https://gcc.gnu.org/gcc-4.9/changes.html
|
||||
2. https://bugzilla.kernel.org/show_bug.cgi?id=77461
|
||||
3. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
|
||||
4. https://github.com/graysky2/kernel_gcc_patch/issues/15
|
||||
5. http://www.linuxforge.net/docs/linux/linux-gcc.php
|
||||
|
||||
--- a/arch/x86/include/asm/module.h 2016-12-11 14:17:54.000000000 -0500
|
||||
+++ b/arch/x86/include/asm/module.h 2016-12-20 15:17:16.567422788 -0500
|
||||
@@ -15,6 +15,24 @@
|
||||
#define MODULE_PROC_FAMILY "586MMX "
|
||||
#elif defined CONFIG_MCORE2
|
||||
#define MODULE_PROC_FAMILY "CORE2 "
|
||||
+#elif defined CONFIG_MNATIVE
|
||||
+#define MODULE_PROC_FAMILY "NATIVE "
|
||||
+#elif defined CONFIG_MNEHALEM
|
||||
+#define MODULE_PROC_FAMILY "NEHALEM "
|
||||
+#elif defined CONFIG_MWESTMERE
|
||||
+#define MODULE_PROC_FAMILY "WESTMERE "
|
||||
+#elif defined CONFIG_MSILVERMONT
|
||||
+#define MODULE_PROC_FAMILY "SILVERMONT "
|
||||
+#elif defined CONFIG_MSANDYBRIDGE
|
||||
+#define MODULE_PROC_FAMILY "SANDYBRIDGE "
|
||||
+#elif defined CONFIG_MIVYBRIDGE
|
||||
+#define MODULE_PROC_FAMILY "IVYBRIDGE "
|
||||
+#elif defined CONFIG_MHASWELL
|
||||
+#define MODULE_PROC_FAMILY "HASWELL "
|
||||
+#elif defined CONFIG_MBROADWELL
|
||||
+#define MODULE_PROC_FAMILY "BROADWELL "
|
||||
+#elif defined CONFIG_MSKYLAKE
|
||||
+#define MODULE_PROC_FAMILY "SKYLAKE "
|
||||
#elif defined CONFIG_MATOM
|
||||
#define MODULE_PROC_FAMILY "ATOM "
|
||||
#elif defined CONFIG_M686
|
||||
@@ -33,6 +51,26 @@
|
||||
#define MODULE_PROC_FAMILY "K7 "
|
||||
#elif defined CONFIG_MK8
|
||||
#define MODULE_PROC_FAMILY "K8 "
|
||||
+#elif defined CONFIG_MK8SSE3
|
||||
+#define MODULE_PROC_FAMILY "K8SSE3 "
|
||||
+#elif defined CONFIG_MK10
|
||||
+#define MODULE_PROC_FAMILY "K10 "
|
||||
+#elif defined CONFIG_MBARCELONA
|
||||
+#define MODULE_PROC_FAMILY "BARCELONA "
|
||||
+#elif defined CONFIG_MBOBCAT
|
||||
+#define MODULE_PROC_FAMILY "BOBCAT "
|
||||
+#elif defined CONFIG_MBULLDOZER
|
||||
+#define MODULE_PROC_FAMILY "BULLDOZER "
|
||||
+#elif defined CONFIG_MPILEDRIVER
|
||||
+#define MODULE_PROC_FAMILY "PILEDRIVER "
|
||||
+#elif defined CONFIG_MSTEAMROLLER
|
||||
+#define MODULE_PROC_FAMILY "STEAMROLLER "
|
||||
+#elif defined CONFIG_MJAGUAR
|
||||
+#define MODULE_PROC_FAMILY "JAGUAR "
|
||||
+#elif defined CONFIG_MEXCAVATOR
|
||||
+#define MODULE_PROC_FAMILY "EXCAVATOR "
|
||||
+#elif defined CONFIG_MZEN
|
||||
+#define MODULE_PROC_FAMILY "ZEN "
|
||||
#elif defined CONFIG_MELAN
|
||||
#define MODULE_PROC_FAMILY "ELAN "
|
||||
#elif defined CONFIG_MCRUSOE
|
||||
--- a/arch/x86/Kconfig.cpu 2016-12-11 14:17:54.000000000 -0500
|
||||
+++ b/arch/x86/Kconfig.cpu 2016-12-20 15:31:19.929002279 -0500
|
||||
@@ -147,9 +147,8 @@ config MPENTIUM4
|
||||
-Paxville
|
||||
-Dempsey
|
||||
|
||||
-
|
||||
config MK6
|
||||
- bool "K6/K6-II/K6-III"
|
||||
+ bool "AMD K6/K6-II/K6-III"
|
||||
depends on X86_32
|
||||
---help---
|
||||
Select this for an AMD K6-family processor. Enables use of
|
||||
@@ -157,7 +156,7 @@ config MK6
|
||||
flags to GCC.
|
||||
|
||||
config MK7
|
||||
- bool "Athlon/Duron/K7"
|
||||
+ bool "AMD Athlon/Duron/K7"
|
||||
depends on X86_32
|
||||
---help---
|
||||
Select this for an AMD Athlon K7-family processor. Enables use of
|
||||
@@ -165,12 +164,83 @@ config MK7
|
||||
flags to GCC.
|
||||
|
||||
config MK8
|
||||
- bool "Opteron/Athlon64/Hammer/K8"
|
||||
+ bool "AMD Opteron/Athlon64/Hammer/K8"
|
||||
---help---
|
||||
Select this for an AMD Opteron or Athlon64 Hammer-family processor.
|
||||
Enables use of some extended instructions, and passes appropriate
|
||||
optimization flags to GCC.
|
||||
|
||||
+config MK8SSE3
|
||||
+ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
|
||||
+ ---help---
|
||||
+ Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
|
||||
+ Enables use of some extended instructions, and passes appropriate
|
||||
+ optimization flags to GCC.
|
||||
+
|
||||
+config MK10
|
||||
+ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
|
||||
+ ---help---
|
||||
+ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
|
||||
+ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
|
||||
+ Enables use of some extended instructions, and passes appropriate
|
||||
+ optimization flags to GCC.
|
||||
+
|
||||
+config MBARCELONA
|
||||
+ bool "AMD Barcelona"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 10h Barcelona processors.
|
||||
+
|
||||
+ Enables -march=barcelona
|
||||
+
|
||||
+config MBOBCAT
|
||||
+ bool "AMD Bobcat"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 14h Bobcat processors.
|
||||
+
|
||||
+ Enables -march=btver1
|
||||
+
|
||||
+config MJAGUAR
|
||||
+ bool "AMD Jaguar"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 16h Jaguar processors.
|
||||
+
|
||||
+ Enables -march=btver2
|
||||
+
|
||||
+config MBULLDOZER
|
||||
+ bool "AMD Bulldozer"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 15h Bulldozer processors.
|
||||
+
|
||||
+ Enables -march=bdver1
|
||||
+
|
||||
+config MPILEDRIVER
|
||||
+ bool "AMD Piledriver"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 15h Piledriver processors.
|
||||
+
|
||||
+ Enables -march=bdver2
|
||||
+
|
||||
+config MSTEAMROLLER
|
||||
+ bool "AMD Steamroller"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 15h Steamroller processors.
|
||||
+
|
||||
+ Enables -march=bdver3
|
||||
+
|
||||
+config MEXCAVATOR
|
||||
+ bool "AMD Excavator"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 15h Excavator processors.
|
||||
+
|
||||
+ Enables -march=bdver4
|
||||
+
|
||||
+config MZEN
|
||||
+ bool "AMD Zen"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 17h Zen processors.
|
||||
+
|
||||
+ Enables -march=znver1
|
||||
+
|
||||
config MCRUSOE
|
||||
bool "Crusoe"
|
||||
depends on X86_32
|
||||
@@ -252,6 +322,7 @@ config MVIAC7
|
||||
|
||||
config MPSC
|
||||
bool "Intel P4 / older Netburst based Xeon"
|
||||
+ select X86_P6_NOP
|
||||
depends on X86_64
|
||||
---help---
|
||||
Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
|
||||
@@ -261,8 +332,19 @@ config MPSC
|
||||
using the cpu family field
|
||||
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
|
||||
|
||||
+config MATOM
|
||||
+ bool "Intel Atom"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for the Intel Atom platform. Intel Atom CPUs have an
|
||||
+ in-order pipelining architecture and thus can benefit from
|
||||
+ accordingly optimized code. Use a recent GCC with specific Atom
|
||||
+ support in order to fully benefit from selecting this option.
|
||||
+
|
||||
config MCORE2
|
||||
- bool "Core 2/newer Xeon"
|
||||
+ bool "Intel Core 2"
|
||||
+ select X86_P6_NOP
|
||||
---help---
|
||||
|
||||
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
|
||||
@@ -270,14 +352,79 @@ config MCORE2
|
||||
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
|
||||
(not a typo)
|
||||
|
||||
-config MATOM
|
||||
- bool "Intel Atom"
|
||||
+ Enables -march=core2
|
||||
+
|
||||
+config MNEHALEM
|
||||
+ bool "Intel Nehalem"
|
||||
+ select X86_P6_NOP
|
||||
---help---
|
||||
|
||||
- Select this for the Intel Atom platform. Intel Atom CPUs have an
|
||||
- in-order pipelining architecture and thus can benefit from
|
||||
- accordingly optimized code. Use a recent GCC with specific Atom
|
||||
- support in order to fully benefit from selecting this option.
|
||||
+ Select this for 1st Gen Core processors in the Nehalem family.
|
||||
+
|
||||
+ Enables -march=nehalem
|
||||
+
|
||||
+config MWESTMERE
|
||||
+ bool "Intel Westmere"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for the Intel Westmere formerly Nehalem-C family.
|
||||
+
|
||||
+ Enables -march=westmere
|
||||
+
|
||||
+config MSILVERMONT
|
||||
+ bool "Intel Silvermont"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for the Intel Silvermont platform.
|
||||
+
|
||||
+ Enables -march=silvermont
|
||||
+
|
||||
+config MSANDYBRIDGE
|
||||
+ bool "Intel Sandy Bridge"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 2nd Gen Core processors in the Sandy Bridge family.
|
||||
+
|
||||
+ Enables -march=sandybridge
|
||||
+
|
||||
+config MIVYBRIDGE
|
||||
+ bool "Intel Ivy Bridge"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 3rd Gen Core processors in the Ivy Bridge family.
|
||||
+
|
||||
+ Enables -march=ivybridge
|
||||
+
|
||||
+config MHASWELL
|
||||
+ bool "Intel Haswell"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 4th Gen Core processors in the Haswell family.
|
||||
+
|
||||
+ Enables -march=haswell
|
||||
+
|
||||
+config MBROADWELL
|
||||
+ bool "Intel Broadwell"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 5th Gen Core processors in the Broadwell family.
|
||||
+
|
||||
+ Enables -march=broadwell
|
||||
+
|
||||
+config MSKYLAKE
|
||||
+ bool "Intel Skylake"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 6th Gen Core processors in the Skylake family.
|
||||
+
|
||||
+ Enables -march=skylake
|
||||
|
||||
config GENERIC_CPU
|
||||
bool "Generic-x86-64"
|
||||
@@ -286,6 +433,19 @@ config GENERIC_CPU
|
||||
Generic x86-64 CPU.
|
||||
Run equally well on all x86-64 CPUs.
|
||||
|
||||
+config MNATIVE
|
||||
+ bool "Native optimizations autodetected by GCC"
|
||||
+ ---help---
|
||||
+
|
||||
+ GCC 4.2 and above support -march=native, which automatically detects
|
||||
+ the optimum settings to use based on your processor. -march=native
|
||||
+ also detects and applies additional settings beyond -march specific
|
||||
+ to your CPU, (eg. -msse4). Unless you have a specific reason not to
|
||||
+ (e.g. distcc cross-compiling), you should probably be using
|
||||
+ -march=native rather than anything listed below.
|
||||
+
|
||||
+ Enables -march=native
|
||||
+
|
||||
endchoice
|
||||
|
||||
config X86_GENERIC
|
||||
@@ -310,7 +470,7 @@ config X86_INTERNODE_CACHE_SHIFT
|
||||
config X86_L1_CACHE_SHIFT
|
||||
int
|
||||
default "7" if MPENTIUM4 || MPSC
|
||||
- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
|
||||
+ default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
|
||||
default "4" if MELAN || M486 || MGEODEGX1
|
||||
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
|
||||
|
||||
@@ -341,45 +501,47 @@ config X86_ALIGNMENT_16
|
||||
|
||||
config X86_INTEL_USERCOPY
|
||||
def_bool y
|
||||
- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
|
||||
+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE
|
||||
|
||||
config X86_USE_PPRO_CHECKSUM
|
||||
def_bool y
|
||||
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
|
||||
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MATOM || MNATIVE
|
||||
|
||||
config X86_USE_3DNOW
|
||||
def_bool y
|
||||
depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
|
||||
|
||||
-#
|
||||
-# P6_NOPs are a relatively minor optimization that require a family >=
|
||||
-# 6 processor, except that it is broken on certain VIA chips.
|
||||
-# Furthermore, AMD chips prefer a totally different sequence of NOPs
|
||||
-# (which work on all CPUs). In addition, it looks like Virtual PC
|
||||
-# does not understand them.
|
||||
-#
|
||||
-# As a result, disallow these if we're not compiling for X86_64 (these
|
||||
-# NOPs do work on all x86-64 capable chips); the list of processors in
|
||||
-# the right-hand clause are the cores that benefit from this optimization.
|
||||
-#
|
||||
config X86_P6_NOP
|
||||
- def_bool y
|
||||
+ default n
|
||||
+ bool "Support for P6_NOPs on Intel chips"
|
||||
depends on X86_64
|
||||
- depends on (MCORE2 || MPENTIUM4 || MPSC)
|
||||
+ depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE)
|
||||
+ ---help---
|
||||
+ P6_NOPs are a relatively minor optimization that require a family >=
|
||||
+ 6 processor, except that it is broken on certain VIA chips.
|
||||
+ Furthermore, AMD chips prefer a totally different sequence of NOPs
|
||||
+ (which work on all CPUs). In addition, it looks like Virtual PC
|
||||
+ does not understand them.
|
||||
+
|
||||
+ As a result, disallow these if we're not compiling for X86_64 (these
|
||||
+ NOPs do work on all x86-64 capable chips); the list of processors in
|
||||
+ the right-hand clause are the cores that benefit from this optimization.
|
||||
+
|
||||
+ Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
|
||||
|
||||
config X86_TSC
|
||||
def_bool y
|
||||
- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
|
||||
+ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM) || X86_64
|
||||
|
||||
config X86_CMPXCHG64
|
||||
def_bool y
|
||||
- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
|
||||
+ depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
|
||||
|
||||
# this should be set for all -march=.. options where the compiler
|
||||
# generates cmov.
|
||||
config X86_CMOV
|
||||
def_bool y
|
||||
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
|
||||
+ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
|
||||
|
||||
config X86_MINIMUM_CPU_FAMILY
|
||||
int
|
||||
--- a/arch/x86/Makefile 2016-12-11 14:17:54.000000000 -0500
|
||||
+++ b/arch/x86/Makefile 2016-12-20 15:26:01.142666491 -0500
|
||||
@@ -104,13 +104,40 @@ else
|
||||
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
|
||||
|
||||
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
|
||||
+ cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
|
||||
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
|
||||
+ cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
|
||||
+ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
|
||||
+ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
|
||||
+ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
|
||||
+ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
|
||||
+ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
|
||||
+ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
|
||||
+ cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
|
||||
+ cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
|
||||
+ cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
|
||||
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
|
||||
|
||||
cflags-$(CONFIG_MCORE2) += \
|
||||
- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
|
||||
- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
|
||||
- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
|
||||
+ $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
|
||||
+ cflags-$(CONFIG_MNEHALEM) += \
|
||||
+ $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
|
||||
+ cflags-$(CONFIG_MWESTMERE) += \
|
||||
+ $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
|
||||
+ cflags-$(CONFIG_MSILVERMONT) += \
|
||||
+ $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
|
||||
+ cflags-$(CONFIG_MSANDYBRIDGE) += \
|
||||
+ $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
|
||||
+ cflags-$(CONFIG_MIVYBRIDGE) += \
|
||||
+ $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
|
||||
+ cflags-$(CONFIG_MHASWELL) += \
|
||||
+ $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
|
||||
+ cflags-$(CONFIG_MBROADWELL) += \
|
||||
+ $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
|
||||
+ cflags-$(CONFIG_MSKYLAKE) += \
|
||||
+ $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
|
||||
+ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
|
||||
+ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
|
||||
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
|
||||
KBUILD_CFLAGS += $(cflags-y)
|
||||
|
||||
--- a/arch/x86/Makefile_32.cpu 2016-12-11 14:17:54.000000000 -0500
|
||||
+++ b/arch/x86/Makefile_32.cpu 2016-12-20 15:27:05.357148298 -0500
|
||||
@@ -23,7 +23,18 @@ cflags-$(CONFIG_MK6) += -march=k6
|
||||
# Please note, that patches that add -march=athlon-xp and friends are pointless.
|
||||
# They make zero difference whatsosever to performance at this time.
|
||||
cflags-$(CONFIG_MK7) += -march=athlon
|
||||
+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
|
||||
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
|
||||
+cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-march=athlon)
|
||||
+cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon)
|
||||
+cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
|
||||
+cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon)
|
||||
+cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon)
|
||||
+cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
|
||||
+cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon)
|
||||
+cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3,-march=athlon)
|
||||
+cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon)
|
||||
+cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1,-march=athlon)
|
||||
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
|
||||
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
|
||||
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
|
||||
@@ -32,8 +43,16 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-
|
||||
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
|
||||
cflags-$(CONFIG_MVIAC7) += -march=i686
|
||||
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
|
||||
-cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
|
||||
- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
|
||||
+cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem)
|
||||
+cflags-$(CONFIG_MWESTMERE) += -march=i686 $(call tune,westmere)
|
||||
+cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont)
|
||||
+cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge)
|
||||
+cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge)
|
||||
+cflags-$(CONFIG_MHASWELL) += -march=i686 $(call tune,haswell)
|
||||
+cflags-$(CONFIG_MBROADWELL) += -march=i686 $(call tune,broadwell)
|
||||
+cflags-$(CONFIG_MSKYLAKE) += -march=i686 $(call tune,skylake)
|
||||
+cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
|
||||
+ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
|
||||
|
||||
# AMD Elan support
|
||||
cflags-$(CONFIG_MELAN) += -march=i486
|
|
@ -0,0 +1,12 @@
|
|||
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
|
||||
index a61447f..1264a36 100644
|
||||
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
|
||||
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
|
||||
@@ -246,6 +246,7 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
|
||||
E1000_STATUS_FUNC_SHIFT;
|
||||
|
||||
/* Set phy->phy_addr and phy->id. */
|
||||
+ igb_write_phy_reg_82580(hw, I347AT4_PAGE_SELECT, 0);
|
||||
ret_val = igb_get_phy_id_82575(hw);
|
||||
if (ret_val)
|
||||
return ret_val;
|
|
@ -0,0 +1,20 @@
|
|||
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
|
||||
index a080f4496fe2..142341f8331b 100644
|
||||
--- a/drivers/pci/msi.c
|
||||
+++ b/drivers/pci/msi.c
|
||||
@@ -645,6 +645,15 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
|
||||
return ret;
|
||||
}
|
||||
|
||||
+ /*
|
||||
+ * The mask can be ignored and PCI 2.3 does not specify mask bits for
|
||||
+ * each MSI interrupt. So in case of hierarchical irqdomains we need
|
||||
+ * to make sure that if masking is not available that the msi message
|
||||
+ * is written prior to setting the MSI enable bit in the device.
|
||||
+ */
|
||||
+ if (pci_msi_ignore_mask || !entry->msi_attrib.maskbit)
|
||||
+ irq_domain_activate_irq(irq_get_irq_data(entry->irq));
|
||||
+
|
||||
/* Set MSI enabled bits */
|
||||
pci_intx_for_msi(dev, 0);
|
||||
pci_msi_set_enable(dev, 1);
|
|
@ -0,0 +1,23 @@
|
|||
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
|
||||
index a61447f..ee44398 100644
|
||||
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
|
||||
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
|
||||
@@ -245,6 +245,17 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
|
||||
hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
|
||||
E1000_STATUS_FUNC_SHIFT;
|
||||
|
||||
+ /* Make sure the PHY is in a good state. Several people have reported
|
||||
+ * firmware leaving the PHY's page select register set to something
|
||||
+ * other than the default of zero, which causes the PHY ID read to
|
||||
+ * access something other than the intended register.
|
||||
+ */
|
||||
+ ret_val = hw->phy.ops.reset(hw);
|
||||
+ if (ret_val) {
|
||||
+ hw_dbg("Error resetting the PHY.\n");
|
||||
+ goto out;
|
||||
+ }
|
||||
+
|
||||
/* Set phy->phy_addr and phy->id. */
|
||||
ret_val = igb_get_phy_id_82575(hw);
|
||||
if (ret_val)
|
||||
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,16 @@
|
|||
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
|
||||
index 5b54254..93ec2d0 100644
|
||||
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
|
||||
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
|
||||
@@ -77,6 +77,10 @@ s32 igb_get_phy_id(struct e1000_hw *hw)
|
||||
s32 ret_val = 0;
|
||||
u16 phy_id;
|
||||
|
||||
+ /* ensure phy page selection to fix misconfigured i210 */
|
||||
+ if (hw->mac.type == e1000_i210)
|
||||
+ phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0);
|
||||
+
|
||||
ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
|
||||
if (ret_val)
|
||||
goto out;
|
||||
|
|
@ -0,0 +1,103 @@
|
|||
From 8500f47272575b4616beb487c483019248d8c501 Mon Sep 17 00:00:00 2001
|
||||
From: Paolo Valente <paolo.valente@unimore.it>
|
||||
Date: Tue, 7 Apr 2015 13:39:12 +0200
|
||||
Subject: [PATCH 1/4] block: cgroups, kconfig, build bits for BFQ-v7r11-4.10.0
|
||||
|
||||
Update Kconfig.iosched and do the related Makefile changes to include
|
||||
kernel configuration options for BFQ. Also increase the number of
|
||||
policies supported by the blkio controller so that BFQ can add its
|
||||
own.
|
||||
|
||||
Signed-off-by: Paolo Valente <paolo.valente@unimore.it>
|
||||
Signed-off-by: Arianna Avanzini <avanzini@google.com>
|
||||
---
|
||||
block/Kconfig.iosched | 32 ++++++++++++++++++++++++++++++++
|
||||
block/Makefile | 1 +
|
||||
include/linux/blkdev.h | 2 +-
|
||||
3 files changed, 34 insertions(+), 1 deletion(-)
|
||||
|
||||
diff --git a/block/Kconfig.iosched b/block/Kconfig.iosched
|
||||
index 421bef9..0ee5f0f 100644
|
||||
--- a/block/Kconfig.iosched
|
||||
+++ b/block/Kconfig.iosched
|
||||
@@ -39,6 +39,27 @@ config CFQ_GROUP_IOSCHED
|
||||
---help---
|
||||
Enable group IO scheduling in CFQ.
|
||||
|
||||
+config IOSCHED_BFQ
|
||||
+ tristate "BFQ I/O scheduler"
|
||||
+ default n
|
||||
+ ---help---
|
||||
+ The BFQ I/O scheduler tries to distribute bandwidth among
|
||||
+ all processes according to their weights.
|
||||
+ It aims at distributing the bandwidth as desired, independently of
|
||||
+ the disk parameters and with any workload. It also tries to
|
||||
+ guarantee low latency to interactive and soft real-time
|
||||
+ applications. If compiled built-in (saying Y here), BFQ can
|
||||
+ be configured to support hierarchical scheduling.
|
||||
+
|
||||
+config CGROUP_BFQIO
|
||||
+ bool "BFQ hierarchical scheduling support"
|
||||
+ depends on CGROUPS && IOSCHED_BFQ=y
|
||||
+ default n
|
||||
+ ---help---
|
||||
+ Enable hierarchical scheduling in BFQ, using the cgroups
|
||||
+ filesystem interface. The name of the subsystem will be
|
||||
+ bfqio.
|
||||
+
|
||||
choice
|
||||
prompt "Default I/O scheduler"
|
||||
default DEFAULT_CFQ
|
||||
@@ -52,6 +73,16 @@ choice
|
||||
config DEFAULT_CFQ
|
||||
bool "CFQ" if IOSCHED_CFQ=y
|
||||
|
||||
+ config DEFAULT_BFQ
|
||||
+ bool "BFQ" if IOSCHED_BFQ=y
|
||||
+ help
|
||||
+ Selects BFQ as the default I/O scheduler which will be
|
||||
+ used by default for all block devices.
|
||||
+ The BFQ I/O scheduler aims at distributing the bandwidth
|
||||
+ as desired, independently of the disk parameters and with
|
||||
+ any workload. It also tries to guarantee low latency to
|
||||
+ interactive and soft real-time applications.
|
||||
+
|
||||
config DEFAULT_NOOP
|
||||
bool "No-op"
|
||||
|
||||
@@ -61,6 +92,7 @@ config DEFAULT_IOSCHED
|
||||
string
|
||||
default "deadline" if DEFAULT_DEADLINE
|
||||
default "cfq" if DEFAULT_CFQ
|
||||
+ default "bfq" if DEFAULT_BFQ
|
||||
default "noop" if DEFAULT_NOOP
|
||||
|
||||
endmenu
|
||||
diff --git a/block/Makefile b/block/Makefile
|
||||
index a827f98..3b14703 100644
|
||||
--- a/block/Makefile
|
||||
+++ b/block/Makefile
|
||||
@@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
|
||||
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
|
||||
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
|
||||
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
|
||||
+obj-$(CONFIG_IOSCHED_BFQ) += bfq-iosched.o
|
||||
|
||||
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
|
||||
obj-$(CONFIG_BLK_CMDLINE_PARSER) += cmdline-parser.o
|
||||
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
|
||||
index 1ca8e8f..8e2d6ed 100644
|
||||
--- a/include/linux/blkdev.h
|
||||
+++ b/include/linux/blkdev.h
|
||||
@@ -47,7 +47,7 @@ struct rq_wb;
|
||||
* Maximum number of blkcg policies allowed to be registered concurrently.
|
||||
* Defined here to simplify include dependency.
|
||||
*/
|
||||
-#define BLKCG_MAX_POLS 2
|
||||
+#define BLKCG_MAX_POLS 3
|
||||
|
||||
typedef void (rq_end_io_fn)(struct request *, int);
|
||||
|
||||
--
|
||||
2.10.0
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,111 @@
|
|||
From a7fb2842267fd275cae9cf44dd3037469f75eeef Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Tue, 1 Nov 2016 12:54:20 +1100
|
||||
Subject: [PATCH 09/25] Implement min and msec hrtimeout un/interruptible
|
||||
schedule timeout variants with a lower resolution of 1ms to work around low
|
||||
Hz time resolutions.
|
||||
|
||||
---
|
||||
include/linux/sched.h | 6 +++++
|
||||
kernel/time/hrtimer.c | 71 +++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
2 files changed, 77 insertions(+)
|
||||
|
||||
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
||||
index d752ef6..46544f4 100644
|
||||
--- a/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -457,6 +457,12 @@ extern signed long schedule_timeout_interruptible(signed long timeout);
|
||||
extern signed long schedule_timeout_killable(signed long timeout);
|
||||
extern signed long schedule_timeout_uninterruptible(signed long timeout);
|
||||
extern signed long schedule_timeout_idle(signed long timeout);
|
||||
+
|
||||
+extern signed long schedule_msec_hrtimeout(signed long timeout);
|
||||
+extern signed long schedule_min_hrtimeout(void);
|
||||
+extern signed long schedule_msec_hrtimeout_interruptible(signed long timeout);
|
||||
+extern signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout);
|
||||
+
|
||||
asmlinkage void schedule(void);
|
||||
extern void schedule_preempt_disabled(void);
|
||||
|
||||
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
|
||||
index c6ecedd..a47f5b3 100644
|
||||
--- a/kernel/time/hrtimer.c
|
||||
+++ b/kernel/time/hrtimer.c
|
||||
@@ -1796,3 +1796,74 @@ int __sched schedule_hrtimeout(ktime_t *expires,
|
||||
return schedule_hrtimeout_range(expires, 0, mode);
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(schedule_hrtimeout);
|
||||
+
|
||||
+/*
|
||||
+ * As per schedule_hrtimeout but takes a millisecond value and returns how
|
||||
+ * many milliseconds are left.
|
||||
+ */
|
||||
+signed long __sched schedule_msec_hrtimeout(signed long timeout)
|
||||
+{
|
||||
+ struct hrtimer_sleeper t;
|
||||
+ int delta, secs, jiffs;
|
||||
+ ktime_t expires;
|
||||
+
|
||||
+ if (!timeout) {
|
||||
+ __set_current_state(TASK_RUNNING);
|
||||
+ return 0;
|
||||
+ }
|
||||
+
|
||||
+ jiffs = msecs_to_jiffies(timeout);
|
||||
+ /*
|
||||
+ * If regular timer resolution is adequate or hrtimer resolution is not
|
||||
+ * (yet) better than Hz, as would occur during startup, use regular
|
||||
+ * timers.
|
||||
+ */
|
||||
+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
|
||||
+ return schedule_timeout(jiffs);
|
||||
+
|
||||
+ secs = timeout / 1000;
|
||||
+ delta = (timeout % 1000) * NSEC_PER_MSEC;
|
||||
+ expires = ktime_set(secs, delta);
|
||||
+
|
||||
+ hrtimer_init_on_stack(&t.timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
|
||||
+ hrtimer_set_expires_range_ns(&t.timer, expires, delta);
|
||||
+
|
||||
+ hrtimer_init_sleeper(&t, current);
|
||||
+
|
||||
+ hrtimer_start_expires(&t.timer, HRTIMER_MODE_REL);
|
||||
+
|
||||
+ if (likely(t.task))
|
||||
+ schedule();
|
||||
+
|
||||
+ hrtimer_cancel(&t.timer);
|
||||
+ destroy_hrtimer_on_stack(&t.timer);
|
||||
+
|
||||
+ __set_current_state(TASK_RUNNING);
|
||||
+
|
||||
+ expires = hrtimer_expires_remaining(&t.timer);
|
||||
+ timeout = ktime_to_ms(expires);
|
||||
+ return timeout < 0 ? 0 : timeout;
|
||||
+}
|
||||
+
|
||||
+EXPORT_SYMBOL(schedule_msec_hrtimeout);
|
||||
+
|
||||
+signed long __sched schedule_min_hrtimeout(void)
|
||||
+{
|
||||
+ return schedule_msec_hrtimeout(1);
|
||||
+}
|
||||
+
|
||||
+EXPORT_SYMBOL(schedule_min_hrtimeout);
|
||||
+
|
||||
+signed long __sched schedule_msec_hrtimeout_interruptible(signed long timeout)
|
||||
+{
|
||||
+ __set_current_state(TASK_INTERRUPTIBLE);
|
||||
+ return schedule_msec_hrtimeout(timeout);
|
||||
+}
|
||||
+EXPORT_SYMBOL(schedule_msec_hrtimeout_interruptible);
|
||||
+
|
||||
+signed long __sched schedule_msec_hrtimeout_uninterruptible(signed long timeout)
|
||||
+{
|
||||
+ __set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
+ return schedule_msec_hrtimeout(timeout);
|
||||
+}
|
||||
+EXPORT_SYMBOL(schedule_msec_hrtimeout_uninterruptible);
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
From a4f3820228ebab3d5d480d720fecebd3f7e71771 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Sat, 5 Nov 2016 09:27:36 +1100
|
||||
Subject: [PATCH 10/25] Special case calls of schedule_timeout(1) to use the
|
||||
min hrtimeout of 1ms, working around low Hz resolutions.
|
||||
|
||||
---
|
||||
kernel/time/timer.c | 15 +++++++++++++--
|
||||
1 file changed, 13 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
|
||||
index ef3128f..3f72c13 100644
|
||||
--- a/kernel/time/timer.c
|
||||
+++ b/kernel/time/timer.c
|
||||
@@ -1750,6 +1750,17 @@ signed long __sched schedule_timeout(signed long timeout)
|
||||
|
||||
expire = timeout + jiffies;
|
||||
|
||||
+ if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
|
||||
+ /*
|
||||
+ * Special case 1 as being a request for the minimum timeout
|
||||
+ * and use highres timers to timeout after 1ms to workaround
|
||||
+ * the granularity of low Hz tick timers.
|
||||
+ */
|
||||
+ if (!schedule_min_hrtimeout())
|
||||
+ return 0;
|
||||
+ goto out_timeout;
|
||||
+ }
|
||||
+
|
||||
setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
|
||||
__mod_timer(&timer, expire, false);
|
||||
schedule();
|
||||
@@ -1757,10 +1768,10 @@ signed long __sched schedule_timeout(signed long timeout)
|
||||
|
||||
/* Remove the timer from the object tracker */
|
||||
destroy_timer_on_stack(&timer);
|
||||
-
|
||||
+out_timeout:
|
||||
timeout = expire - jiffies;
|
||||
|
||||
- out:
|
||||
+out:
|
||||
return timeout < 0 ? 0 : timeout;
|
||||
}
|
||||
EXPORT_SYMBOL(schedule_timeout);
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,54 @@
|
|||
From 534bc9d3e559420eaf57771f48d2c2f549dcc4d2 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Fri, 4 Nov 2016 09:25:54 +1100
|
||||
Subject: [PATCH 11/25] Convert msleep to use hrtimers when active.
|
||||
|
||||
---
|
||||
kernel/time/timer.c | 24 ++++++++++++++++++++++--
|
||||
1 file changed, 22 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
|
||||
index 3f72c13..bc53598 100644
|
||||
--- a/kernel/time/timer.c
|
||||
+++ b/kernel/time/timer.c
|
||||
@@ -1892,7 +1892,19 @@ void __init init_timers(void)
|
||||
*/
|
||||
void msleep(unsigned int msecs)
|
||||
{
|
||||
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
|
||||
+ int jiffs = msecs_to_jiffies(msecs);
|
||||
+ unsigned long timeout;
|
||||
+
|
||||
+ /*
|
||||
+ * Use high resolution timers where the resolution of tick based
|
||||
+ * timers is inadequate.
|
||||
+ */
|
||||
+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
|
||||
+ while (msecs)
|
||||
+ msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
|
||||
+ return;
|
||||
+ }
|
||||
+ timeout = msecs_to_jiffies(msecs) + 1;
|
||||
|
||||
while (timeout)
|
||||
timeout = schedule_timeout_uninterruptible(timeout);
|
||||
@@ -1906,7 +1918,15 @@ EXPORT_SYMBOL(msleep);
|
||||
*/
|
||||
unsigned long msleep_interruptible(unsigned int msecs)
|
||||
{
|
||||
- unsigned long timeout = msecs_to_jiffies(msecs) + 1;
|
||||
+ int jiffs = msecs_to_jiffies(msecs);
|
||||
+ unsigned long timeout;
|
||||
+
|
||||
+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
|
||||
+ while (msecs && !signal_pending(current))
|
||||
+ msecs = schedule_msec_hrtimeout_interruptible(msecs);
|
||||
+ return msecs;
|
||||
+ }
|
||||
+ timeout = msecs_to_jiffies(msecs) + 1;
|
||||
|
||||
while (timeout && !signal_pending(current))
|
||||
timeout = schedule_timeout_interruptible(timeout);
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,226 @@
|
|||
From 8fef7b75352d874af02881de3493f2ce2d47a341 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Mon, 20 Feb 2017 13:28:30 +1100
|
||||
Subject: [PATCH 12/25] Replace all schedule timeout(1) with
|
||||
schedule_min_hrtimeout()
|
||||
|
||||
---
|
||||
drivers/block/swim.c | 6 +++---
|
||||
drivers/char/ipmi/ipmi_msghandler.c | 2 +-
|
||||
drivers/char/ipmi/ipmi_ssif.c | 2 +-
|
||||
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c | 2 +-
|
||||
drivers/gpu/drm/vmwgfx/vmwgfx_irq.c | 2 +-
|
||||
drivers/mfd/ucb1x00-core.c | 2 +-
|
||||
drivers/misc/sgi-xp/xpc_channel.c | 2 +-
|
||||
drivers/net/caif/caif_hsi.c | 2 +-
|
||||
drivers/ntb/test/ntb_perf.c | 2 +-
|
||||
drivers/staging/comedi/drivers/ni_mio_common.c | 2 +-
|
||||
fs/afs/vlocation.c | 2 +-
|
||||
fs/btrfs/extent-tree.c | 2 +-
|
||||
fs/btrfs/inode-map.c | 2 +-
|
||||
sound/usb/line6/pcm.c | 2 +-
|
||||
14 files changed, 16 insertions(+), 16 deletions(-)
|
||||
|
||||
diff --git a/drivers/block/swim.c b/drivers/block/swim.c
|
||||
index b5afd49..7d09955 100644
|
||||
--- a/drivers/block/swim.c
|
||||
+++ b/drivers/block/swim.c
|
||||
@@ -332,7 +332,7 @@ static inline void swim_motor(struct swim __iomem *base,
|
||||
if (swim_readbit(base, MOTOR_ON))
|
||||
break;
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
}
|
||||
} else if (action == OFF) {
|
||||
swim_action(base, MOTOR_OFF);
|
||||
@@ -351,7 +351,7 @@ static inline void swim_eject(struct swim __iomem *base)
|
||||
if (!swim_readbit(base, DISK_IN))
|
||||
break;
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
}
|
||||
swim_select(base, RELAX);
|
||||
}
|
||||
@@ -375,7 +375,7 @@ static inline int swim_step(struct swim __iomem *base)
|
||||
for (wait = 0; wait < HZ; wait++) {
|
||||
|
||||
current->state = TASK_INTERRUPTIBLE;
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
|
||||
swim_select(base, RELAX);
|
||||
if (!swim_readbit(base, STEP))
|
||||
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
|
||||
index 92e53ac..a2418e7 100644
|
||||
--- a/drivers/char/ipmi/ipmi_msghandler.c
|
||||
+++ b/drivers/char/ipmi/ipmi_msghandler.c
|
||||
@@ -2953,7 +2953,7 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
|
||||
/* Current message first, to preserve order */
|
||||
while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
|
||||
/* Wait for the message to clear out. */
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
}
|
||||
|
||||
/* No need for locks, the interface is down. */
|
||||
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
|
||||
index cca6e5b..fd3c7da 100644
|
||||
--- a/drivers/char/ipmi/ipmi_ssif.c
|
||||
+++ b/drivers/char/ipmi/ipmi_ssif.c
|
||||
@@ -1185,7 +1185,7 @@ static int ssif_remove(struct i2c_client *client)
|
||||
|
||||
/* make sure the driver is not looking for flags any more. */
|
||||
while (ssif_info->ssif_state != SSIF_NORMAL)
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
|
||||
ssif_info->stopping = true;
|
||||
del_timer_sync(&ssif_info->retry_timer);
|
||||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
||||
index b6a0806..b5b02cf 100644
|
||||
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
||||
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
|
||||
@@ -235,7 +235,7 @@ static int vmw_fifo_wait_noirq(struct vmw_private *dev_priv,
|
||||
DRM_ERROR("SVGA device lockup.\n");
|
||||
break;
|
||||
}
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
if (interruptible && signal_pending(current)) {
|
||||
ret = -ERESTARTSYS;
|
||||
break;
|
||||
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
|
||||
index 0c7e172..4c1555c 100644
|
||||
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
|
||||
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
|
||||
@@ -156,7 +156,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
|
||||
break;
|
||||
}
|
||||
if (lazy)
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
else if ((++count & 0x0F) == 0) {
|
||||
/**
|
||||
* FIXME: Use schedule_hr_timeout here for
|
||||
diff --git a/drivers/mfd/ucb1x00-core.c b/drivers/mfd/ucb1x00-core.c
|
||||
index d6fb2e1..7ac951b 100644
|
||||
--- a/drivers/mfd/ucb1x00-core.c
|
||||
+++ b/drivers/mfd/ucb1x00-core.c
|
||||
@@ -253,7 +253,7 @@ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
|
||||
break;
|
||||
/* yield to other processes */
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
}
|
||||
|
||||
return UCB_ADC_DAT(val);
|
||||
diff --git a/drivers/misc/sgi-xp/xpc_channel.c b/drivers/misc/sgi-xp/xpc_channel.c
|
||||
index 128d561..38e68e9 100644
|
||||
--- a/drivers/misc/sgi-xp/xpc_channel.c
|
||||
+++ b/drivers/misc/sgi-xp/xpc_channel.c
|
||||
@@ -837,7 +837,7 @@ xpc_allocate_msg_wait(struct xpc_channel *ch)
|
||||
|
||||
atomic_inc(&ch->n_on_msg_allocate_wq);
|
||||
prepare_to_wait(&ch->msg_allocate_wq, &wait, TASK_INTERRUPTIBLE);
|
||||
- ret = schedule_timeout(1);
|
||||
+ ret = schedule_min_hrtimeout();
|
||||
finish_wait(&ch->msg_allocate_wq, &wait);
|
||||
atomic_dec(&ch->n_on_msg_allocate_wq);
|
||||
|
||||
diff --git a/drivers/net/caif/caif_hsi.c b/drivers/net/caif/caif_hsi.c
|
||||
index ddabce7..67fb5ce 100644
|
||||
--- a/drivers/net/caif/caif_hsi.c
|
||||
+++ b/drivers/net/caif/caif_hsi.c
|
||||
@@ -944,7 +944,7 @@ static void cfhsi_wake_down(struct work_struct *work)
|
||||
break;
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
retry--;
|
||||
}
|
||||
|
||||
diff --git a/drivers/ntb/test/ntb_perf.c b/drivers/ntb/test/ntb_perf.c
|
||||
index 434e1d4..2f9543b 100644
|
||||
--- a/drivers/ntb/test/ntb_perf.c
|
||||
+++ b/drivers/ntb/test/ntb_perf.c
|
||||
@@ -308,7 +308,7 @@ static int perf_move_data(struct pthr_ctx *pctx, char __iomem *dst, char *src,
|
||||
if (unlikely((jiffies - last_sleep) > 5 * HZ)) {
|
||||
last_sleep = jiffies;
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
}
|
||||
|
||||
if (unlikely(kthread_should_stop()))
|
||||
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
|
||||
index b2e3828..beae38b 100644
|
||||
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
|
||||
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
|
||||
@@ -4655,7 +4655,7 @@ static int cs5529_wait_for_idle(struct comedi_device *dev)
|
||||
if ((status & NI67XX_CAL_STATUS_BUSY) == 0)
|
||||
break;
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
- if (schedule_timeout(1))
|
||||
+ if (schedule_min_hrtimeout())
|
||||
return -EIO;
|
||||
}
|
||||
if (i == timeout) {
|
||||
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
|
||||
index 45a8639..855d08e 100644
|
||||
--- a/fs/afs/vlocation.c
|
||||
+++ b/fs/afs/vlocation.c
|
||||
@@ -129,7 +129,7 @@ static int afs_vlocation_access_vl_by_id(struct afs_vlocation *vl,
|
||||
if (vl->upd_busy_cnt > 1) {
|
||||
/* second+ BUSY - sleep a little bit */
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
}
|
||||
continue;
|
||||
}
|
||||
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
|
||||
index dcd2e79..16bf891 100644
|
||||
--- a/fs/btrfs/extent-tree.c
|
||||
+++ b/fs/btrfs/extent-tree.c
|
||||
@@ -5952,7 +5952,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
|
||||
|
||||
if (flush != BTRFS_RESERVE_NO_FLUSH &&
|
||||
btrfs_transaction_in_commit(fs_info))
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
|
||||
if (delalloc_lock)
|
||||
mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
|
||||
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
|
||||
index 144b119..03d2e8e 100644
|
||||
--- a/fs/btrfs/inode-map.c
|
||||
+++ b/fs/btrfs/inode-map.c
|
||||
@@ -89,7 +89,7 @@ static int caching_kthread(void *data)
|
||||
btrfs_release_path(path);
|
||||
root->ino_cache_progress = last;
|
||||
up_read(&fs_info->commit_root_sem);
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
goto again;
|
||||
} else
|
||||
continue;
|
||||
diff --git a/sound/usb/line6/pcm.c b/sound/usb/line6/pcm.c
|
||||
index fab53f5..fda1ab5 100644
|
||||
--- a/sound/usb/line6/pcm.c
|
||||
+++ b/sound/usb/line6/pcm.c
|
||||
@@ -131,7 +131,7 @@ static void line6_wait_clear_audio_urbs(struct snd_line6_pcm *line6pcm,
|
||||
if (!alive)
|
||||
break;
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
- schedule_timeout(1);
|
||||
+ schedule_min_hrtimeout();
|
||||
} while (--timeout > 0);
|
||||
if (alive)
|
||||
dev_err(line6pcm->line6->ifcdev,
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,397 @@
|
|||
From 56e8b01452fbb6c1aa85b0a52fbd352fddf7e959 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Mon, 20 Feb 2017 13:29:16 +1100
|
||||
Subject: [PATCH 13/25] Change all schedule_timeout with msecs_to_jiffies
|
||||
potentially under 50ms to use schedule_msec_hrtimeout.
|
||||
|
||||
---
|
||||
drivers/bluetooth/hci_qca.c | 2 +-
|
||||
drivers/char/snsc.c | 4 ++--
|
||||
drivers/media/pci/ivtv/ivtv-ioctl.c | 2 +-
|
||||
drivers/media/pci/ivtv/ivtv-streams.c | 2 +-
|
||||
drivers/net/can/usb/peak_usb/pcan_usb.c | 2 +-
|
||||
drivers/net/usb/lan78xx.c | 2 +-
|
||||
drivers/net/usb/usbnet.c | 2 +-
|
||||
drivers/scsi/fnic/fnic_scsi.c | 4 ++--
|
||||
drivers/scsi/snic/snic_scsi.c | 2 +-
|
||||
drivers/staging/lustre/lnet/lnet/lib-eq.c | 2 +-
|
||||
drivers/staging/rts5208/rtsx.c | 2 +-
|
||||
drivers/staging/speakup/speakup_acntpc.c | 4 ++--
|
||||
drivers/staging/speakup/speakup_apollo.c | 2 +-
|
||||
drivers/staging/speakup/speakup_decext.c | 2 +-
|
||||
drivers/staging/speakup/speakup_decpc.c | 2 +-
|
||||
drivers/staging/speakup/speakup_dectlk.c | 2 +-
|
||||
drivers/staging/speakup/speakup_dtlk.c | 4 ++--
|
||||
drivers/staging/speakup/speakup_keypc.c | 4 ++--
|
||||
drivers/staging/speakup/synth.c | 2 +-
|
||||
drivers/staging/unisys/visornic/visornic_main.c | 6 +++---
|
||||
drivers/target/target_core_user.c | 2 +-
|
||||
drivers/video/fbdev/omap/hwa742.c | 2 +-
|
||||
drivers/video/fbdev/pxafb.c | 2 +-
|
||||
23 files changed, 30 insertions(+), 30 deletions(-)
|
||||
|
||||
diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c
|
||||
index 05c2307..6954d29 100644
|
||||
--- a/drivers/bluetooth/hci_qca.c
|
||||
+++ b/drivers/bluetooth/hci_qca.c
|
||||
@@ -880,7 +880,7 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate)
|
||||
* then host can communicate with new baudrate to controller
|
||||
*/
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
- schedule_timeout(msecs_to_jiffies(BAUDRATE_SETTLE_TIMEOUT_MS));
|
||||
+ schedule_msec_hrtimeout((BAUDRATE_SETTLE_TIMEOUT_MS));
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
|
||||
return 0;
|
||||
diff --git a/drivers/char/snsc.c b/drivers/char/snsc.c
|
||||
index ec07f0e..3410b46 100644
|
||||
--- a/drivers/char/snsc.c
|
||||
+++ b/drivers/char/snsc.c
|
||||
@@ -198,7 +198,7 @@ scdrv_read(struct file *file, char __user *buf, size_t count, loff_t *f_pos)
|
||||
add_wait_queue(&sd->sd_rq, &wait);
|
||||
spin_unlock_irqrestore(&sd->sd_rlock, flags);
|
||||
|
||||
- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
|
||||
+ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
|
||||
|
||||
remove_wait_queue(&sd->sd_rq, &wait);
|
||||
if (signal_pending(current)) {
|
||||
@@ -294,7 +294,7 @@ scdrv_write(struct file *file, const char __user *buf,
|
||||
add_wait_queue(&sd->sd_wq, &wait);
|
||||
spin_unlock_irqrestore(&sd->sd_wlock, flags);
|
||||
|
||||
- schedule_timeout(msecs_to_jiffies(SCDRV_TIMEOUT));
|
||||
+ schedule_msec_hrtimeout((SCDRV_TIMEOUT));
|
||||
|
||||
remove_wait_queue(&sd->sd_wq, &wait);
|
||||
if (signal_pending(current)) {
|
||||
diff --git a/drivers/media/pci/ivtv/ivtv-ioctl.c b/drivers/media/pci/ivtv/ivtv-ioctl.c
|
||||
index 2dc4b20..8e061cf 100644
|
||||
--- a/drivers/media/pci/ivtv/ivtv-ioctl.c
|
||||
+++ b/drivers/media/pci/ivtv/ivtv-ioctl.c
|
||||
@@ -1151,7 +1151,7 @@ void ivtv_s_std_dec(struct ivtv *itv, v4l2_std_id std)
|
||||
TASK_UNINTERRUPTIBLE);
|
||||
if ((read_reg(IVTV_REG_DEC_LINE_FIELD) >> 16) < 100)
|
||||
break;
|
||||
- schedule_timeout(msecs_to_jiffies(25));
|
||||
+ schedule_msec_hrtimeout((25));
|
||||
}
|
||||
finish_wait(&itv->vsync_waitq, &wait);
|
||||
mutex_lock(&itv->serialize_lock);
|
||||
diff --git a/drivers/media/pci/ivtv/ivtv-streams.c b/drivers/media/pci/ivtv/ivtv-streams.c
|
||||
index d27c6df..e9ffc4e 100644
|
||||
--- a/drivers/media/pci/ivtv/ivtv-streams.c
|
||||
+++ b/drivers/media/pci/ivtv/ivtv-streams.c
|
||||
@@ -834,7 +834,7 @@ int ivtv_stop_v4l2_encode_stream(struct ivtv_stream *s, int gop_end)
|
||||
while (!test_bit(IVTV_F_I_EOS, &itv->i_flags) &&
|
||||
time_before(jiffies,
|
||||
then + msecs_to_jiffies(2000))) {
|
||||
- schedule_timeout(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout((10));
|
||||
}
|
||||
|
||||
/* To convert jiffies to ms, we must multiply by 1000
|
||||
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
|
||||
index 838545c..34f8972 100644
|
||||
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
|
||||
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
|
||||
@@ -250,7 +250,7 @@ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff)
|
||||
} else {
|
||||
/* the PCAN-USB needs time to init */
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
- schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT));
|
||||
+ schedule_msec_hrtimeout((PCAN_USB_STARTUP_TIMEOUT));
|
||||
}
|
||||
|
||||
return err;
|
||||
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
|
||||
index 08f8703..3b3bc86 100644
|
||||
--- a/drivers/net/usb/lan78xx.c
|
||||
+++ b/drivers/net/usb/lan78xx.c
|
||||
@@ -2544,7 +2544,7 @@ static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
|
||||
while (!skb_queue_empty(&dev->rxq) &&
|
||||
!skb_queue_empty(&dev->txq) &&
|
||||
!skb_queue_empty(&dev->done)) {
|
||||
- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
|
||||
+ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
netif_dbg(dev, ifdown, dev->net,
|
||||
"waited for %d urb completions\n", temp);
|
||||
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
|
||||
index 3de65ea..f8a4b18 100644
|
||||
--- a/drivers/net/usb/usbnet.c
|
||||
+++ b/drivers/net/usb/usbnet.c
|
||||
@@ -767,7 +767,7 @@ static void wait_skb_queue_empty(struct sk_buff_head *q)
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
while (!skb_queue_empty(q)) {
|
||||
spin_unlock_irqrestore(&q->lock, flags);
|
||||
- schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
|
||||
+ schedule_msec_hrtimeout((UNLINK_TIMEOUT_MS));
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
spin_lock_irqsave(&q->lock, flags);
|
||||
}
|
||||
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
|
||||
index adb3d58..de73e78 100644
|
||||
--- a/drivers/scsi/fnic/fnic_scsi.c
|
||||
+++ b/drivers/scsi/fnic/fnic_scsi.c
|
||||
@@ -217,7 +217,7 @@ int fnic_fw_reset_handler(struct fnic *fnic)
|
||||
|
||||
/* wait for io cmpl */
|
||||
while (atomic_read(&fnic->in_flight))
|
||||
- schedule_timeout(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout((1));
|
||||
|
||||
spin_lock_irqsave(&fnic->wq_copy_lock[0], flags);
|
||||
|
||||
@@ -2201,7 +2201,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic,
|
||||
}
|
||||
}
|
||||
|
||||
- schedule_timeout(msecs_to_jiffies(2 * fnic->config.ed_tov));
|
||||
+ schedule_msec_hrtimeout((2 * fnic->config.ed_tov));
|
||||
|
||||
/* walk again to check, if IOs are still pending in fw */
|
||||
if (fnic_is_abts_pending(fnic, lr_sc))
|
||||
diff --git a/drivers/scsi/snic/snic_scsi.c b/drivers/scsi/snic/snic_scsi.c
|
||||
index abada16..0bf30dc 100644
|
||||
--- a/drivers/scsi/snic/snic_scsi.c
|
||||
+++ b/drivers/scsi/snic/snic_scsi.c
|
||||
@@ -2356,7 +2356,7 @@ snic_reset(struct Scsi_Host *shost, struct scsi_cmnd *sc)
|
||||
|
||||
/* Wait for all the IOs that are entered in Qcmd */
|
||||
while (atomic_read(&snic->ios_inflight))
|
||||
- schedule_timeout(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout((1));
|
||||
|
||||
ret = snic_issue_hba_reset(snic, sc);
|
||||
if (ret) {
|
||||
diff --git a/drivers/staging/lustre/lnet/lnet/lib-eq.c b/drivers/staging/lustre/lnet/lnet/lib-eq.c
|
||||
index d05c6cc..3f62b6f 100644
|
||||
--- a/drivers/staging/lustre/lnet/lnet/lib-eq.c
|
||||
+++ b/drivers/staging/lustre/lnet/lnet/lib-eq.c
|
||||
@@ -328,7 +328,7 @@ __must_hold(&the_lnet.ln_eq_wait_lock)
|
||||
schedule();
|
||||
} else {
|
||||
now = jiffies;
|
||||
- schedule_timeout(msecs_to_jiffies(tms));
|
||||
+ schedule_msec_hrtimeout((tms));
|
||||
tms -= jiffies_to_msecs(jiffies - now);
|
||||
if (tms < 0) /* no more wait but may have new event */
|
||||
tms = 0;
|
||||
diff --git a/drivers/staging/rts5208/rtsx.c b/drivers/staging/rts5208/rtsx.c
|
||||
index 68d75d0..aef88c4 100644
|
||||
--- a/drivers/staging/rts5208/rtsx.c
|
||||
+++ b/drivers/staging/rts5208/rtsx.c
|
||||
@@ -537,7 +537,7 @@ static int rtsx_polling_thread(void *__dev)
|
||||
|
||||
for (;;) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
- schedule_timeout(msecs_to_jiffies(POLLING_INTERVAL));
|
||||
+ schedule_msec_hrtimeout((POLLING_INTERVAL));
|
||||
|
||||
/* lock the device pointers */
|
||||
mutex_lock(&dev->dev_mutex);
|
||||
diff --git a/drivers/staging/speakup/speakup_acntpc.c b/drivers/staging/speakup/speakup_acntpc.c
|
||||
index efb791b..fd02fb2 100644
|
||||
--- a/drivers/staging/speakup/speakup_acntpc.c
|
||||
+++ b/drivers/staging/speakup/speakup_acntpc.c
|
||||
@@ -204,7 +204,7 @@ static void do_catch_up(struct spk_synth *synth)
|
||||
full_time_val = full_time->u.n.value;
|
||||
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
|
||||
if (synth_full()) {
|
||||
- schedule_timeout(msecs_to_jiffies(full_time_val));
|
||||
+ schedule_msec_hrtimeout((full_time_val));
|
||||
continue;
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
@@ -232,7 +232,7 @@ static void do_catch_up(struct spk_synth *synth)
|
||||
jiffy_delta_val = jiffy_delta->u.n.value;
|
||||
delay_time_val = delay_time->u.n.value;
|
||||
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
|
||||
- schedule_timeout(msecs_to_jiffies(delay_time_val));
|
||||
+ schedule_msec_hrtimeout((delay_time_val));
|
||||
jiff_max = jiffies+jiffy_delta_val;
|
||||
}
|
||||
}
|
||||
diff --git a/drivers/staging/speakup/speakup_apollo.c b/drivers/staging/speakup/speakup_apollo.c
|
||||
index 3cbc8a7..3c17854 100644
|
||||
--- a/drivers/staging/speakup/speakup_apollo.c
|
||||
+++ b/drivers/staging/speakup/speakup_apollo.c
|
||||
@@ -172,7 +172,7 @@ static void do_catch_up(struct spk_synth *synth)
|
||||
outb(UART_MCR_DTR, speakup_info.port_tts + UART_MCR);
|
||||
outb(UART_MCR_DTR | UART_MCR_RTS,
|
||||
speakup_info.port_tts + UART_MCR);
|
||||
- schedule_timeout(msecs_to_jiffies(full_time_val));
|
||||
+ schedule_msec_hrtimeout((full_time_val));
|
||||
continue;
|
||||
}
|
||||
if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
|
||||
diff --git a/drivers/staging/speakup/speakup_decext.c b/drivers/staging/speakup/speakup_decext.c
|
||||
index 1a5cf3d..fa2b4e1 100644
|
||||
--- a/drivers/staging/speakup/speakup_decext.c
|
||||
+++ b/drivers/staging/speakup/speakup_decext.c
|
||||
@@ -186,7 +186,7 @@ static void do_catch_up(struct spk_synth *synth)
|
||||
if (ch == '\n')
|
||||
ch = 0x0D;
|
||||
if (synth_full() || !spk_serial_out(ch)) {
|
||||
- schedule_timeout(msecs_to_jiffies(delay_time_val));
|
||||
+ schedule_msec_hrtimeout((delay_time_val));
|
||||
continue;
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
diff --git a/drivers/staging/speakup/speakup_decpc.c b/drivers/staging/speakup/speakup_decpc.c
|
||||
index d6479bd..f7554bf 100644
|
||||
--- a/drivers/staging/speakup/speakup_decpc.c
|
||||
+++ b/drivers/staging/speakup/speakup_decpc.c
|
||||
@@ -403,7 +403,7 @@ static void do_catch_up(struct spk_synth *synth)
|
||||
if (ch == '\n')
|
||||
ch = 0x0D;
|
||||
if (dt_sendchar(ch)) {
|
||||
- schedule_timeout(msecs_to_jiffies(delay_time_val));
|
||||
+ schedule_msec_hrtimeout((delay_time_val));
|
||||
continue;
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
diff --git a/drivers/staging/speakup/speakup_dectlk.c b/drivers/staging/speakup/speakup_dectlk.c
|
||||
index 7646567..639192e 100644
|
||||
--- a/drivers/staging/speakup/speakup_dectlk.c
|
||||
+++ b/drivers/staging/speakup/speakup_dectlk.c
|
||||
@@ -251,7 +251,7 @@ static void do_catch_up(struct spk_synth *synth)
|
||||
if (ch == '\n')
|
||||
ch = 0x0D;
|
||||
if (synth_full_val || !spk_serial_out(ch)) {
|
||||
- schedule_timeout(msecs_to_jiffies(delay_time_val));
|
||||
+ schedule_msec_hrtimeout((delay_time_val));
|
||||
continue;
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
diff --git a/drivers/staging/speakup/speakup_dtlk.c b/drivers/staging/speakup/speakup_dtlk.c
|
||||
index 38aa401..1640519 100644
|
||||
--- a/drivers/staging/speakup/speakup_dtlk.c
|
||||
+++ b/drivers/staging/speakup/speakup_dtlk.c
|
||||
@@ -217,7 +217,7 @@ static void do_catch_up(struct spk_synth *synth)
|
||||
delay_time_val = delay_time->u.n.value;
|
||||
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
|
||||
if (synth_full()) {
|
||||
- schedule_timeout(msecs_to_jiffies(delay_time_val));
|
||||
+ schedule_msec_hrtimeout((delay_time_val));
|
||||
continue;
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
@@ -233,7 +233,7 @@ static void do_catch_up(struct spk_synth *synth)
|
||||
delay_time_val = delay_time->u.n.value;
|
||||
jiffy_delta_val = jiffy_delta->u.n.value;
|
||||
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
|
||||
- schedule_timeout(msecs_to_jiffies(delay_time_val));
|
||||
+ schedule_msec_hrtimeout((delay_time_val));
|
||||
jiff_max = jiffies + jiffy_delta_val;
|
||||
}
|
||||
}
|
||||
diff --git a/drivers/staging/speakup/speakup_keypc.c b/drivers/staging/speakup/speakup_keypc.c
|
||||
index 5e2170b..30b5df7 100644
|
||||
--- a/drivers/staging/speakup/speakup_keypc.c
|
||||
+++ b/drivers/staging/speakup/speakup_keypc.c
|
||||
@@ -206,7 +206,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
|
||||
full_time_val = full_time->u.n.value;
|
||||
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
|
||||
if (synth_full()) {
|
||||
- schedule_timeout(msecs_to_jiffies(full_time_val));
|
||||
+ schedule_msec_hrtimeout((full_time_val));
|
||||
continue;
|
||||
}
|
||||
set_current_state(TASK_RUNNING);
|
||||
@@ -239,7 +239,7 @@ spin_lock_irqsave(&speakup_info.spinlock, flags);
|
||||
jiffy_delta_val = jiffy_delta->u.n.value;
|
||||
delay_time_val = delay_time->u.n.value;
|
||||
spin_unlock_irqrestore(&speakup_info.spinlock, flags);
|
||||
- schedule_timeout(msecs_to_jiffies(delay_time_val));
|
||||
+ schedule_msec_hrtimeout((delay_time_val));
|
||||
jiff_max = jiffies+jiffy_delta_val;
|
||||
}
|
||||
}
|
||||
diff --git a/drivers/staging/speakup/synth.c b/drivers/staging/speakup/synth.c
|
||||
index a61c02b..14299e5 100644
|
||||
--- a/drivers/staging/speakup/synth.c
|
||||
+++ b/drivers/staging/speakup/synth.c
|
||||
@@ -120,7 +120,7 @@ void spk_do_catch_up(struct spk_synth *synth)
|
||||
if (ch == '\n')
|
||||
ch = synth->procspeech;
|
||||
if (!spk_serial_out(ch)) {
|
||||
- schedule_timeout(msecs_to_jiffies(full_time_val));
|
||||
+ schedule_msec_hrtimeout((full_time_val));
|
||||
continue;
|
||||
}
|
||||
if (time_after_eq(jiffies, jiff_max) && (ch == SPACE)) {
|
||||
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
|
||||
index c1f674f..4f30a7a 100644
|
||||
--- a/drivers/staging/unisys/visornic/visornic_main.c
|
||||
+++ b/drivers/staging/unisys/visornic/visornic_main.c
|
||||
@@ -468,7 +468,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
|
||||
}
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
spin_unlock_irqrestore(&devdata->priv_lock, flags);
|
||||
- wait += schedule_timeout(msecs_to_jiffies(10));
|
||||
+ wait += schedule_msec_hrtimeout((10));
|
||||
spin_lock_irqsave(&devdata->priv_lock, flags);
|
||||
}
|
||||
|
||||
@@ -479,7 +479,7 @@ visornic_disable_with_timeout(struct net_device *netdev, const int timeout)
|
||||
while (1) {
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
spin_unlock_irqrestore(&devdata->priv_lock, flags);
|
||||
- schedule_timeout(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout((10));
|
||||
spin_lock_irqsave(&devdata->priv_lock, flags);
|
||||
if (atomic_read(&devdata->usage))
|
||||
break;
|
||||
@@ -611,7 +611,7 @@ visornic_enable_with_timeout(struct net_device *netdev, const int timeout)
|
||||
}
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
spin_unlock_irqrestore(&devdata->priv_lock, flags);
|
||||
- wait += schedule_timeout(msecs_to_jiffies(10));
|
||||
+ wait += schedule_msec_hrtimeout((10));
|
||||
spin_lock_irqsave(&devdata->priv_lock, flags);
|
||||
}
|
||||
|
||||
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
|
||||
index 8041710..f907a81 100644
|
||||
--- a/drivers/target/target_core_user.c
|
||||
+++ b/drivers/target/target_core_user.c
|
||||
@@ -451,7 +451,7 @@ tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
|
||||
|
||||
pr_debug("sleeping for ring space\n");
|
||||
spin_unlock_irq(&udev->cmdr_lock);
|
||||
- ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
|
||||
+ ret = schedule_msec_hrtimeout((TCMU_TIME_OUT));
|
||||
finish_wait(&udev->wait_cmdr, &__wait);
|
||||
if (!ret) {
|
||||
pr_warn("tcmu: command timed out\n");
|
||||
diff --git a/drivers/video/fbdev/omap/hwa742.c b/drivers/video/fbdev/omap/hwa742.c
|
||||
index a4ee65b..cf38bcb 100644
|
||||
--- a/drivers/video/fbdev/omap/hwa742.c
|
||||
+++ b/drivers/video/fbdev/omap/hwa742.c
|
||||
@@ -926,7 +926,7 @@ static void hwa742_resume(void)
|
||||
if (hwa742_read_reg(HWA742_PLL_DIV_REG) & (1 << 7))
|
||||
break;
|
||||
set_current_state(TASK_UNINTERRUPTIBLE);
|
||||
- schedule_timeout(msecs_to_jiffies(5));
|
||||
+ schedule_msec_hrtimeout((5));
|
||||
}
|
||||
hwa742_set_update_mode(hwa742.update_mode_before_suspend);
|
||||
}
|
||||
diff --git a/drivers/video/fbdev/pxafb.c b/drivers/video/fbdev/pxafb.c
|
||||
index ef73f14..7b5483b 100644
|
||||
--- a/drivers/video/fbdev/pxafb.c
|
||||
+++ b/drivers/video/fbdev/pxafb.c
|
||||
@@ -1287,7 +1287,7 @@ static int pxafb_smart_thread(void *arg)
|
||||
mutex_unlock(&fbi->ctrlr_lock);
|
||||
|
||||
set_current_state(TASK_INTERRUPTIBLE);
|
||||
- schedule_timeout(msecs_to_jiffies(30));
|
||||
+ schedule_msec_hrtimeout((30));
|
||||
}
|
||||
|
||||
pr_debug("%s(): task ending\n", __func__);
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,325 @@
|
|||
From af94b59651831b7e176ce8cb98441bdccb87eac0 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Mon, 20 Feb 2017 13:30:07 +1100
|
||||
Subject: [PATCH 14/25] Replace all calls to schedule_timeout_interruptible of
|
||||
potentially under 50ms to use schedule_msec_hrtimeout_interruptible.
|
||||
|
||||
---
|
||||
drivers/hwmon/fam15h_power.c | 2 +-
|
||||
drivers/iio/light/tsl2563.c | 6 +-----
|
||||
drivers/media/i2c/msp3400-driver.c | 4 ++--
|
||||
drivers/media/pci/ivtv/ivtv-gpio.c | 6 +++---
|
||||
drivers/media/radio/radio-mr800.c | 2 +-
|
||||
drivers/media/radio/radio-tea5777.c | 2 +-
|
||||
drivers/media/radio/tea575x.c | 2 +-
|
||||
drivers/misc/panel.c | 2 +-
|
||||
drivers/parport/ieee1284.c | 2 +-
|
||||
drivers/parport/ieee1284_ops.c | 2 +-
|
||||
drivers/platform/x86/intel_ips.c | 8 ++++----
|
||||
net/core/pktgen.c | 2 +-
|
||||
sound/soc/codecs/wm8350.c | 12 ++++++------
|
||||
sound/soc/codecs/wm8900.c | 2 +-
|
||||
sound/soc/codecs/wm9713.c | 4 ++--
|
||||
15 files changed, 27 insertions(+), 31 deletions(-)
|
||||
|
||||
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
|
||||
index 15aa49d..991e8a7 100644
|
||||
--- a/drivers/hwmon/fam15h_power.c
|
||||
+++ b/drivers/hwmon/fam15h_power.c
|
||||
@@ -238,7 +238,7 @@ static ssize_t acc_show_power(struct device *dev,
|
||||
prev_ptsc[cu] = data->cpu_sw_pwr_ptsc[cu];
|
||||
}
|
||||
|
||||
- leftover = schedule_timeout_interruptible(msecs_to_jiffies(data->power_period));
|
||||
+ leftover = schedule_msec_hrtimeout_interruptible((data->power_period));
|
||||
if (leftover)
|
||||
return 0;
|
||||
|
||||
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
|
||||
index 04598ae..a8c095d 100644
|
||||
--- a/drivers/iio/light/tsl2563.c
|
||||
+++ b/drivers/iio/light/tsl2563.c
|
||||
@@ -282,11 +282,7 @@ static void tsl2563_wait_adc(struct tsl2563_chip *chip)
|
||||
default:
|
||||
delay = 402;
|
||||
}
|
||||
- /*
|
||||
- * TODO: Make sure that we wait at least required delay but why we
|
||||
- * have to extend it one tick more?
|
||||
- */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(delay) + 2);
|
||||
+ schedule_msec_hrtimeout_interruptible(delay + 1);
|
||||
}
|
||||
|
||||
static int tsl2563_adjust_gainlevel(struct tsl2563_chip *chip, u16 adc)
|
||||
diff --git a/drivers/media/i2c/msp3400-driver.c b/drivers/media/i2c/msp3400-driver.c
|
||||
index 201a9800..5cebabc 100644
|
||||
--- a/drivers/media/i2c/msp3400-driver.c
|
||||
+++ b/drivers/media/i2c/msp3400-driver.c
|
||||
@@ -184,7 +184,7 @@ static int msp_read(struct i2c_client *client, int dev, int addr)
|
||||
break;
|
||||
dev_warn(&client->dev, "I/O error #%d (read 0x%02x/0x%02x)\n", err,
|
||||
dev, addr);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout_interruptible((10));
|
||||
}
|
||||
if (err == 3) {
|
||||
dev_warn(&client->dev, "resetting chip, sound will go off.\n");
|
||||
@@ -225,7 +225,7 @@ static int msp_write(struct i2c_client *client, int dev, int addr, int val)
|
||||
break;
|
||||
dev_warn(&client->dev, "I/O error #%d (write 0x%02x/0x%02x)\n", err,
|
||||
dev, addr);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout_interruptible((10));
|
||||
}
|
||||
if (err == 3) {
|
||||
dev_warn(&client->dev, "resetting chip, sound will go off.\n");
|
||||
diff --git a/drivers/media/pci/ivtv/ivtv-gpio.c b/drivers/media/pci/ivtv/ivtv-gpio.c
|
||||
index f752f39..23372af6 100644
|
||||
--- a/drivers/media/pci/ivtv/ivtv-gpio.c
|
||||
+++ b/drivers/media/pci/ivtv/ivtv-gpio.c
|
||||
@@ -117,7 +117,7 @@ void ivtv_reset_ir_gpio(struct ivtv *itv)
|
||||
curout = (curout & ~0xF) | 1;
|
||||
write_reg(curout, IVTV_REG_GPIO_OUT);
|
||||
/* We could use something else for smaller time */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout_interruptible((1));
|
||||
curout |= 2;
|
||||
write_reg(curout, IVTV_REG_GPIO_OUT);
|
||||
curdir &= ~0x80;
|
||||
@@ -137,11 +137,11 @@ int ivtv_reset_tuner_gpio(void *dev, int component, int cmd, int value)
|
||||
curout = read_reg(IVTV_REG_GPIO_OUT);
|
||||
curout &= ~(1 << itv->card->xceive_pin);
|
||||
write_reg(curout, IVTV_REG_GPIO_OUT);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout_interruptible((1));
|
||||
|
||||
curout |= 1 << itv->card->xceive_pin;
|
||||
write_reg(curout, IVTV_REG_GPIO_OUT);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout_interruptible((1));
|
||||
return 0;
|
||||
}
|
||||
|
||||
diff --git a/drivers/media/radio/radio-mr800.c b/drivers/media/radio/radio-mr800.c
|
||||
index c2927fd..bdee269 100644
|
||||
--- a/drivers/media/radio/radio-mr800.c
|
||||
+++ b/drivers/media/radio/radio-mr800.c
|
||||
@@ -382,7 +382,7 @@ static int vidioc_s_hw_freq_seek(struct file *file, void *priv,
|
||||
retval = -ENODATA;
|
||||
break;
|
||||
}
|
||||
- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
|
||||
+ if (schedule_msec_hrtimeout_interruptible((10))) {
|
||||
retval = -ERESTARTSYS;
|
||||
break;
|
||||
}
|
||||
diff --git a/drivers/media/radio/radio-tea5777.c b/drivers/media/radio/radio-tea5777.c
|
||||
index 83fe7ab..aaae5fa 100644
|
||||
--- a/drivers/media/radio/radio-tea5777.c
|
||||
+++ b/drivers/media/radio/radio-tea5777.c
|
||||
@@ -249,7 +249,7 @@ static int radio_tea5777_update_read_reg(struct radio_tea5777 *tea, int wait)
|
||||
}
|
||||
|
||||
if (wait) {
|
||||
- if (schedule_timeout_interruptible(msecs_to_jiffies(wait)))
|
||||
+ if (schedule_msec_hrtimeout_interruptible((wait)))
|
||||
return -ERESTARTSYS;
|
||||
}
|
||||
|
||||
diff --git a/drivers/media/radio/tea575x.c b/drivers/media/radio/tea575x.c
|
||||
index 4dc2067..29f4416 100644
|
||||
--- a/drivers/media/radio/tea575x.c
|
||||
+++ b/drivers/media/radio/tea575x.c
|
||||
@@ -416,7 +416,7 @@ int snd_tea575x_s_hw_freq_seek(struct file *file, struct snd_tea575x *tea,
|
||||
for (;;) {
|
||||
if (time_after(jiffies, timeout))
|
||||
break;
|
||||
- if (schedule_timeout_interruptible(msecs_to_jiffies(10))) {
|
||||
+ if (schedule_msec_hrtimeout_interruptible((10))) {
|
||||
/* some signal arrived, stop search */
|
||||
tea->val &= ~TEA575X_BIT_SEARCH;
|
||||
snd_tea575x_set_freq(tea);
|
||||
diff --git a/drivers/misc/panel.c b/drivers/misc/panel.c
|
||||
index 6030ac5..f0c1a101 100644
|
||||
--- a/drivers/misc/panel.c
|
||||
+++ b/drivers/misc/panel.c
|
||||
@@ -760,7 +760,7 @@ static void long_sleep(int ms)
|
||||
if (in_interrupt())
|
||||
mdelay(ms);
|
||||
else
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(ms));
|
||||
+ schedule_msec_hrtimeout_interruptible((ms));
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/drivers/parport/ieee1284.c b/drivers/parport/ieee1284.c
|
||||
index f9fd4b3..00ad2f3 100644
|
||||
--- a/drivers/parport/ieee1284.c
|
||||
+++ b/drivers/parport/ieee1284.c
|
||||
@@ -215,7 +215,7 @@ int parport_wait_peripheral(struct parport *port,
|
||||
/* parport_wait_event didn't time out, but the
|
||||
* peripheral wasn't actually ready either.
|
||||
* Wait for another 10ms. */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout_interruptible((10));
|
||||
}
|
||||
}
|
||||
|
||||
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c
|
||||
index c0e7d21..e1b4fd4 100644
|
||||
--- a/drivers/parport/ieee1284_ops.c
|
||||
+++ b/drivers/parport/ieee1284_ops.c
|
||||
@@ -536,7 +536,7 @@ size_t parport_ieee1284_ecp_read_data (struct parport *port,
|
||||
/* Yield the port for a while. */
|
||||
if (count && dev->port->irq != PARPORT_IRQ_NONE) {
|
||||
parport_release (dev);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(40));
|
||||
+ schedule_msec_hrtimeout_interruptible((40));
|
||||
parport_claim_or_block (dev);
|
||||
}
|
||||
else
|
||||
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
|
||||
index 55663b3..0363fed 100644
|
||||
--- a/drivers/platform/x86/intel_ips.c
|
||||
+++ b/drivers/platform/x86/intel_ips.c
|
||||
@@ -812,7 +812,7 @@ static int ips_adjust(void *data)
|
||||
ips_gpu_lower(ips);
|
||||
|
||||
sleep:
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
|
||||
+ schedule_msec_hrtimeout_interruptible((IPS_ADJUST_PERIOD));
|
||||
} while (!kthread_should_stop());
|
||||
|
||||
dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");
|
||||
@@ -991,7 +991,7 @@ static int ips_monitor(void *data)
|
||||
seqno_timestamp = get_jiffies_64();
|
||||
|
||||
old_cpu_power = thm_readl(THM_CEC);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
|
||||
+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
|
||||
|
||||
/* Collect an initial average */
|
||||
for (i = 0; i < IPS_SAMPLE_COUNT; i++) {
|
||||
@@ -1018,7 +1018,7 @@ static int ips_monitor(void *data)
|
||||
mchp_samples[i] = mchp;
|
||||
}
|
||||
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
|
||||
+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
|
||||
if (kthread_should_stop())
|
||||
break;
|
||||
}
|
||||
@@ -1045,7 +1045,7 @@ static int ips_monitor(void *data)
|
||||
* us to reduce the sample frequency if the CPU and GPU are idle.
|
||||
*/
|
||||
old_cpu_power = thm_readl(THM_CEC);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD));
|
||||
+ schedule_msec_hrtimeout_interruptible((IPS_SAMPLE_PERIOD));
|
||||
last_sample_period = IPS_SAMPLE_PERIOD;
|
||||
|
||||
setup_deferrable_timer_on_stack(&timer, monitor_timeout,
|
||||
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
|
||||
index 8e69ce4..0227415 100644
|
||||
--- a/net/core/pktgen.c
|
||||
+++ b/net/core/pktgen.c
|
||||
@@ -1992,7 +1992,7 @@ static void pktgen_mark_device(const struct pktgen_net *pn, const char *ifname)
|
||||
mutex_unlock(&pktgen_thread_lock);
|
||||
pr_debug("%s: waiting for %s to disappear....\n",
|
||||
__func__, ifname);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(msec_per_try));
|
||||
+ schedule_msec_hrtimeout_interruptible((msec_per_try));
|
||||
mutex_lock(&pktgen_thread_lock);
|
||||
|
||||
if (++i >= max_tries) {
|
||||
diff --git a/sound/soc/codecs/wm8350.c b/sound/soc/codecs/wm8350.c
|
||||
index 2efc5b4..3e3248c 100644
|
||||
--- a/sound/soc/codecs/wm8350.c
|
||||
+++ b/sound/soc/codecs/wm8350.c
|
||||
@@ -236,10 +236,10 @@ static void wm8350_pga_work(struct work_struct *work)
|
||||
out2->ramp == WM8350_RAMP_UP) {
|
||||
/* delay is longer over 0dB as increases are larger */
|
||||
if (i >= WM8350_OUTn_0dB)
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies
|
||||
+ schedule_msec_hrtimeout_interruptible(
|
||||
(2));
|
||||
else
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies
|
||||
+ schedule_msec_hrtimeout_interruptible(
|
||||
(1));
|
||||
} else
|
||||
udelay(50); /* doesn't matter if we delay longer */
|
||||
@@ -1123,7 +1123,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
|
||||
(platform->dis_out4 << 6));
|
||||
|
||||
/* wait for discharge */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies
|
||||
+ schedule_msec_hrtimeout_interruptible(
|
||||
(platform->
|
||||
cap_discharge_msecs));
|
||||
|
||||
@@ -1139,7 +1139,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
|
||||
WM8350_VBUFEN);
|
||||
|
||||
/* wait for vmid */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies
|
||||
+ schedule_msec_hrtimeout_interruptible(
|
||||
(platform->
|
||||
vmid_charge_msecs));
|
||||
|
||||
@@ -1190,7 +1190,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
|
||||
wm8350_reg_write(wm8350, WM8350_POWER_MGMT_1, pm1);
|
||||
|
||||
/* wait */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies
|
||||
+ schedule_msec_hrtimeout_interruptible(
|
||||
(platform->
|
||||
vmid_discharge_msecs));
|
||||
|
||||
@@ -1208,7 +1208,7 @@ static int wm8350_set_bias_level(struct snd_soc_codec *codec,
|
||||
pm1 | WM8350_OUTPUT_DRAIN_EN);
|
||||
|
||||
/* wait */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies
|
||||
+ schedule_msec_hrtimeout_interruptible(
|
||||
(platform->drain_msecs));
|
||||
|
||||
pm1 &= ~WM8350_BIASEN;
|
||||
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
|
||||
index c77b49a..fc50456 100644
|
||||
--- a/sound/soc/codecs/wm8900.c
|
||||
+++ b/sound/soc/codecs/wm8900.c
|
||||
@@ -1112,7 +1112,7 @@ static int wm8900_set_bias_level(struct snd_soc_codec *codec,
|
||||
/* Need to let things settle before stopping the clock
|
||||
* to ensure that restart works, see "Stopping the
|
||||
* master clock" in the datasheet. */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout_interruptible((1));
|
||||
snd_soc_write(codec, WM8900_REG_POWER2,
|
||||
WM8900_REG_POWER2_SYSCLK_ENA);
|
||||
break;
|
||||
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
|
||||
index 7e48221..0c85a20 100644
|
||||
--- a/sound/soc/codecs/wm9713.c
|
||||
+++ b/sound/soc/codecs/wm9713.c
|
||||
@@ -199,7 +199,7 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
|
||||
|
||||
/* Gracefully shut down the voice interface. */
|
||||
snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0200);
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout_interruptible((1));
|
||||
snd_soc_update_bits(codec, AC97_HANDSET_RATE, 0x0f00, 0x0f00);
|
||||
snd_soc_update_bits(codec, AC97_EXTENDED_MID, 0x1000, 0x1000);
|
||||
|
||||
@@ -868,7 +868,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
|
||||
wm9713->pll_in = freq_in;
|
||||
|
||||
/* wait 10ms AC97 link frames for the link to stabilise */
|
||||
- schedule_timeout_interruptible(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout_interruptible((10));
|
||||
return 0;
|
||||
}
|
||||
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,160 @@
|
|||
From 1137ff2bfa5eb63b53747fe303fdb3937c5e1077 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Mon, 20 Feb 2017 13:30:32 +1100
|
||||
Subject: [PATCH 15/25] Replace all calls to schedule_timeout_uninterruptible
|
||||
of potentially under 50ms to use schedule_msec_hrtimeout_uninterruptible
|
||||
|
||||
---
|
||||
drivers/media/pci/cx18/cx18-gpio.c | 4 ++--
|
||||
drivers/net/wireless/intel/ipw2x00/ipw2100.c | 4 ++--
|
||||
drivers/rtc/rtc-wm8350.c | 6 +++---
|
||||
drivers/scsi/lpfc/lpfc_scsi.c | 2 +-
|
||||
sound/pci/maestro3.c | 4 ++--
|
||||
sound/soc/codecs/rt5631.c | 4 ++--
|
||||
sound/soc/soc-dapm.c | 2 +-
|
||||
7 files changed, 13 insertions(+), 13 deletions(-)
|
||||
|
||||
diff --git a/drivers/media/pci/cx18/cx18-gpio.c b/drivers/media/pci/cx18/cx18-gpio.c
|
||||
index 38dc6b8..3cd3098 100644
|
||||
--- a/drivers/media/pci/cx18/cx18-gpio.c
|
||||
+++ b/drivers/media/pci/cx18/cx18-gpio.c
|
||||
@@ -95,11 +95,11 @@ static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi,
|
||||
|
||||
/* Assert */
|
||||
gpio_update(cx, mask, ~active_lo);
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((assert_msecs));
|
||||
|
||||
/* Deassert */
|
||||
gpio_update(cx, mask, ~active_hi);
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((recovery_msecs));
|
||||
}
|
||||
|
||||
/*
|
||||
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
|
||||
index 356aba9..d2cc761 100644
|
||||
--- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c
|
||||
+++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c
|
||||
@@ -830,7 +830,7 @@ static int ipw2100_hw_send_command(struct ipw2100_priv *priv,
|
||||
* doesn't seem to have as many firmware restart cycles...
|
||||
*
|
||||
* As a test, we're sticking in a 1/100s delay here */
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((10));
|
||||
|
||||
return 0;
|
||||
|
||||
@@ -1281,7 +1281,7 @@ static int ipw2100_start_adapter(struct ipw2100_priv *priv)
|
||||
IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
|
||||
i = 5000;
|
||||
do {
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(40));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((40));
|
||||
/* Todo... wait for sync command ... */
|
||||
|
||||
read_register(priv->net_dev, IPW_REG_INTA, &inta);
|
||||
diff --git a/drivers/rtc/rtc-wm8350.c b/drivers/rtc/rtc-wm8350.c
|
||||
index fa247de..f1a28d8 100644
|
||||
--- a/drivers/rtc/rtc-wm8350.c
|
||||
+++ b/drivers/rtc/rtc-wm8350.c
|
||||
@@ -121,7 +121,7 @@ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm)
|
||||
/* Wait until confirmation of stopping */
|
||||
do {
|
||||
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((1));
|
||||
} while (--retries && !(rtc_ctrl & WM8350_RTC_STS));
|
||||
|
||||
if (!retries) {
|
||||
@@ -204,7 +204,7 @@ static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350)
|
||||
/* Wait until confirmation of stopping */
|
||||
do {
|
||||
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((1));
|
||||
} while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS));
|
||||
|
||||
if (!(rtc_ctrl & WM8350_RTC_ALMSTS))
|
||||
@@ -227,7 +227,7 @@ static int wm8350_rtc_start_alarm(struct wm8350 *wm8350)
|
||||
/* Wait until confirmation */
|
||||
do {
|
||||
rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(1));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((1));
|
||||
} while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS);
|
||||
|
||||
if (rtc_ctrl & WM8350_RTC_ALMSTS)
|
||||
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
|
||||
index ad350d9..69a58a8 100644
|
||||
--- a/drivers/scsi/lpfc/lpfc_scsi.c
|
||||
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
|
||||
@@ -5109,7 +5109,7 @@ lpfc_reset_flush_io_context(struct lpfc_vport *vport, uint16_t tgt_id,
|
||||
tgt_id, lun_id, context);
|
||||
later = msecs_to_jiffies(2 * vport->cfg_devloss_tmo * 1000) + jiffies;
|
||||
while (time_after(later, jiffies) && cnt) {
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(20));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((20));
|
||||
cnt = lpfc_sli_sum_iocb(vport, tgt_id, lun_id, context);
|
||||
}
|
||||
if (cnt) {
|
||||
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
|
||||
index cafea6d..d374514 100644
|
||||
--- a/sound/pci/maestro3.c
|
||||
+++ b/sound/pci/maestro3.c
|
||||
@@ -2016,7 +2016,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
|
||||
outw(0, io + GPIO_DATA);
|
||||
outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
|
||||
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((delay1));
|
||||
|
||||
outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
|
||||
udelay(5);
|
||||
@@ -2024,7 +2024,7 @@ static void snd_m3_ac97_reset(struct snd_m3 *chip)
|
||||
outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
|
||||
outw(~0, io + GPIO_MASK);
|
||||
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((delay2));
|
||||
|
||||
if (! snd_m3_try_read_vendor(chip))
|
||||
break;
|
||||
diff --git a/sound/soc/codecs/rt5631.c b/sound/soc/codecs/rt5631.c
|
||||
index 0e41808..611cb9f 100644
|
||||
--- a/sound/soc/codecs/rt5631.c
|
||||
+++ b/sound/soc/codecs/rt5631.c
|
||||
@@ -419,7 +419,7 @@ static void onebit_depop_mute_stage(struct snd_soc_codec *codec, int enable)
|
||||
hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
|
||||
snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
|
||||
if (enable) {
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((10));
|
||||
/* config one-bit depop parameter */
|
||||
rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x307f);
|
||||
snd_soc_update_bits(codec, RT5631_HP_OUT_VOL,
|
||||
@@ -529,7 +529,7 @@ static void depop_seq_mute_stage(struct snd_soc_codec *codec, int enable)
|
||||
hp_zc = snd_soc_read(codec, RT5631_INT_ST_IRQ_CTRL_2);
|
||||
snd_soc_write(codec, RT5631_INT_ST_IRQ_CTRL_2, hp_zc & 0xf7ff);
|
||||
if (enable) {
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(10));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((10));
|
||||
|
||||
/* config depop sequence parameter */
|
||||
rt5631_write_index(codec, RT5631_SPK_INTL_CTRL, 0x302f);
|
||||
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
|
||||
index 27dd02e..7ba49f4 100644
|
||||
--- a/sound/soc/soc-dapm.c
|
||||
+++ b/sound/soc/soc-dapm.c
|
||||
@@ -134,7 +134,7 @@ static void dapm_assert_locked(struct snd_soc_dapm_context *dapm)
|
||||
static void pop_wait(u32 pop_time)
|
||||
{
|
||||
if (pop_time)
|
||||
- schedule_timeout_uninterruptible(msecs_to_jiffies(pop_time));
|
||||
+ schedule_msec_hrtimeout_uninterruptible((pop_time));
|
||||
}
|
||||
|
||||
static void pop_dbg(struct device *dev, u32 pop_time, const char *fmt, ...)
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,84 @@
|
|||
From 37496baeea800e745a77620e90660496135f7fa5 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Mon, 20 Feb 2017 13:31:42 +1100
|
||||
Subject: [PATCH 16/25] Fix build for disabled highres timers with hrtimeout
|
||||
code.
|
||||
|
||||
---
|
||||
include/linux/freezer.h | 1 +
|
||||
include/linux/sched.h | 22 ++++++++++++++++++++++
|
||||
kernel/time/timer.c | 2 ++
|
||||
3 files changed, 25 insertions(+)
|
||||
|
||||
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
|
||||
index dd03e83..2fda682 100644
|
||||
--- a/include/linux/freezer.h
|
||||
+++ b/include/linux/freezer.h
|
||||
@@ -296,6 +296,7 @@ static inline void set_freezable(void) {}
|
||||
#define wait_event_freezekillable_unsafe(wq, condition) \
|
||||
wait_event_killable(wq, condition)
|
||||
|
||||
+#define pm_freezing (false)
|
||||
#endif /* !CONFIG_FREEZER */
|
||||
|
||||
#endif /* FREEZER_H_INCLUDED */
|
||||
diff --git a/include/linux/sched.h b/include/linux/sched.h
|
||||
index 46544f4..680494d 100644
|
||||
--- a/include/linux/sched.h
|
||||
+++ b/include/linux/sched.h
|
||||
@@ -458,10 +458,32 @@ extern signed long schedule_timeout_killable(signed long timeout);
|
||||
extern signed long schedule_timeout_uninterruptible(signed long timeout);
|
||||
extern signed long schedule_timeout_idle(signed long timeout);
|
||||
|
||||
+#ifdef CONFIG_HIGH_RES_TIMERS
|
||||
extern signed long schedule_msec_hrtimeout(signed long timeout);
|
||||
extern signed long schedule_min_hrtimeout(void);
|
||||
extern signed long schedule_msec_hrtimeout_interruptible(signed long timeout);
|
||||
extern signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout);
|
||||
+#else
|
||||
+static inline signed long schedule_msec_hrtimeout(signed long timeout)
|
||||
+{
|
||||
+ return schedule_timeout(msecs_to_jiffies(timeout));
|
||||
+}
|
||||
+
|
||||
+static inline signed long schedule_min_hrtimeout(void)
|
||||
+{
|
||||
+ return schedule_timeout(1);
|
||||
+}
|
||||
+
|
||||
+static inline signed long schedule_msec_hrtimeout_interruptible(signed long timeout)
|
||||
+{
|
||||
+ return schedule_timeout_interruptible(msecs_to_jiffies(timeout));
|
||||
+}
|
||||
+
|
||||
+static inline signed long schedule_msec_hrtimeout_uninterruptible(signed long timeout)
|
||||
+{
|
||||
+ return schedule_timeout_uninterruptible(msecs_to_jiffies(timeout));
|
||||
+}
|
||||
+#endif
|
||||
|
||||
asmlinkage void schedule(void);
|
||||
extern void schedule_preempt_disabled(void);
|
||||
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
|
||||
index bc53598..0e22641 100644
|
||||
--- a/kernel/time/timer.c
|
||||
+++ b/kernel/time/timer.c
|
||||
@@ -1750,6 +1750,7 @@ signed long __sched schedule_timeout(signed long timeout)
|
||||
|
||||
expire = timeout + jiffies;
|
||||
|
||||
+#ifdef CONFIG_HIGH_RES_TIMERS
|
||||
if (timeout == 1 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
|
||||
/*
|
||||
* Special case 1 as being a request for the minimum timeout
|
||||
@@ -1760,6 +1761,7 @@ signed long __sched schedule_timeout(signed long timeout)
|
||||
return 0;
|
||||
goto out_timeout;
|
||||
}
|
||||
+#endif
|
||||
|
||||
setup_timer_on_stack(&timer, process_timeout, (unsigned long)current);
|
||||
__mod_timer(&timer, expire, false);
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,61 @@
|
|||
From a0d29f014dbcb29649dff1a9b8df58bad7be3926 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Wed, 7 Dec 2016 21:13:16 +1100
|
||||
Subject: [PATCH 18/25] Make threaded IRQs optionally the default which can be
|
||||
disabled.
|
||||
|
||||
---
|
||||
kernel/irq/Kconfig | 14 ++++++++++++++
|
||||
kernel/irq/manage.c | 10 ++++++++++
|
||||
2 files changed, 24 insertions(+)
|
||||
|
||||
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
|
||||
index 3bbfd6a..351bf16 100644
|
||||
--- a/kernel/irq/Kconfig
|
||||
+++ b/kernel/irq/Kconfig
|
||||
@@ -95,6 +95,20 @@ config IRQ_DOMAIN_DEBUG
|
||||
config IRQ_FORCED_THREADING
|
||||
bool
|
||||
|
||||
+config FORCE_IRQ_THREADING
|
||||
+ bool "Make IRQ threading compulsory"
|
||||
+ depends on IRQ_FORCED_THREADING
|
||||
+ default y
|
||||
+ ---help---
|
||||
+
|
||||
+ Make IRQ threading mandatory for any IRQ handlers that support it
|
||||
+ instead of being optional and requiring the threadirqs kernel
|
||||
+ parameter. Instead they can be optionally disabled with the
|
||||
+ nothreadirqs kernel parameter.
|
||||
+
|
||||
+ Enable if you are building for a desktop or low latency system,
|
||||
+ otherwise say N.
|
||||
+
|
||||
config SPARSE_IRQ
|
||||
bool "Support sparse irq numbering" if MAY_HAVE_SPARSE_IRQ
|
||||
---help---
|
||||
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
|
||||
index 6b66959..6b3fb17 100644
|
||||
--- a/kernel/irq/manage.c
|
||||
+++ b/kernel/irq/manage.c
|
||||
@@ -22,7 +22,17 @@
|
||||
#include "internals.h"
|
||||
|
||||
#ifdef CONFIG_IRQ_FORCED_THREADING
|
||||
+#ifdef CONFIG_FORCE_IRQ_THREADING
|
||||
+__read_mostly bool force_irqthreads = true;
|
||||
+#else
|
||||
__read_mostly bool force_irqthreads;
|
||||
+#endif
|
||||
+static int __init setup_noforced_irqthreads(char *arg)
|
||||
+{
|
||||
+ force_irqthreads = false;
|
||||
+ return 0;
|
||||
+}
|
||||
+early_param("nothreadirqs", setup_noforced_irqthreads);
|
||||
|
||||
static int __init setup_forced_irqthreads(char *arg)
|
||||
{
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,69 @@
|
|||
From a278cad439033005610ddda23882f2c681c669d1 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Mon, 20 Feb 2017 13:32:58 +1100
|
||||
Subject: [PATCH 20/25] Don't use hrtimer overlay when pm_freezing since some
|
||||
drivers still don't correctly use freezable timeouts.
|
||||
|
||||
---
|
||||
kernel/time/hrtimer.c | 2 +-
|
||||
kernel/time/timer.c | 9 +++++----
|
||||
2 files changed, 6 insertions(+), 5 deletions(-)
|
||||
|
||||
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
|
||||
index 26ac1f8..25e1555 100644
|
||||
--- a/kernel/time/hrtimer.c
|
||||
+++ b/kernel/time/hrtimer.c
|
||||
@@ -1818,7 +1818,7 @@ signed long __sched schedule_msec_hrtimeout(signed long timeout)
|
||||
* (yet) better than Hz, as would occur during startup, use regular
|
||||
* timers.
|
||||
*/
|
||||
- if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ)
|
||||
+ if (jiffs > 4 || hrtimer_resolution >= NSEC_PER_SEC / HZ || pm_freezing)
|
||||
return schedule_timeout(jiffs);
|
||||
|
||||
delta = (timeout % 1000) * NSEC_PER_MSEC;
|
||||
diff --git a/kernel/time/timer.c b/kernel/time/timer.c
|
||||
index 0e22641..45a6e1f 100644
|
||||
--- a/kernel/time/timer.c
|
||||
+++ b/kernel/time/timer.c
|
||||
@@ -42,6 +42,7 @@
|
||||
#include <linux/sched/sysctl.h>
|
||||
#include <linux/slab.h>
|
||||
#include <linux/compat.h>
|
||||
+#include <linux/freezer.h>
|
||||
|
||||
#include <linux/uaccess.h>
|
||||
#include <asm/unistd.h>
|
||||
@@ -1901,12 +1902,12 @@ void msleep(unsigned int msecs)
|
||||
* Use high resolution timers where the resolution of tick based
|
||||
* timers is inadequate.
|
||||
*/
|
||||
- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
|
||||
+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
|
||||
while (msecs)
|
||||
msecs = schedule_msec_hrtimeout_uninterruptible(msecs);
|
||||
return;
|
||||
}
|
||||
- timeout = msecs_to_jiffies(msecs) + 1;
|
||||
+ timeout = jiffs + 1;
|
||||
|
||||
while (timeout)
|
||||
timeout = schedule_timeout_uninterruptible(timeout);
|
||||
@@ -1923,12 +1924,12 @@ unsigned long msleep_interruptible(unsigned int msecs)
|
||||
int jiffs = msecs_to_jiffies(msecs);
|
||||
unsigned long timeout;
|
||||
|
||||
- if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ) {
|
||||
+ if (jiffs < 5 && hrtimer_resolution < NSEC_PER_SEC / HZ && !pm_freezing) {
|
||||
while (msecs && !signal_pending(current))
|
||||
msecs = schedule_msec_hrtimeout_interruptible(msecs);
|
||||
return msecs;
|
||||
}
|
||||
- timeout = msecs_to_jiffies(msecs) + 1;
|
||||
+ timeout = jiffs + 1;
|
||||
|
||||
while (timeout && !signal_pending(current))
|
||||
timeout = schedule_timeout_interruptible(timeout);
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,34 @@
|
|||
From da915e0f3abeb61f6a132bb77b7d0a9bf0573233 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Mon, 20 Feb 2017 13:38:23 +1100
|
||||
Subject: [PATCH 21/25] Make writeback throttling default enabled.
|
||||
|
||||
---
|
||||
block/Kconfig | 4 ++--
|
||||
1 file changed, 2 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/block/Kconfig b/block/Kconfig
|
||||
index 8bf114a..83e6f9d 100644
|
||||
--- a/block/Kconfig
|
||||
+++ b/block/Kconfig
|
||||
@@ -123,7 +123,7 @@ config BLK_CMDLINE_PARSER
|
||||
|
||||
config BLK_WBT
|
||||
bool "Enable support for block device writeback throttling"
|
||||
- default n
|
||||
+ default y
|
||||
---help---
|
||||
Enabling this option enables the block layer to throttle buffered
|
||||
background writeback from the VM, making it more smooth and having
|
||||
@@ -133,7 +133,7 @@ config BLK_WBT
|
||||
|
||||
config BLK_WBT_SQ
|
||||
bool "Single queue writeback throttling"
|
||||
- default n
|
||||
+ default y
|
||||
depends on BLK_WBT
|
||||
---help---
|
||||
Enable writeback throttling by default on legacy single queue devices
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,43 @@
|
|||
From 2f96168f72bbd431c0e6d28b44393e98b49ca787 Mon Sep 17 00:00:00 2001
|
||||
From: Con Kolivas <kernel@kolivas.org>
|
||||
Date: Mon, 20 Feb 2017 13:48:54 +1100
|
||||
Subject: [PATCH 22/25] Swap sucks.
|
||||
|
||||
---
|
||||
include/linux/swap.h | 6 +-----
|
||||
mm/vmscan.c | 2 +-
|
||||
2 files changed, 2 insertions(+), 6 deletions(-)
|
||||
|
||||
diff --git a/include/linux/swap.h b/include/linux/swap.h
|
||||
index 7f47b70..1c2ed28 100644
|
||||
--- a/include/linux/swap.h
|
||||
+++ b/include/linux/swap.h
|
||||
@@ -361,11 +361,7 @@ extern struct page *swapin_readahead(swp_entry_t, gfp_t,
|
||||
extern atomic_long_t nr_swap_pages;
|
||||
extern long total_swap_pages;
|
||||
|
||||
-/* Swap 50% full? Release swapcache more aggressively.. */
|
||||
-static inline bool vm_swap_full(void)
|
||||
-{
|
||||
- return atomic_long_read(&nr_swap_pages) * 2 < total_swap_pages;
|
||||
-}
|
||||
+#define vm_swap_full() 1
|
||||
|
||||
static inline long get_nr_swap_pages(void)
|
||||
{
|
||||
diff --git a/mm/vmscan.c b/mm/vmscan.c
|
||||
index 532a2a7..15e4260 100644
|
||||
--- a/mm/vmscan.c
|
||||
+++ b/mm/vmscan.c
|
||||
@@ -141,7 +141,7 @@ struct scan_control {
|
||||
/*
|
||||
* From 0 .. 100. Higher means more swappy.
|
||||
*/
|
||||
-int vm_swappiness = 60;
|
||||
+int vm_swappiness = 33;
|
||||
/*
|
||||
* The total number of pages which are beyond the high watermark within all
|
||||
* zones.
|
||||
--
|
||||
2.9.3
|
||||
|
|
@ -0,0 +1,533 @@
|
|||
WARNING
|
||||
This patch works with gcc versions 4.9+ and with kernel version 3.15+ and should
|
||||
NOT be applied when compiling on older versions of gcc due to key name changes
|
||||
of the march flags introduced with the version 4.9 release of gcc.[1]
|
||||
|
||||
Use the older version of this patch hosted on the same github for older
|
||||
versions of gcc.
|
||||
|
||||
FEATURES
|
||||
This patch adds additional CPU options to the Linux kernel accessible under:
|
||||
Processor type and features --->
|
||||
Processor family --->
|
||||
|
||||
The expanded microarchitectures include:
|
||||
* AMD Improved K8-family
|
||||
* AMD K10-family
|
||||
* AMD Family 10h (Barcelona)
|
||||
* AMD Family 14h (Bobcat)
|
||||
* AMD Family 16h (Jaguar)
|
||||
* AMD Family 15h (Bulldozer)
|
||||
* AMD Family 15h (Piledriver)
|
||||
* AMD Family 15h (Steamroller)
|
||||
* AMD Family 15h (Excavator)
|
||||
* AMD Family 17h (Zen)
|
||||
* Intel Silvermont low-power processors
|
||||
* Intel 1st Gen Core i3/i5/i7 (Nehalem)
|
||||
* Intel 1.5 Gen Core i3/i5/i7 (Westmere)
|
||||
* Intel 2nd Gen Core i3/i5/i7 (Sandybridge)
|
||||
* Intel 3rd Gen Core i3/i5/i7 (Ivybridge)
|
||||
* Intel 4th Gen Core i3/i5/i7 (Haswell)
|
||||
* Intel 5th Gen Core i3/i5/i7 (Broadwell)
|
||||
* Intel 6th Gen Core i3/i5.i7 (Skylake)
|
||||
|
||||
It also offers to compile passing the 'native' option which, "selects the CPU
|
||||
to generate code for at compilation time by determining the processor type of
|
||||
the compiling machine. Using -march=native enables all instruction subsets
|
||||
supported by the local machine and will produce code optimized for the local
|
||||
machine under the constraints of the selected instruction set."[3]
|
||||
|
||||
MINOR NOTES
|
||||
This patch also changes 'atom' to 'bonnell' in accordance with the gcc v4.9
|
||||
changes. Note that upstream is using the deprecated 'match=atom' flags when I
|
||||
believe it should use the newer 'march=bonnell' flag for atom processors.[2]
|
||||
|
||||
It is not recommended to compile on Atom-CPUs with the 'native' option.[4] The
|
||||
recommendation is use to the 'atom' option instead.
|
||||
|
||||
BENEFITS
|
||||
Small but real speed increases are measurable using a make endpoint comparing
|
||||
a generic kernel to one built with one of the respective microarchs.
|
||||
|
||||
See the following experimental evidence supporting this statement:
|
||||
https://github.com/graysky2/kernel_gcc_patch
|
||||
|
||||
REQUIREMENTS
|
||||
linux version >=3.15
|
||||
gcc version >=4.9
|
||||
|
||||
ACKNOWLEDGMENTS
|
||||
This patch builds on the seminal work by Jeroen.[5]
|
||||
|
||||
REFERENCES
|
||||
1. https://gcc.gnu.org/gcc-4.9/changes.html
|
||||
2. https://bugzilla.kernel.org/show_bug.cgi?id=77461
|
||||
3. https://gcc.gnu.org/onlinedocs/gcc/x86-Options.html
|
||||
4. https://github.com/graysky2/kernel_gcc_patch/issues/15
|
||||
5. http://www.linuxforge.net/docs/linux/linux-gcc.php
|
||||
|
||||
--- a/arch/x86/include/asm/module.h 2016-12-11 14:17:54.000000000 -0500
|
||||
+++ b/arch/x86/include/asm/module.h 2017-01-06 20:44:36.602227264 -0500
|
||||
@@ -15,6 +15,24 @@
|
||||
#define MODULE_PROC_FAMILY "586MMX "
|
||||
#elif defined CONFIG_MCORE2
|
||||
#define MODULE_PROC_FAMILY "CORE2 "
|
||||
+#elif defined CONFIG_MNATIVE
|
||||
+#define MODULE_PROC_FAMILY "NATIVE "
|
||||
+#elif defined CONFIG_MNEHALEM
|
||||
+#define MODULE_PROC_FAMILY "NEHALEM "
|
||||
+#elif defined CONFIG_MWESTMERE
|
||||
+#define MODULE_PROC_FAMILY "WESTMERE "
|
||||
+#elif defined CONFIG_MSILVERMONT
|
||||
+#define MODULE_PROC_FAMILY "SILVERMONT "
|
||||
+#elif defined CONFIG_MSANDYBRIDGE
|
||||
+#define MODULE_PROC_FAMILY "SANDYBRIDGE "
|
||||
+#elif defined CONFIG_MIVYBRIDGE
|
||||
+#define MODULE_PROC_FAMILY "IVYBRIDGE "
|
||||
+#elif defined CONFIG_MHASWELL
|
||||
+#define MODULE_PROC_FAMILY "HASWELL "
|
||||
+#elif defined CONFIG_MBROADWELL
|
||||
+#define MODULE_PROC_FAMILY "BROADWELL "
|
||||
+#elif defined CONFIG_MSKYLAKE
|
||||
+#define MODULE_PROC_FAMILY "SKYLAKE "
|
||||
#elif defined CONFIG_MATOM
|
||||
#define MODULE_PROC_FAMILY "ATOM "
|
||||
#elif defined CONFIG_M686
|
||||
@@ -33,6 +51,26 @@
|
||||
#define MODULE_PROC_FAMILY "K7 "
|
||||
#elif defined CONFIG_MK8
|
||||
#define MODULE_PROC_FAMILY "K8 "
|
||||
+#elif defined CONFIG_MK8SSE3
|
||||
+#define MODULE_PROC_FAMILY "K8SSE3 "
|
||||
+#elif defined CONFIG_MK10
|
||||
+#define MODULE_PROC_FAMILY "K10 "
|
||||
+#elif defined CONFIG_MBARCELONA
|
||||
+#define MODULE_PROC_FAMILY "BARCELONA "
|
||||
+#elif defined CONFIG_MBOBCAT
|
||||
+#define MODULE_PROC_FAMILY "BOBCAT "
|
||||
+#elif defined CONFIG_MBULLDOZER
|
||||
+#define MODULE_PROC_FAMILY "BULLDOZER "
|
||||
+#elif defined CONFIG_MPILEDRIVER
|
||||
+#define MODULE_PROC_FAMILY "PILEDRIVER "
|
||||
+#elif defined CONFIG_MSTEAMROLLER
|
||||
+#define MODULE_PROC_FAMILY "STEAMROLLER "
|
||||
+#elif defined CONFIG_MJAGUAR
|
||||
+#define MODULE_PROC_FAMILY "JAGUAR "
|
||||
+#elif defined CONFIG_MEXCAVATOR
|
||||
+#define MODULE_PROC_FAMILY "EXCAVATOR "
|
||||
+#elif defined CONFIG_MZEN
|
||||
+#define MODULE_PROC_FAMILY "ZEN "
|
||||
#elif defined CONFIG_MELAN
|
||||
#define MODULE_PROC_FAMILY "ELAN "
|
||||
#elif defined CONFIG_MCRUSOE
|
||||
--- a/arch/x86/Kconfig.cpu 2016-12-11 14:17:54.000000000 -0500
|
||||
+++ b/arch/x86/Kconfig.cpu 2017-01-06 20:46:14.004109597 -0500
|
||||
@@ -115,6 +115,7 @@ config MPENTIUMM
|
||||
config MPENTIUM4
|
||||
bool "Pentium-4/Celeron(P4-based)/Pentium-4 M/older Xeon"
|
||||
depends on X86_32
|
||||
+ select X86_P6_NOP
|
||||
---help---
|
||||
Select this for Intel Pentium 4 chips. This includes the
|
||||
Pentium 4, Pentium D, P4-based Celeron and Xeon, and
|
||||
@@ -147,9 +148,8 @@ config MPENTIUM4
|
||||
-Paxville
|
||||
-Dempsey
|
||||
|
||||
-
|
||||
config MK6
|
||||
- bool "K6/K6-II/K6-III"
|
||||
+ bool "AMD K6/K6-II/K6-III"
|
||||
depends on X86_32
|
||||
---help---
|
||||
Select this for an AMD K6-family processor. Enables use of
|
||||
@@ -157,7 +157,7 @@ config MK6
|
||||
flags to GCC.
|
||||
|
||||
config MK7
|
||||
- bool "Athlon/Duron/K7"
|
||||
+ bool "AMD Athlon/Duron/K7"
|
||||
depends on X86_32
|
||||
---help---
|
||||
Select this for an AMD Athlon K7-family processor. Enables use of
|
||||
@@ -165,12 +165,83 @@ config MK7
|
||||
flags to GCC.
|
||||
|
||||
config MK8
|
||||
- bool "Opteron/Athlon64/Hammer/K8"
|
||||
+ bool "AMD Opteron/Athlon64/Hammer/K8"
|
||||
---help---
|
||||
Select this for an AMD Opteron or Athlon64 Hammer-family processor.
|
||||
Enables use of some extended instructions, and passes appropriate
|
||||
optimization flags to GCC.
|
||||
|
||||
+config MK8SSE3
|
||||
+ bool "AMD Opteron/Athlon64/Hammer/K8 with SSE3"
|
||||
+ ---help---
|
||||
+ Select this for improved AMD Opteron or Athlon64 Hammer-family processors.
|
||||
+ Enables use of some extended instructions, and passes appropriate
|
||||
+ optimization flags to GCC.
|
||||
+
|
||||
+config MK10
|
||||
+ bool "AMD 61xx/7x50/PhenomX3/X4/II/K10"
|
||||
+ ---help---
|
||||
+ Select this for an AMD 61xx Eight-Core Magny-Cours, Athlon X2 7x50,
|
||||
+ Phenom X3/X4/II, Athlon II X2/X3/X4, or Turion II-family processor.
|
||||
+ Enables use of some extended instructions, and passes appropriate
|
||||
+ optimization flags to GCC.
|
||||
+
|
||||
+config MBARCELONA
|
||||
+ bool "AMD Barcelona"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 10h Barcelona processors.
|
||||
+
|
||||
+ Enables -march=barcelona
|
||||
+
|
||||
+config MBOBCAT
|
||||
+ bool "AMD Bobcat"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 14h Bobcat processors.
|
||||
+
|
||||
+ Enables -march=btver1
|
||||
+
|
||||
+config MJAGUAR
|
||||
+ bool "AMD Jaguar"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 16h Jaguar processors.
|
||||
+
|
||||
+ Enables -march=btver2
|
||||
+
|
||||
+config MBULLDOZER
|
||||
+ bool "AMD Bulldozer"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 15h Bulldozer processors.
|
||||
+
|
||||
+ Enables -march=bdver1
|
||||
+
|
||||
+config MPILEDRIVER
|
||||
+ bool "AMD Piledriver"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 15h Piledriver processors.
|
||||
+
|
||||
+ Enables -march=bdver2
|
||||
+
|
||||
+config MSTEAMROLLER
|
||||
+ bool "AMD Steamroller"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 15h Steamroller processors.
|
||||
+
|
||||
+ Enables -march=bdver3
|
||||
+
|
||||
+config MEXCAVATOR
|
||||
+ bool "AMD Excavator"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 15h Excavator processors.
|
||||
+
|
||||
+ Enables -march=bdver4
|
||||
+
|
||||
+config MZEN
|
||||
+ bool "AMD Zen"
|
||||
+ ---help---
|
||||
+ Select this for AMD Family 17h Zen processors.
|
||||
+
|
||||
+ Enables -march=znver1
|
||||
+
|
||||
config MCRUSOE
|
||||
bool "Crusoe"
|
||||
depends on X86_32
|
||||
@@ -252,6 +323,7 @@ config MVIAC7
|
||||
|
||||
config MPSC
|
||||
bool "Intel P4 / older Netburst based Xeon"
|
||||
+ select X86_P6_NOP
|
||||
depends on X86_64
|
||||
---help---
|
||||
Optimize for Intel Pentium 4, Pentium D and older Nocona/Dempsey
|
||||
@@ -261,8 +333,19 @@ config MPSC
|
||||
using the cpu family field
|
||||
in /proc/cpuinfo. Family 15 is an older Xeon, Family 6 a newer one.
|
||||
|
||||
+config MATOM
|
||||
+ bool "Intel Atom"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for the Intel Atom platform. Intel Atom CPUs have an
|
||||
+ in-order pipelining architecture and thus can benefit from
|
||||
+ accordingly optimized code. Use a recent GCC with specific Atom
|
||||
+ support in order to fully benefit from selecting this option.
|
||||
+
|
||||
config MCORE2
|
||||
- bool "Core 2/newer Xeon"
|
||||
+ bool "Intel Core 2"
|
||||
+ select X86_P6_NOP
|
||||
---help---
|
||||
|
||||
Select this for Intel Core 2 and newer Core 2 Xeons (Xeon 51xx and
|
||||
@@ -270,14 +353,79 @@ config MCORE2
|
||||
family in /proc/cpuinfo. Newer ones have 6 and older ones 15
|
||||
(not a typo)
|
||||
|
||||
-config MATOM
|
||||
- bool "Intel Atom"
|
||||
+ Enables -march=core2
|
||||
+
|
||||
+config MNEHALEM
|
||||
+ bool "Intel Nehalem"
|
||||
+ select X86_P6_NOP
|
||||
---help---
|
||||
|
||||
- Select this for the Intel Atom platform. Intel Atom CPUs have an
|
||||
- in-order pipelining architecture and thus can benefit from
|
||||
- accordingly optimized code. Use a recent GCC with specific Atom
|
||||
- support in order to fully benefit from selecting this option.
|
||||
+ Select this for 1st Gen Core processors in the Nehalem family.
|
||||
+
|
||||
+ Enables -march=nehalem
|
||||
+
|
||||
+config MWESTMERE
|
||||
+ bool "Intel Westmere"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for the Intel Westmere formerly Nehalem-C family.
|
||||
+
|
||||
+ Enables -march=westmere
|
||||
+
|
||||
+config MSILVERMONT
|
||||
+ bool "Intel Silvermont"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for the Intel Silvermont platform.
|
||||
+
|
||||
+ Enables -march=silvermont
|
||||
+
|
||||
+config MSANDYBRIDGE
|
||||
+ bool "Intel Sandy Bridge"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 2nd Gen Core processors in the Sandy Bridge family.
|
||||
+
|
||||
+ Enables -march=sandybridge
|
||||
+
|
||||
+config MIVYBRIDGE
|
||||
+ bool "Intel Ivy Bridge"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 3rd Gen Core processors in the Ivy Bridge family.
|
||||
+
|
||||
+ Enables -march=ivybridge
|
||||
+
|
||||
+config MHASWELL
|
||||
+ bool "Intel Haswell"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 4th Gen Core processors in the Haswell family.
|
||||
+
|
||||
+ Enables -march=haswell
|
||||
+
|
||||
+config MBROADWELL
|
||||
+ bool "Intel Broadwell"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 5th Gen Core processors in the Broadwell family.
|
||||
+
|
||||
+ Enables -march=broadwell
|
||||
+
|
||||
+config MSKYLAKE
|
||||
+ bool "Intel Skylake"
|
||||
+ select X86_P6_NOP
|
||||
+ ---help---
|
||||
+
|
||||
+ Select this for 6th Gen Core processors in the Skylake family.
|
||||
+
|
||||
+ Enables -march=skylake
|
||||
|
||||
config GENERIC_CPU
|
||||
bool "Generic-x86-64"
|
||||
@@ -286,6 +434,19 @@ config GENERIC_CPU
|
||||
Generic x86-64 CPU.
|
||||
Run equally well on all x86-64 CPUs.
|
||||
|
||||
+config MNATIVE
|
||||
+ bool "Native optimizations autodetected by GCC"
|
||||
+ ---help---
|
||||
+
|
||||
+ GCC 4.2 and above support -march=native, which automatically detects
|
||||
+ the optimum settings to use based on your processor. -march=native
|
||||
+ also detects and applies additional settings beyond -march specific
|
||||
+ to your CPU, (eg. -msse4). Unless you have a specific reason not to
|
||||
+ (e.g. distcc cross-compiling), you should probably be using
|
||||
+ -march=native rather than anything listed below.
|
||||
+
|
||||
+ Enables -march=native
|
||||
+
|
||||
endchoice
|
||||
|
||||
config X86_GENERIC
|
||||
@@ -310,7 +471,7 @@ config X86_INTERNODE_CACHE_SHIFT
|
||||
config X86_L1_CACHE_SHIFT
|
||||
int
|
||||
default "7" if MPENTIUM4 || MPSC
|
||||
- default "6" if MK7 || MK8 || MPENTIUMM || MCORE2 || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
|
||||
+ default "6" if MK7 || MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MPENTIUMM || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM || MVIAC7 || X86_GENERIC || GENERIC_CPU
|
||||
default "4" if MELAN || M486 || MGEODEGX1
|
||||
default "5" if MWINCHIP3D || MWINCHIPC6 || MCRUSOE || MEFFICEON || MCYRIXIII || MK6 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || MVIAC3_2 || MGEODE_LX
|
||||
|
||||
@@ -341,45 +502,46 @@ config X86_ALIGNMENT_16
|
||||
|
||||
config X86_INTEL_USERCOPY
|
||||
def_bool y
|
||||
- depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK7 || MEFFICEON || MCORE2
|
||||
+ depends on MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M586MMX || X86_GENERIC || MK8 || MK8SSE3 || MK7 || MEFFICEON || MCORE2 || MK10 || MBARCELONA || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE
|
||||
|
||||
config X86_USE_PPRO_CHECKSUM
|
||||
def_bool y
|
||||
- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MATOM
|
||||
+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MK7 || MK6 || MK10 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MEFFICEON || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MATOM || MNATIVE
|
||||
|
||||
config X86_USE_3DNOW
|
||||
def_bool y
|
||||
depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
|
||||
|
||||
-#
|
||||
-# P6_NOPs are a relatively minor optimization that require a family >=
|
||||
-# 6 processor, except that it is broken on certain VIA chips.
|
||||
-# Furthermore, AMD chips prefer a totally different sequence of NOPs
|
||||
-# (which work on all CPUs). In addition, it looks like Virtual PC
|
||||
-# does not understand them.
|
||||
-#
|
||||
-# As a result, disallow these if we're not compiling for X86_64 (these
|
||||
-# NOPs do work on all x86-64 capable chips); the list of processors in
|
||||
-# the right-hand clause are the cores that benefit from this optimization.
|
||||
-#
|
||||
config X86_P6_NOP
|
||||
- def_bool y
|
||||
- depends on X86_64
|
||||
- depends on (MCORE2 || MPENTIUM4 || MPSC)
|
||||
+ default n
|
||||
+ bool "Support for P6_NOPs on Intel chips"
|
||||
+ depends on (MCORE2 || MPENTIUM4 || MPSC || MATOM || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE)
|
||||
+ ---help---
|
||||
+ P6_NOPs are a relatively minor optimization that require a family >=
|
||||
+ 6 processor, except that it is broken on certain VIA chips.
|
||||
+ Furthermore, AMD chips prefer a totally different sequence of NOPs
|
||||
+ (which work on all CPUs). In addition, it looks like Virtual PC
|
||||
+ does not understand them.
|
||||
+
|
||||
+ As a result, disallow these if we're not compiling for X86_64 (these
|
||||
+ NOPs do work on all x86-64 capable chips); the list of processors in
|
||||
+ the right-hand clause are the cores that benefit from this optimization.
|
||||
+
|
||||
+ Say Y if you have Intel CPU newer than Pentium Pro, N otherwise.
|
||||
|
||||
config X86_TSC
|
||||
def_bool y
|
||||
- depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MATOM) || X86_64
|
||||
+ depends on (MWINCHIP3D || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MK8SSE3 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MNATIVE || MATOM) || X86_64
|
||||
|
||||
config X86_CMPXCHG64
|
||||
def_bool y
|
||||
- depends on X86_PAE || X86_64 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM
|
||||
+ depends on X86_PAE || X86_64 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MATOM || MNATIVE
|
||||
|
||||
# this should be set for all -march=.. options where the compiler
|
||||
# generates cmov.
|
||||
config X86_CMOV
|
||||
def_bool y
|
||||
- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
|
||||
+ depends on (MK8 || MK8SSE3 || MK10 || MBARCELONA || MBOBCAT || MBULLDOZER || MPILEDRIVER || MSTEAMROLLER || MEXCAVATOR || MZEN || MJAGUAR || MK7 || MCORE2 || MNEHALEM || MWESTMERE || MSILVERMONT || MSANDYBRIDGE || MIVYBRIDGE || MHASWELL || MBROADWELL || MSKYLAKE || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MNATIVE || MATOM || MGEODE_LX)
|
||||
|
||||
config X86_MINIMUM_CPU_FAMILY
|
||||
int
|
||||
--- a/arch/x86/Makefile 2016-12-11 14:17:54.000000000 -0500
|
||||
+++ b/arch/x86/Makefile 2017-01-06 20:44:36.603227283 -0500
|
||||
@@ -104,13 +104,40 @@ else
|
||||
KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
|
||||
|
||||
# FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
|
||||
+ cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
|
||||
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
|
||||
+ cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-mtune=k8)
|
||||
+ cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10)
|
||||
+ cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona)
|
||||
+ cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1)
|
||||
+ cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2)
|
||||
+ cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1)
|
||||
+ cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2)
|
||||
+ cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3)
|
||||
+ cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4)
|
||||
+ cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1)
|
||||
cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
|
||||
|
||||
cflags-$(CONFIG_MCORE2) += \
|
||||
- $(call cc-option,-march=core2,$(call cc-option,-mtune=generic))
|
||||
- cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom) \
|
||||
- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
|
||||
+ $(call cc-option,-march=core2,$(call cc-option,-mtune=core2))
|
||||
+ cflags-$(CONFIG_MNEHALEM) += \
|
||||
+ $(call cc-option,-march=nehalem,$(call cc-option,-mtune=nehalem))
|
||||
+ cflags-$(CONFIG_MWESTMERE) += \
|
||||
+ $(call cc-option,-march=westmere,$(call cc-option,-mtune=westmere))
|
||||
+ cflags-$(CONFIG_MSILVERMONT) += \
|
||||
+ $(call cc-option,-march=silvermont,$(call cc-option,-mtune=silvermont))
|
||||
+ cflags-$(CONFIG_MSANDYBRIDGE) += \
|
||||
+ $(call cc-option,-march=sandybridge,$(call cc-option,-mtune=sandybridge))
|
||||
+ cflags-$(CONFIG_MIVYBRIDGE) += \
|
||||
+ $(call cc-option,-march=ivybridge,$(call cc-option,-mtune=ivybridge))
|
||||
+ cflags-$(CONFIG_MHASWELL) += \
|
||||
+ $(call cc-option,-march=haswell,$(call cc-option,-mtune=haswell))
|
||||
+ cflags-$(CONFIG_MBROADWELL) += \
|
||||
+ $(call cc-option,-march=broadwell,$(call cc-option,-mtune=broadwell))
|
||||
+ cflags-$(CONFIG_MSKYLAKE) += \
|
||||
+ $(call cc-option,-march=skylake,$(call cc-option,-mtune=skylake))
|
||||
+ cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell) \
|
||||
+ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
|
||||
cflags-$(CONFIG_GENERIC_CPU) += $(call cc-option,-mtune=generic)
|
||||
KBUILD_CFLAGS += $(cflags-y)
|
||||
|
||||
--- a/arch/x86/Makefile_32.cpu 2016-12-11 14:17:54.000000000 -0500
|
||||
+++ b/arch/x86/Makefile_32.cpu 2017-01-06 20:44:36.603227283 -0500
|
||||
@@ -23,7 +23,18 @@ cflags-$(CONFIG_MK6) += -march=k6
|
||||
# Please note, that patches that add -march=athlon-xp and friends are pointless.
|
||||
# They make zero difference whatsosever to performance at this time.
|
||||
cflags-$(CONFIG_MK7) += -march=athlon
|
||||
+cflags-$(CONFIG_MNATIVE) += $(call cc-option,-march=native)
|
||||
cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8,-march=athlon)
|
||||
+cflags-$(CONFIG_MK8SSE3) += $(call cc-option,-march=k8-sse3,-march=athlon)
|
||||
+cflags-$(CONFIG_MK10) += $(call cc-option,-march=amdfam10,-march=athlon)
|
||||
+cflags-$(CONFIG_MBARCELONA) += $(call cc-option,-march=barcelona,-march=athlon)
|
||||
+cflags-$(CONFIG_MBOBCAT) += $(call cc-option,-march=btver1,-march=athlon)
|
||||
+cflags-$(CONFIG_MJAGUAR) += $(call cc-option,-march=btver2,-march=athlon)
|
||||
+cflags-$(CONFIG_MBULLDOZER) += $(call cc-option,-march=bdver1,-march=athlon)
|
||||
+cflags-$(CONFIG_MPILEDRIVER) += $(call cc-option,-march=bdver2,-march=athlon)
|
||||
+cflags-$(CONFIG_MSTEAMROLLER) += $(call cc-option,-march=bdver3,-march=athlon)
|
||||
+cflags-$(CONFIG_MEXCAVATOR) += $(call cc-option,-march=bdver4,-march=athlon)
|
||||
+cflags-$(CONFIG_MZEN) += $(call cc-option,-march=znver1,-march=athlon)
|
||||
cflags-$(CONFIG_MCRUSOE) += -march=i686 $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
|
||||
cflags-$(CONFIG_MEFFICEON) += -march=i686 $(call tune,pentium3) $(align)-functions=0 $(align)-jumps=0 $(align)-loops=0
|
||||
cflags-$(CONFIG_MWINCHIPC6) += $(call cc-option,-march=winchip-c6,-march=i586)
|
||||
@@ -32,8 +43,16 @@ cflags-$(CONFIG_MCYRIXIII) += $(call cc-
|
||||
cflags-$(CONFIG_MVIAC3_2) += $(call cc-option,-march=c3-2,-march=i686)
|
||||
cflags-$(CONFIG_MVIAC7) += -march=i686
|
||||
cflags-$(CONFIG_MCORE2) += -march=i686 $(call tune,core2)
|
||||
-cflags-$(CONFIG_MATOM) += $(call cc-option,-march=atom,$(call cc-option,-march=core2,-march=i686)) \
|
||||
- $(call cc-option,-mtune=atom,$(call cc-option,-mtune=generic))
|
||||
+cflags-$(CONFIG_MNEHALEM) += -march=i686 $(call tune,nehalem)
|
||||
+cflags-$(CONFIG_MWESTMERE) += -march=i686 $(call tune,westmere)
|
||||
+cflags-$(CONFIG_MSILVERMONT) += -march=i686 $(call tune,silvermont)
|
||||
+cflags-$(CONFIG_MSANDYBRIDGE) += -march=i686 $(call tune,sandybridge)
|
||||
+cflags-$(CONFIG_MIVYBRIDGE) += -march=i686 $(call tune,ivybridge)
|
||||
+cflags-$(CONFIG_MHASWELL) += -march=i686 $(call tune,haswell)
|
||||
+cflags-$(CONFIG_MBROADWELL) += -march=i686 $(call tune,broadwell)
|
||||
+cflags-$(CONFIG_MSKYLAKE) += -march=i686 $(call tune,skylake)
|
||||
+cflags-$(CONFIG_MATOM) += $(call cc-option,-march=bonnell,$(call cc-option,-march=core2,-march=i686)) \
|
||||
+ $(call cc-option,-mtune=bonnell,$(call cc-option,-mtune=generic))
|
||||
|
||||
# AMD Elan support
|
||||
cflags-$(CONFIG_MELAN) += -march=i486
|
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,148 @@
|
|||
--- linux-4.8/drivers/cpufreq/intel_pstate.c.orig 2016-10-02 19:24:33.000000000 -0400
|
||||
+++ linux-4.8/drivers/cpufreq/intel_pstate.c 2016-10-09 19:32:01.073141319 -0400
|
||||
@@ -181,6 +181,8 @@
|
||||
* @cpu: CPU number for this instance data
|
||||
* @update_util: CPUFreq utility callback information
|
||||
* @update_util_set: CPUFreq utility callback is set
|
||||
+ * @iowait_boost: iowait-related boost fraction
|
||||
+ * @last_update: Time of the last update.
|
||||
* @pstate: Stores P state limits for this CPU
|
||||
* @vid: Stores VID limits for this CPU
|
||||
* @pid: Stores PID parameters for this CPU
|
||||
@@ -206,6 +208,7 @@
|
||||
struct vid_data vid;
|
||||
struct _pid pid;
|
||||
|
||||
+ u64 last_update;
|
||||
u64 last_sample_time;
|
||||
u64 prev_aperf;
|
||||
u64 prev_mperf;
|
||||
@@ -216,6 +219,7 @@
|
||||
struct acpi_processor_performance acpi_perf_data;
|
||||
bool valid_pss_table;
|
||||
#endif
|
||||
+ unsigned int iowait_boost;
|
||||
};
|
||||
|
||||
static struct cpudata **all_cpu_data;
|
||||
@@ -229,6 +233,7 @@
|
||||
* @p_gain_pct: PID proportional gain
|
||||
* @i_gain_pct: PID integral gain
|
||||
* @d_gain_pct: PID derivative gain
|
||||
+ * @boost_iowait: Whether or not to use iowait boosting.
|
||||
*
|
||||
* Stores per CPU model static PID configuration data.
|
||||
*/
|
||||
@@ -240,6 +245,7 @@
|
||||
int p_gain_pct;
|
||||
int d_gain_pct;
|
||||
int i_gain_pct;
|
||||
+ bool boost_iowait;
|
||||
};
|
||||
|
||||
/**
|
||||
@@ -1029,7 +1035,7 @@
|
||||
},
|
||||
};
|
||||
|
||||
-static struct cpu_defaults silvermont_params = {
|
||||
+static const struct cpu_defaults silvermont_params = {
|
||||
.pid_policy = {
|
||||
.sample_rate_ms = 10,
|
||||
.deadband = 0,
|
||||
@@ -1037,6 +1043,7 @@
|
||||
.p_gain_pct = 14,
|
||||
.d_gain_pct = 0,
|
||||
.i_gain_pct = 4,
|
||||
+ .boost_iowait = true,
|
||||
},
|
||||
.funcs = {
|
||||
.get_max = atom_get_max_pstate,
|
||||
@@ -1050,7 +1057,7 @@
|
||||
},
|
||||
};
|
||||
|
||||
-static struct cpu_defaults airmont_params = {
|
||||
+static const struct cpu_defaults airmont_params = {
|
||||
.pid_policy = {
|
||||
.sample_rate_ms = 10,
|
||||
.deadband = 0,
|
||||
@@ -1058,6 +1065,7 @@
|
||||
.p_gain_pct = 14,
|
||||
.d_gain_pct = 0,
|
||||
.i_gain_pct = 4,
|
||||
+ .boost_iowait = true,
|
||||
},
|
||||
.funcs = {
|
||||
.get_max = atom_get_max_pstate,
|
||||
@@ -1071,7 +1079,7 @@
|
||||
},
|
||||
};
|
||||
|
||||
-static struct cpu_defaults knl_params = {
|
||||
+static const struct cpu_defaults knl_params = {
|
||||
.pid_policy = {
|
||||
.sample_rate_ms = 10,
|
||||
.deadband = 0,
|
||||
@@ -1091,7 +1099,7 @@
|
||||
},
|
||||
};
|
||||
|
||||
-static struct cpu_defaults bxt_params = {
|
||||
+static const struct cpu_defaults bxt_params = {
|
||||
.pid_policy = {
|
||||
.sample_rate_ms = 10,
|
||||
.deadband = 0,
|
||||
@@ -1099,6 +1107,7 @@
|
||||
.p_gain_pct = 14,
|
||||
.d_gain_pct = 0,
|
||||
.i_gain_pct = 4,
|
||||
+ .boost_iowait = true,
|
||||
},
|
||||
.funcs = {
|
||||
.get_max = core_get_max_pstate,
|
||||
@@ -1222,36 +1231,18 @@
|
||||
static inline int32_t get_target_pstate_use_cpu_load(struct cpudata *cpu)
|
||||
{
|
||||
struct sample *sample = &cpu->sample;
|
||||
- u64 cummulative_iowait, delta_iowait_us;
|
||||
- u64 delta_iowait_mperf;
|
||||
- u64 mperf, now;
|
||||
- int32_t cpu_load;
|
||||
+ int32_t busy_frac, boost;
|
||||
|
||||
- cummulative_iowait = get_cpu_iowait_time_us(cpu->cpu, &now);
|
||||
+ busy_frac = div_fp(sample->mperf, sample->tsc);
|
||||
|
||||
- /*
|
||||
- * Convert iowait time into number of IO cycles spent at max_freq.
|
||||
- * IO is considered as busy only for the cpu_load algorithm. For
|
||||
- * performance this is not needed since we always try to reach the
|
||||
- * maximum P-State, so we are already boosting the IOs.
|
||||
- */
|
||||
- delta_iowait_us = cummulative_iowait - cpu->prev_cummulative_iowait;
|
||||
- delta_iowait_mperf = div64_u64(delta_iowait_us * cpu->pstate.scaling *
|
||||
- cpu->pstate.max_pstate, MSEC_PER_SEC);
|
||||
-
|
||||
- mperf = cpu->sample.mperf + delta_iowait_mperf;
|
||||
- cpu->prev_cummulative_iowait = cummulative_iowait;
|
||||
+ boost = cpu->iowait_boost;
|
||||
+ cpu->iowait_boost >>= 1;
|
||||
|
||||
- /*
|
||||
- * The load can be estimated as the ratio of the mperf counter
|
||||
- * running at a constant frequency during active periods
|
||||
- * (C0) and the time stamp counter running at the same frequency
|
||||
- * also during C-states.
|
||||
- */
|
||||
- cpu_load = div64_u64(int_tofp(100) * mperf, sample->tsc);
|
||||
- cpu->sample.busy_scaled = cpu_load;
|
||||
+ if (busy_frac < boost)
|
||||
+ busy_frac = boost;
|
||||
|
||||
- return get_avg_pstate(cpu) - pid_calc(&cpu->pid, cpu_load);
|
||||
+ sample->busy_scaled = busy_frac * 100;
|
||||
+ return get_avg_pstate(cpu) - pid_calc(&cpu->pid, sample->busy_scaled);
|
||||
}
|
||||
|
||||
static inline int32_t get_target_pstate_use_performance(struct cpudata *cpu)
|
|
@ -0,0 +1,94 @@
|
|||
From 19be0eaffa3ac7d8eb6784ad9bdbc7d67ed8e619 Mon Sep 17 00:00:00 2001
|
||||
From: Linus Torvalds <torvalds@linux-foundation.org>
|
||||
Date: Thu, 13 Oct 2016 13:07:36 -0700
|
||||
Subject: mm: remove gup_flags FOLL_WRITE games from __get_user_pages()
|
||||
|
||||
This is an ancient bug that was actually attempted to be fixed once
|
||||
(badly) by me eleven years ago in commit 4ceb5db9757a ("Fix
|
||||
get_user_pages() race for write access") but that was then undone due to
|
||||
problems on s390 by commit f33ea7f404e5 ("fix get_user_pages bug").
|
||||
|
||||
In the meantime, the s390 situation has long been fixed, and we can now
|
||||
fix it by checking the pte_dirty() bit properly (and do it better). The
|
||||
s390 dirty bit was implemented in abf09bed3cce ("s390/mm: implement
|
||||
software dirty bits") which made it into v3.9. Earlier kernels will
|
||||
have to look at the page state itself.
|
||||
|
||||
Also, the VM has become more scalable, and what used a purely
|
||||
theoretical race back then has become easier to trigger.
|
||||
|
||||
To fix it, we introduce a new internal FOLL_COW flag to mark the "yes,
|
||||
we already did a COW" rather than play racy games with FOLL_WRITE that
|
||||
is very fundamental, and then use the pte dirty flag to validate that
|
||||
the FOLL_COW flag is still valid.
|
||||
|
||||
Reported-and-tested-by: Phil "not Paul" Oester <kernel@linuxace.com>
|
||||
Acked-by: Hugh Dickins <hughd@google.com>
|
||||
Reviewed-by: Michal Hocko <mhocko@suse.com>
|
||||
Cc: Andy Lutomirski <luto@kernel.org>
|
||||
Cc: Kees Cook <keescook@chromium.org>
|
||||
Cc: Oleg Nesterov <oleg@redhat.com>
|
||||
Cc: Willy Tarreau <w@1wt.eu>
|
||||
Cc: Nick Piggin <npiggin@gmail.com>
|
||||
Cc: Greg Thelen <gthelen@google.com>
|
||||
Cc: stable@vger.kernel.org
|
||||
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
|
||||
---
|
||||
include/linux/mm.h | 1 +
|
||||
mm/gup.c | 14 ++++++++++++--
|
||||
2 files changed, 13 insertions(+), 2 deletions(-)
|
||||
|
||||
diff --git a/include/linux/mm.h b/include/linux/mm.h
|
||||
index e9caec6..ed85879 100644
|
||||
--- a/include/linux/mm.h
|
||||
+++ b/include/linux/mm.h
|
||||
@@ -2232,6 +2232,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
|
||||
#define FOLL_TRIED 0x800 /* a retry, previous pass started an IO */
|
||||
#define FOLL_MLOCK 0x1000 /* lock present pages */
|
||||
#define FOLL_REMOTE 0x2000 /* we are working on non-current tsk/mm */
|
||||
+#define FOLL_COW 0x4000 /* internal GUP flag */
|
||||
|
||||
typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
|
||||
void *data);
|
||||
diff --git a/mm/gup.c b/mm/gup.c
|
||||
index 96b2b2f..22cc22e 100644
|
||||
--- a/mm/gup.c
|
||||
+++ b/mm/gup.c
|
||||
@@ -60,6 +60,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
|
||||
return -EEXIST;
|
||||
}
|
||||
|
||||
+/*
|
||||
+ * FOLL_FORCE can write to even unwritable pte's, but only
|
||||
+ * after we've gone through a COW cycle and they are dirty.
|
||||
+ */
|
||||
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
|
||||
+{
|
||||
+ return pte_write(pte) ||
|
||||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
|
||||
+}
|
||||
+
|
||||
static struct page *follow_page_pte(struct vm_area_struct *vma,
|
||||
unsigned long address, pmd_t *pmd, unsigned int flags)
|
||||
{
|
||||
@@ -95,7 +105,7 @@ retry:
|
||||
}
|
||||
if ((flags & FOLL_NUMA) && pte_protnone(pte))
|
||||
goto no_page;
|
||||
- if ((flags & FOLL_WRITE) && !pte_write(pte)) {
|
||||
+ if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
|
||||
pte_unmap_unlock(ptep, ptl);
|
||||
return NULL;
|
||||
}
|
||||
@@ -412,7 +422,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
|
||||
* reCOWed by userspace write).
|
||||
*/
|
||||
if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
|
||||
- *flags &= ~FOLL_WRITE;
|
||||
+ *flags |= FOLL_COW;
|
||||
return 0;
|
||||
}
|
||||
|
||||
--
|
||||
cgit v0.12
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
|
@ -0,0 +1,708 @@
|
|||
diff --git a/init/Kconfig b/init/Kconfig
|
||||
index 0dfd09d..8d704e5 100644
|
||||
--- a/init/Kconfig
|
||||
+++ b/init/Kconfig
|
||||
@@ -36,6 +36,15 @@ config BROKEN_ON_SMP
|
||||
depends on BROKEN || !SMP
|
||||
default y
|
||||
|
||||
+config BLD
|
||||
+ bool "An alternate CPU load distribution technique for task scheduler"
|
||||
+ depends on SMP
|
||||
+ default y
|
||||
+ help
|
||||
+ This is an alternate CPU load distribution technique based for task
|
||||
+ scheduler based on The Barbershop Load Distribution algorithm. Not
|
||||
+ suitable for NUMA, should work well on SMP.
|
||||
+
|
||||
config INIT_ENV_ARG_LIMIT
|
||||
int
|
||||
default 32 if !UML
|
||||
diff --git a/kernel/sched/bld.h b/kernel/sched/bld.h
|
||||
new file mode 100644
|
||||
index 0000000..f1f9fba
|
||||
--- /dev/null
|
||||
+++ b/kernel/sched/bld.h
|
||||
@@ -0,0 +1,215 @@
|
||||
+#ifdef CONFIG_BLD
|
||||
+
|
||||
+static DEFINE_RWLOCK(rt_list_lock);
|
||||
+static LIST_HEAD(rt_rq_head);
|
||||
+static LIST_HEAD(cfs_rq_head);
|
||||
+static DEFINE_RWLOCK(cfs_list_lock);
|
||||
+
|
||||
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
|
||||
+{
|
||||
+ return cfs_rq->rq;
|
||||
+}
|
||||
+#else
|
||||
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
|
||||
+{
|
||||
+ return container_of(cfs_rq, struct rq, cfs);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
+#ifdef CONFIG_RT_GROUP_SCHED
|
||||
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
|
||||
+{
|
||||
+ return rt_rq->rq;
|
||||
+}
|
||||
+#else
|
||||
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
|
||||
+{
|
||||
+ return container_of(rt_rq, struct rq, rt);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
+static int select_cpu_for_wakeup(int task_type, struct cpumask *mask)
|
||||
+{
|
||||
+ int cpu = smp_processor_id(), i;
|
||||
+ unsigned long load, varload;
|
||||
+ struct rq *rq;
|
||||
+
|
||||
+ if (task_type) {
|
||||
+ varload = ULONG_MAX;
|
||||
+ for_each_cpu(i, mask) {
|
||||
+ rq = cpu_rq(i);
|
||||
+ load = rq->cfs.load.weight;
|
||||
+ if (load < varload) {
|
||||
+ varload = load;
|
||||
+ cpu = i;
|
||||
+ }
|
||||
+ }
|
||||
+ } else {
|
||||
+ /* Here's an attempt to get a CPU within the mask where
|
||||
+ * we can preempt easily. To achieve this we tried to
|
||||
+ * maintain a lowbit, which indicate the lowest bit set on
|
||||
+ * array bitmap. Since all CPUs contains high priority
|
||||
+ * kernel threads therefore we eliminate 0, so it might not
|
||||
+ * be right every time, but it's just an indicator.
|
||||
+ */
|
||||
+ varload = 1;
|
||||
+
|
||||
+ for_each_cpu(i, mask) {
|
||||
+ rq = cpu_rq(i);
|
||||
+ load = rq->rt.lowbit;
|
||||
+ if (load >= varload) {
|
||||
+ varload = load;
|
||||
+ cpu = i;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_cfs(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ struct cfs_rq *cfs;
|
||||
+ unsigned long flags;
|
||||
+ unsigned int cpu = smp_processor_id();
|
||||
+
|
||||
+ read_lock_irqsave(&cfs_list_lock, flags);
|
||||
+ list_for_each_entry(cfs, &cfs_rq_head, bld_cfs_list) {
|
||||
+ cpu = cpu_of(rq_of_cfs(cfs));
|
||||
+ if (cpu_online(cpu))
|
||||
+ break;
|
||||
+ }
|
||||
+ read_unlock_irqrestore(&cfs_list_lock, flags);
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_rt(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ struct rt_rq *rt;
|
||||
+ unsigned long flags;
|
||||
+ unsigned int cpu = smp_processor_id();
|
||||
+
|
||||
+ read_lock_irqsave(&rt_list_lock, flags);
|
||||
+ list_for_each_entry(rt, &rt_rq_head, bld_rt_list) {
|
||||
+ cpu = cpu_of(rq_of_rt(rt));
|
||||
+ if (cpu_online(cpu))
|
||||
+ break;
|
||||
+ }
|
||||
+ read_unlock_irqrestore(&rt_list_lock, flags);
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_domain(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ unsigned int cpu = smp_processor_id(), want_affine = 0;
|
||||
+ struct cpumask *tmpmask;
|
||||
+
|
||||
+ if (p->nr_cpus_allowed == 1)
|
||||
+ return task_cpu(p);
|
||||
+
|
||||
+ if (sd_flags & SD_BALANCE_WAKE) {
|
||||
+ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
|
||||
+ want_affine = 1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (want_affine)
|
||||
+ tmpmask = tsk_cpus_allowed(p);
|
||||
+ else
|
||||
+ tmpmask = sched_domain_span(cpu_rq(task_cpu(p))->sd);
|
||||
+
|
||||
+ if (rt_task(p))
|
||||
+ cpu = select_cpu_for_wakeup(0, tmpmask);
|
||||
+ else
|
||||
+ cpu = select_cpu_for_wakeup(1, tmpmask);
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static void track_load_rt(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ int firstbit;
|
||||
+ struct rt_rq *first;
|
||||
+ struct rt_prio_array *array = &rq->rt.active;
|
||||
+
|
||||
+ first = list_entry(rt_rq_head.next, struct rt_rq, bld_rt_list);
|
||||
+ firstbit = sched_find_first_bit(array->bitmap);
|
||||
+
|
||||
+ /* Maintaining rt.lowbit */
|
||||
+ if (firstbit > 0 && firstbit <= rq->rt.lowbit)
|
||||
+ rq->rt.lowbit = firstbit;
|
||||
+
|
||||
+ if (rq->rt.lowbit < first->lowbit) {
|
||||
+ write_lock_irqsave(&rt_list_lock, flag);
|
||||
+ list_del(&rq->rt.bld_rt_list);
|
||||
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
|
||||
+ write_unlock_irqrestore(&rt_list_lock, flag);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static int bld_get_cpu(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ unsigned int cpu;
|
||||
+
|
||||
+ if (sd_flags == SD_BALANCE_WAKE || (sd_flags == SD_BALANCE_EXEC && (get_nr_threads(p) > 1)))
|
||||
+ cpu = bld_pick_cpu_domain(p, sd_flags, wake_flags);
|
||||
+ else {
|
||||
+ if (rt_task(p))
|
||||
+ cpu = bld_pick_cpu_rt(p, sd_flags, wake_flags);
|
||||
+ else
|
||||
+ cpu = bld_pick_cpu_cfs(p, sd_flags, wake_flags);
|
||||
+ }
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static void bld_track_load_activate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ if (rt_task(p)) {
|
||||
+ track_load_rt(rq, p);
|
||||
+ } else {
|
||||
+ if (rq->cfs.pos != 2) {
|
||||
+ struct cfs_rq *last;
|
||||
+ last = list_entry(cfs_rq_head.prev, struct cfs_rq, bld_cfs_list);
|
||||
+ if (rq->cfs.load.weight >= last->load.weight) {
|
||||
+ write_lock_irqsave(&cfs_list_lock, flag);
|
||||
+ list_del(&rq->cfs.bld_cfs_list);
|
||||
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 2; last->pos = 1;
|
||||
+ write_unlock_irqrestore(&cfs_list_lock, flag);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ if (rt_task(p)) {
|
||||
+ track_load_rt(rq, p);
|
||||
+ } else {
|
||||
+ if (rq->cfs.pos != 0) {
|
||||
+ struct cfs_rq *first;
|
||||
+ first = list_entry(cfs_rq_head.next, struct cfs_rq, bld_cfs_list);
|
||||
+ if (rq->cfs.load.weight <= first->load.weight) {
|
||||
+ write_lock_irqsave(&cfs_list_lock, flag);
|
||||
+ list_del(&rq->cfs.bld_cfs_list);
|
||||
+ list_add(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 0; first->pos = 1;
|
||||
+ write_unlock_irqrestore(&cfs_list_lock, flag);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+#else
|
||||
+static inline void bld_track_load_activate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+}
|
||||
+
|
||||
+static inline void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+}
|
||||
+#endif /* CONFIG_BLD */
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index d1f7149..c3236de 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -24,6 +24,8 @@
|
||||
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
|
||||
* 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
|
||||
* Thomas Gleixner, Mike Kravetz
|
||||
+ * 2012-Feb The Barbershop Load Distribution (BLD) algorithm - an alternate
|
||||
+ * CPU load distribution technique for kernel scheduler by Rakib Mullick.
|
||||
*/
|
||||
|
||||
#include <linux/kasan.h>
|
||||
@@ -86,6 +88,7 @@
|
||||
#include "sched.h"
|
||||
#include "../workqueue_internal.h"
|
||||
#include "../smpboot.h"
|
||||
+#include "bld.h"
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/sched.h>
|
||||
@@ -713,6 +716,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
if (!(flags & ENQUEUE_RESTORE))
|
||||
sched_info_queued(rq, p);
|
||||
p->sched_class->enqueue_task(rq, p, flags);
|
||||
+ if (!dl_task(p))
|
||||
+ bld_track_load_activate(rq, p);
|
||||
}
|
||||
|
||||
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
@@ -721,6 +726,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
if (!(flags & DEQUEUE_SAVE))
|
||||
sched_info_dequeued(rq, p);
|
||||
p->sched_class->dequeue_task(rq, p, flags);
|
||||
+ if (!dl_task(p))
|
||||
+ bld_track_load_deactivate(rq, p);
|
||||
}
|
||||
|
||||
void activate_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
@@ -1515,8 +1522,16 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
|
||||
{
|
||||
lockdep_assert_held(&p->pi_lock);
|
||||
|
||||
- if (p->nr_cpus_allowed > 1)
|
||||
+ if (p->nr_cpus_allowed > 1) {
|
||||
+#ifndef CONFIG_BLD
|
||||
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
|
||||
+#else
|
||||
+ if(dl_task(p))
|
||||
+ cpu = dl_sched_class.select_task_rq(p, cpu, sd_flags, wake_flags);
|
||||
+ else
|
||||
+ cpu = bld_get_cpu(p, sd_flags, wake_flags);
|
||||
+#endif
|
||||
+ }
|
||||
|
||||
/*
|
||||
* In order not to call set_task_cpu() on a blocking task we need
|
||||
@@ -1706,7 +1721,11 @@ void scheduler_ipi(void)
|
||||
*/
|
||||
preempt_fold_need_resched();
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
|
||||
+#else
|
||||
+ if (llist_empty(&this_rq()->wake_list))
|
||||
+#endif
|
||||
return;
|
||||
|
||||
/*
|
||||
@@ -1728,13 +1747,16 @@ void scheduler_ipi(void)
|
||||
/*
|
||||
* Check if someone kicked us for doing the nohz idle load balance.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
if (unlikely(got_nohz_idle_kick())) {
|
||||
this_rq()->idle_balance = 1;
|
||||
raise_softirq_irqoff(SCHED_SOFTIRQ);
|
||||
}
|
||||
+#endif
|
||||
irq_exit();
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
static void ttwu_queue_remote(struct task_struct *p, int cpu)
|
||||
{
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
@@ -1747,6 +1769,13 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu)
|
||||
}
|
||||
}
|
||||
|
||||
+#endif
|
||||
+
|
||||
+bool cpus_share_cache(int this_cpu, int that_cpu)
|
||||
+{
|
||||
+ return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
|
||||
+}
|
||||
+
|
||||
void wake_up_if_idle(int cpu)
|
||||
{
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
@@ -1770,18 +1799,13 @@ void wake_up_if_idle(int cpu)
|
||||
out:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
-
|
||||
-bool cpus_share_cache(int this_cpu, int that_cpu)
|
||||
-{
|
||||
- return per_cpu(sd_llc_id, this_cpu) == per_cpu(sd_llc_id, that_cpu);
|
||||
-}
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
static void ttwu_queue(struct task_struct *p, int cpu)
|
||||
{
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
|
||||
-#if defined(CONFIG_SMP)
|
||||
+#if defined(CONFIG_SMP) && !defined(CONFIG_BLD)
|
||||
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
|
||||
sched_clock_cpu(cpu); /* sync clocks x-cpu */
|
||||
ttwu_queue_remote(p, cpu);
|
||||
@@ -2292,7 +2316,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
|
||||
* Silence PROVE_RCU.
|
||||
*/
|
||||
raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
- set_task_cpu(p, cpu);
|
||||
+ __set_task_cpu(p, cpu);
|
||||
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
|
||||
|
||||
#ifdef CONFIG_SCHED_INFO
|
||||
@@ -2837,7 +2861,14 @@ void sched_exec(void)
|
||||
int dest_cpu;
|
||||
|
||||
raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
+#ifndef CONFIG_BLD
|
||||
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
|
||||
+#else
|
||||
+ if (dl_task(p))
|
||||
+ dest_cpu = task_cpu(p);
|
||||
+ else
|
||||
+ dest_cpu = bld_get_cpu(p, SD_BALANCE_EXEC, 0);
|
||||
+#endif
|
||||
if (dest_cpu == smp_processor_id())
|
||||
goto unlock;
|
||||
|
||||
@@ -2926,8 +2957,10 @@ void scheduler_tick(void)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
rq->idle_balance = idle_cpu(cpu);
|
||||
+#ifndef CONFIG_BLD
|
||||
trigger_load_balance(rq);
|
||||
#endif
|
||||
+#endif
|
||||
rq_last_tick_reset(rq);
|
||||
}
|
||||
|
||||
@@ -7359,6 +7392,15 @@ void __init sched_init(void)
|
||||
#endif
|
||||
init_rq_hrtick(rq);
|
||||
atomic_set(&rq->nr_iowait, 0);
|
||||
+#ifdef CONFIG_BLD
|
||||
+ INIT_LIST_HEAD(&rq->cfs.bld_cfs_list);
|
||||
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 0;
|
||||
+
|
||||
+ INIT_LIST_HEAD(&rq->rt.bld_rt_list);
|
||||
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
|
||||
+ rq->rt.lowbit = INT_MAX;
|
||||
+#endif
|
||||
}
|
||||
|
||||
set_load_weight(&init_task);
|
||||
@@ -7399,6 +7441,9 @@ void __init sched_init(void)
|
||||
init_sched_fair_class();
|
||||
|
||||
scheduler_running = 1;
|
||||
+#ifdef CONFIG_BLD
|
||||
+ printk(KERN_INFO "BLD: An Alternate CPU load distributor activated.\n");
|
||||
+#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
|
||||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
||||
index e7dd0ec..555572f 100644
|
||||
--- a/kernel/sched/fair.c
|
||||
+++ b/kernel/sched/fair.c
|
||||
@@ -4746,6 +4746,7 @@ static void task_waking_fair(struct task_struct *p)
|
||||
record_wakee(p);
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
/*
|
||||
* effective_load() calculates the load change as seen from the root_task_group
|
||||
@@ -5248,6 +5249,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
|
||||
|
||||
return new_cpu;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
/*
|
||||
* Called immediately before a task is migrated to a new cpu; task_cpu(p) and
|
||||
@@ -5552,6 +5554,7 @@ idle:
|
||||
* further scheduler activity on it and we're being very careful to
|
||||
* re-start the picking loop.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
lockdep_unpin_lock(&rq->lock);
|
||||
new_tasks = idle_balance(rq);
|
||||
lockdep_pin_lock(&rq->lock);
|
||||
@@ -5565,7 +5568,7 @@ idle:
|
||||
|
||||
if (new_tasks > 0)
|
||||
goto again;
|
||||
-
|
||||
+#endif
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -6226,8 +6229,9 @@ static unsigned long task_h_load(struct task_struct *p)
|
||||
}
|
||||
#endif
|
||||
|
||||
-/********** Helpers for find_busiest_group ************************/
|
||||
+#ifndef CONFIG_BLD
|
||||
|
||||
+/********** Helpers for find_busiest_group ************************/
|
||||
enum group_type {
|
||||
group_other = 0,
|
||||
group_imbalanced,
|
||||
@@ -6318,6 +6322,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
|
||||
|
||||
return load_idx;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static unsigned long scale_rt_capacity(int cpu)
|
||||
{
|
||||
@@ -6426,6 +6431,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
|
||||
sdg->sgc->capacity = capacity;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
/*
|
||||
* Check whether the capacity of the rq has been noticeably reduced by side
|
||||
* activity. The imbalance_pct is used for the threshold.
|
||||
@@ -7659,6 +7665,8 @@ static inline int on_null_domain(struct rq *rq)
|
||||
return unlikely(!rcu_dereference_sched(rq->sd));
|
||||
}
|
||||
|
||||
+#endif /* CONFIG_BLD */
|
||||
+
|
||||
#ifdef CONFIG_NO_HZ_COMMON
|
||||
/*
|
||||
* idle load balancing details
|
||||
@@ -7666,12 +7674,39 @@ static inline int on_null_domain(struct rq *rq)
|
||||
* needed, they will kick the idle load balancer, which then does idle
|
||||
* load balancing for all the idle CPUs.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
static struct {
|
||||
cpumask_var_t idle_cpus_mask;
|
||||
atomic_t nr_cpus;
|
||||
unsigned long next_balance; /* in jiffy units */
|
||||
} nohz ____cacheline_aligned;
|
||||
|
||||
+static inline void nohz_balance_exit_idle(int cpu)
|
||||
+{
|
||||
+ if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
|
||||
+ /*
|
||||
+ * Completely isolated CPUs don't ever set, so we must test.
|
||||
+ */
|
||||
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
|
||||
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
|
||||
+ atomic_dec(&nohz.nr_cpus);
|
||||
+ }
|
||||
+ clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static int sched_ilb_notifier(struct notifier_block *nfb,
|
||||
+ unsigned long action, void *hcpu)
|
||||
+{
|
||||
+ switch (action & ~CPU_TASKS_FROZEN) {
|
||||
+ case CPU_DYING:
|
||||
+ nohz_balance_exit_idle(smp_processor_id());
|
||||
+ return NOTIFY_OK;
|
||||
+ default:
|
||||
+ return NOTIFY_DONE;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
static inline int find_new_ilb(void)
|
||||
{
|
||||
int ilb = cpumask_first(nohz.idle_cpus_mask);
|
||||
@@ -7709,20 +7744,7 @@ static void nohz_balancer_kick(void)
|
||||
smp_send_reschedule(ilb_cpu);
|
||||
return;
|
||||
}
|
||||
-
|
||||
-static inline void nohz_balance_exit_idle(int cpu)
|
||||
-{
|
||||
- if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
|
||||
- /*
|
||||
- * Completely isolated CPUs don't ever set, so we must test.
|
||||
- */
|
||||
- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
|
||||
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
|
||||
- atomic_dec(&nohz.nr_cpus);
|
||||
- }
|
||||
- clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
- }
|
||||
-}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static inline void set_cpu_sd_state_busy(void)
|
||||
{
|
||||
@@ -7764,6 +7786,7 @@ unlock:
|
||||
*/
|
||||
void nohz_balance_enter_idle(int cpu)
|
||||
{
|
||||
+#ifndef CONFIG_BLD
|
||||
/*
|
||||
* If this cpu is going down, then nothing needs to be done.
|
||||
*/
|
||||
@@ -7782,23 +7805,10 @@ void nohz_balance_enter_idle(int cpu)
|
||||
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
|
||||
atomic_inc(&nohz.nr_cpus);
|
||||
set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
-}
|
||||
-
|
||||
-static int sched_ilb_notifier(struct notifier_block *nfb,
|
||||
- unsigned long action, void *hcpu)
|
||||
-{
|
||||
- switch (action & ~CPU_TASKS_FROZEN) {
|
||||
- case CPU_DYING:
|
||||
- nohz_balance_exit_idle(smp_processor_id());
|
||||
- return NOTIFY_OK;
|
||||
- default:
|
||||
- return NOTIFY_DONE;
|
||||
- }
|
||||
+#endif
|
||||
}
|
||||
#endif
|
||||
|
||||
-static DEFINE_SPINLOCK(balancing);
|
||||
-
|
||||
/*
|
||||
* Scale the max load_balance interval with the number of CPUs in the system.
|
||||
* This trades load-balance latency on larger machines for less cross talk.
|
||||
@@ -7808,6 +7818,9 @@ void update_max_interval(void)
|
||||
max_load_balance_interval = HZ*num_online_cpus()/10;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
+static DEFINE_SPINLOCK(balancing);
|
||||
+
|
||||
/*
|
||||
* It checks each scheduling domain to see if it is due to be balanced,
|
||||
* and initiates a balancing operation if so.
|
||||
@@ -8095,6 +8108,7 @@ void trigger_load_balance(struct rq *rq)
|
||||
nohz_balancer_kick();
|
||||
#endif
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static void rq_online_fair(struct rq *rq)
|
||||
{
|
||||
@@ -8531,7 +8545,9 @@ const struct sched_class fair_sched_class = {
|
||||
.put_prev_task = put_prev_task_fair,
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
+#ifndef CONFIG_BLD
|
||||
.select_task_rq = select_task_rq_fair,
|
||||
+#endif
|
||||
.migrate_task_rq = migrate_task_rq_fair,
|
||||
|
||||
.rq_online = rq_online_fair,
|
||||
@@ -8593,6 +8609,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
|
||||
|
||||
__init void init_sched_fair_class(void)
|
||||
{
|
||||
+#ifndef CONFIG_BLD
|
||||
#ifdef CONFIG_SMP
|
||||
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
|
||||
|
||||
@@ -8602,5 +8619,5 @@ __init void init_sched_fair_class(void)
|
||||
cpu_notifier(sched_ilb_notifier, 0);
|
||||
#endif
|
||||
#endif /* SMP */
|
||||
-
|
||||
+#endif /* BLD */
|
||||
}
|
||||
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
|
||||
index ec4f538d..4462bed 100644
|
||||
--- a/kernel/sched/rt.c
|
||||
+++ b/kernel/sched/rt.c
|
||||
@@ -1375,6 +1375,7 @@ static void yield_task_rt(struct rq *rq)
|
||||
#ifdef CONFIG_SMP
|
||||
static int find_lowest_rq(struct task_struct *task);
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
static int
|
||||
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
|
||||
{
|
||||
@@ -1430,6 +1431,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
|
||||
out:
|
||||
return cpu;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
@@ -2335,7 +2337,9 @@ const struct sched_class rt_sched_class = {
|
||||
.put_prev_task = put_prev_task_rt,
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
+#ifndef CONFIG_BLD
|
||||
.select_task_rq = select_task_rq_rt,
|
||||
+#endif
|
||||
|
||||
.set_cpus_allowed = set_cpus_allowed_common,
|
||||
.rq_online = rq_online_rt,
|
||||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
|
||||
index ec2e8d2..aaab735 100644
|
||||
--- a/kernel/sched/sched.h
|
||||
+++ b/kernel/sched/sched.h
|
||||
@@ -408,9 +408,8 @@ struct cfs_rq {
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
-#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
|
||||
-
|
||||
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
/*
|
||||
* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
|
||||
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
|
||||
@@ -434,6 +433,11 @@ struct cfs_rq {
|
||||
struct list_head throttled_list;
|
||||
#endif /* CONFIG_CFS_BANDWIDTH */
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
+
|
||||
+#ifdef CONFIG_BLD
|
||||
+ struct list_head bld_cfs_list;
|
||||
+ char pos;
|
||||
+#endif
|
||||
};
|
||||
|
||||
static inline int rt_bandwidth_enabled(void)
|
||||
@@ -479,12 +483,16 @@ struct rt_rq {
|
||||
/* Nests inside the rq lock: */
|
||||
raw_spinlock_t rt_runtime_lock;
|
||||
|
||||
+ struct rq *rq;
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
unsigned long rt_nr_boosted;
|
||||
|
||||
- struct rq *rq;
|
||||
struct task_group *tg;
|
||||
#endif
|
||||
+#ifdef CONFIG_BLD
|
||||
+ struct list_head bld_rt_list;
|
||||
+ int lowbit;
|
||||
+#endif
|
||||
};
|
||||
|
||||
/* Deadline class' related fields in a runqueue */
|
|
@ -0,0 +1,695 @@
|
|||
diff --git a/init/Kconfig b/init/Kconfig
|
||||
index cac3f09..4e49d16 100644
|
||||
--- a/init/Kconfig
|
||||
+++ b/init/Kconfig
|
||||
@@ -36,6 +36,15 @@ config BROKEN_ON_SMP
|
||||
depends on BROKEN || !SMP
|
||||
default y
|
||||
|
||||
+config BLD
|
||||
+ bool "An alternate CPU load distribution technique for task scheduler"
|
||||
+ depends on SMP
|
||||
+ default y
|
||||
+ help
|
||||
+ This is an alternate CPU load distribution technique based for task
|
||||
+ scheduler based on The Barbershop Load Distribution algorithm. Not
|
||||
+ suitable for NUMA, should work well on SMP.
|
||||
+
|
||||
config INIT_ENV_ARG_LIMIT
|
||||
int
|
||||
default 32 if !UML
|
||||
diff --git a/kernel/sched/bld.h b/kernel/sched/bld.h
|
||||
new file mode 100644
|
||||
index 0000000..f1f9fba
|
||||
--- /dev/null
|
||||
+++ b/kernel/sched/bld.h
|
||||
@@ -0,0 +1,215 @@
|
||||
+#ifdef CONFIG_BLD
|
||||
+
|
||||
+static DEFINE_RWLOCK(rt_list_lock);
|
||||
+static LIST_HEAD(rt_rq_head);
|
||||
+static LIST_HEAD(cfs_rq_head);
|
||||
+static DEFINE_RWLOCK(cfs_list_lock);
|
||||
+
|
||||
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
|
||||
+{
|
||||
+ return cfs_rq->rq;
|
||||
+}
|
||||
+#else
|
||||
+static inline struct rq *rq_of_cfs(struct cfs_rq *cfs_rq)
|
||||
+{
|
||||
+ return container_of(cfs_rq, struct rq, cfs);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
+#ifdef CONFIG_RT_GROUP_SCHED
|
||||
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
|
||||
+{
|
||||
+ return rt_rq->rq;
|
||||
+}
|
||||
+#else
|
||||
+static inline struct rq *rq_of_rt(struct rt_rq *rt_rq)
|
||||
+{
|
||||
+ return container_of(rt_rq, struct rq, rt);
|
||||
+}
|
||||
+#endif
|
||||
+
|
||||
+static int select_cpu_for_wakeup(int task_type, struct cpumask *mask)
|
||||
+{
|
||||
+ int cpu = smp_processor_id(), i;
|
||||
+ unsigned long load, varload;
|
||||
+ struct rq *rq;
|
||||
+
|
||||
+ if (task_type) {
|
||||
+ varload = ULONG_MAX;
|
||||
+ for_each_cpu(i, mask) {
|
||||
+ rq = cpu_rq(i);
|
||||
+ load = rq->cfs.load.weight;
|
||||
+ if (load < varload) {
|
||||
+ varload = load;
|
||||
+ cpu = i;
|
||||
+ }
|
||||
+ }
|
||||
+ } else {
|
||||
+ /* Here's an attempt to get a CPU within the mask where
|
||||
+ * we can preempt easily. To achieve this we tried to
|
||||
+ * maintain a lowbit, which indicate the lowest bit set on
|
||||
+ * array bitmap. Since all CPUs contains high priority
|
||||
+ * kernel threads therefore we eliminate 0, so it might not
|
||||
+ * be right every time, but it's just an indicator.
|
||||
+ */
|
||||
+ varload = 1;
|
||||
+
|
||||
+ for_each_cpu(i, mask) {
|
||||
+ rq = cpu_rq(i);
|
||||
+ load = rq->rt.lowbit;
|
||||
+ if (load >= varload) {
|
||||
+ varload = load;
|
||||
+ cpu = i;
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_cfs(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ struct cfs_rq *cfs;
|
||||
+ unsigned long flags;
|
||||
+ unsigned int cpu = smp_processor_id();
|
||||
+
|
||||
+ read_lock_irqsave(&cfs_list_lock, flags);
|
||||
+ list_for_each_entry(cfs, &cfs_rq_head, bld_cfs_list) {
|
||||
+ cpu = cpu_of(rq_of_cfs(cfs));
|
||||
+ if (cpu_online(cpu))
|
||||
+ break;
|
||||
+ }
|
||||
+ read_unlock_irqrestore(&cfs_list_lock, flags);
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_rt(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ struct rt_rq *rt;
|
||||
+ unsigned long flags;
|
||||
+ unsigned int cpu = smp_processor_id();
|
||||
+
|
||||
+ read_lock_irqsave(&rt_list_lock, flags);
|
||||
+ list_for_each_entry(rt, &rt_rq_head, bld_rt_list) {
|
||||
+ cpu = cpu_of(rq_of_rt(rt));
|
||||
+ if (cpu_online(cpu))
|
||||
+ break;
|
||||
+ }
|
||||
+ read_unlock_irqrestore(&rt_list_lock, flags);
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static int bld_pick_cpu_domain(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ unsigned int cpu = smp_processor_id(), want_affine = 0;
|
||||
+ struct cpumask *tmpmask;
|
||||
+
|
||||
+ if (p->nr_cpus_allowed == 1)
|
||||
+ return task_cpu(p);
|
||||
+
|
||||
+ if (sd_flags & SD_BALANCE_WAKE) {
|
||||
+ if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) {
|
||||
+ want_affine = 1;
|
||||
+ }
|
||||
+ }
|
||||
+
|
||||
+ if (want_affine)
|
||||
+ tmpmask = tsk_cpus_allowed(p);
|
||||
+ else
|
||||
+ tmpmask = sched_domain_span(cpu_rq(task_cpu(p))->sd);
|
||||
+
|
||||
+ if (rt_task(p))
|
||||
+ cpu = select_cpu_for_wakeup(0, tmpmask);
|
||||
+ else
|
||||
+ cpu = select_cpu_for_wakeup(1, tmpmask);
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static void track_load_rt(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ int firstbit;
|
||||
+ struct rt_rq *first;
|
||||
+ struct rt_prio_array *array = &rq->rt.active;
|
||||
+
|
||||
+ first = list_entry(rt_rq_head.next, struct rt_rq, bld_rt_list);
|
||||
+ firstbit = sched_find_first_bit(array->bitmap);
|
||||
+
|
||||
+ /* Maintaining rt.lowbit */
|
||||
+ if (firstbit > 0 && firstbit <= rq->rt.lowbit)
|
||||
+ rq->rt.lowbit = firstbit;
|
||||
+
|
||||
+ if (rq->rt.lowbit < first->lowbit) {
|
||||
+ write_lock_irqsave(&rt_list_lock, flag);
|
||||
+ list_del(&rq->rt.bld_rt_list);
|
||||
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
|
||||
+ write_unlock_irqrestore(&rt_list_lock, flag);
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static int bld_get_cpu(struct task_struct *p, int sd_flags, int wake_flags)
|
||||
+{
|
||||
+ unsigned int cpu;
|
||||
+
|
||||
+ if (sd_flags == SD_BALANCE_WAKE || (sd_flags == SD_BALANCE_EXEC && (get_nr_threads(p) > 1)))
|
||||
+ cpu = bld_pick_cpu_domain(p, sd_flags, wake_flags);
|
||||
+ else {
|
||||
+ if (rt_task(p))
|
||||
+ cpu = bld_pick_cpu_rt(p, sd_flags, wake_flags);
|
||||
+ else
|
||||
+ cpu = bld_pick_cpu_cfs(p, sd_flags, wake_flags);
|
||||
+ }
|
||||
+
|
||||
+ return cpu;
|
||||
+}
|
||||
+
|
||||
+static void bld_track_load_activate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ if (rt_task(p)) {
|
||||
+ track_load_rt(rq, p);
|
||||
+ } else {
|
||||
+ if (rq->cfs.pos != 2) {
|
||||
+ struct cfs_rq *last;
|
||||
+ last = list_entry(cfs_rq_head.prev, struct cfs_rq, bld_cfs_list);
|
||||
+ if (rq->cfs.load.weight >= last->load.weight) {
|
||||
+ write_lock_irqsave(&cfs_list_lock, flag);
|
||||
+ list_del(&rq->cfs.bld_cfs_list);
|
||||
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 2; last->pos = 1;
|
||||
+ write_unlock_irqrestore(&cfs_list_lock, flag);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+ unsigned long flag;
|
||||
+ if (rt_task(p)) {
|
||||
+ track_load_rt(rq, p);
|
||||
+ } else {
|
||||
+ if (rq->cfs.pos != 0) {
|
||||
+ struct cfs_rq *first;
|
||||
+ first = list_entry(cfs_rq_head.next, struct cfs_rq, bld_cfs_list);
|
||||
+ if (rq->cfs.load.weight <= first->load.weight) {
|
||||
+ write_lock_irqsave(&cfs_list_lock, flag);
|
||||
+ list_del(&rq->cfs.bld_cfs_list);
|
||||
+ list_add(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 0; first->pos = 1;
|
||||
+ write_unlock_irqrestore(&cfs_list_lock, flag);
|
||||
+ }
|
||||
+ }
|
||||
+ }
|
||||
+}
|
||||
+#else
|
||||
+static inline void bld_track_load_activate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+}
|
||||
+
|
||||
+static inline void bld_track_load_deactivate(struct rq *rq, struct task_struct *p)
|
||||
+{
|
||||
+}
|
||||
+#endif /* CONFIG_BLD */
|
||||
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
|
||||
index 44817c6..f0f3321 100644
|
||||
--- a/kernel/sched/core.c
|
||||
+++ b/kernel/sched/core.c
|
||||
@@ -24,6 +24,8 @@
|
||||
* 2007-07-01 Group scheduling enhancements by Srivatsa Vaddagiri
|
||||
* 2007-11-29 RT balancing improvements by Steven Rostedt, Gregory Haskins,
|
||||
* Thomas Gleixner, Mike Kravetz
|
||||
+ * 2012-Feb The Barbershop Load Distribution (BLD) algorithm - an alternate
|
||||
+ * CPU load distribution technique for kernel scheduler by Rakib Mullick.
|
||||
*/
|
||||
|
||||
#include <linux/kasan.h>
|
||||
@@ -87,6 +89,7 @@
|
||||
#include "sched.h"
|
||||
#include "../workqueue_internal.h"
|
||||
#include "../smpboot.h"
|
||||
+#include "bld.h"
|
||||
|
||||
#define CREATE_TRACE_POINTS
|
||||
#include <trace/events/sched.h>
|
||||
@@ -751,6 +754,8 @@ static inline void enqueue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
if (!(flags & ENQUEUE_RESTORE))
|
||||
sched_info_queued(rq, p);
|
||||
p->sched_class->enqueue_task(rq, p, flags);
|
||||
+ if (!dl_task(p))
|
||||
+ bld_track_load_activate(rq, p);
|
||||
}
|
||||
|
||||
static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
@@ -759,6 +764,8 @@ static inline void dequeue_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
if (!(flags & DEQUEUE_SAVE))
|
||||
sched_info_dequeued(rq, p);
|
||||
p->sched_class->dequeue_task(rq, p, flags);
|
||||
+ if (!dl_task(p))
|
||||
+ bld_track_load_deactivate(rq, p);
|
||||
}
|
||||
|
||||
void activate_task(struct rq *rq, struct task_struct *p, int flags)
|
||||
@@ -1588,11 +1595,17 @@ int select_task_rq(struct task_struct *p, int cpu, int sd_flags, int wake_flags)
|
||||
{
|
||||
lockdep_assert_held(&p->pi_lock);
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
if (tsk_nr_cpus_allowed(p) > 1)
|
||||
cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags);
|
||||
else
|
||||
cpu = cpumask_any(tsk_cpus_allowed(p));
|
||||
-
|
||||
+#else
|
||||
+ if (dl_task(p))
|
||||
+ cpu = dl_sched_class.select_task_rq(p, cpu, sd_flags, wake_flags);
|
||||
+ else
|
||||
+ cpu = bld_get_cpu(p, sd_flags, wake_flags);
|
||||
+#endif
|
||||
/*
|
||||
* In order not to call set_task_cpu() on a blocking task we need
|
||||
* to rely on ttwu() to place the task on a valid ->cpus_allowed
|
||||
@@ -1795,7 +1808,11 @@ void scheduler_ipi(void)
|
||||
*/
|
||||
preempt_fold_need_resched();
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick())
|
||||
+#else
|
||||
+ if (llist_empty(&this_rq()->wake_list))
|
||||
+#endif
|
||||
return;
|
||||
|
||||
/*
|
||||
@@ -1817,13 +1834,16 @@ void scheduler_ipi(void)
|
||||
/*
|
||||
* Check if someone kicked us for doing the nohz idle load balance.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
if (unlikely(got_nohz_idle_kick())) {
|
||||
this_rq()->idle_balance = 1;
|
||||
raise_softirq_irqoff(SCHED_SOFTIRQ);
|
||||
}
|
||||
+#endif
|
||||
irq_exit();
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
|
||||
{
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
@@ -1837,6 +1857,7 @@ static void ttwu_queue_remote(struct task_struct *p, int cpu, int wake_flags)
|
||||
trace_sched_wake_idle_without_ipi(cpu);
|
||||
}
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
void wake_up_if_idle(int cpu)
|
||||
{
|
||||
@@ -1873,7 +1894,7 @@ static void ttwu_queue(struct task_struct *p, int cpu, int wake_flags)
|
||||
struct rq *rq = cpu_rq(cpu);
|
||||
struct pin_cookie cookie;
|
||||
|
||||
-#if defined(CONFIG_SMP)
|
||||
+#if defined(CONFIG_SMP) && !defined(CONFIG_BLD)
|
||||
if (sched_feat(TTWU_QUEUE) && !cpus_share_cache(smp_processor_id(), cpu)) {
|
||||
sched_clock_cpu(cpu); /* sync clocks x-cpu */
|
||||
ttwu_queue_remote(p, cpu, wake_flags);
|
||||
@@ -2971,7 +2992,14 @@ void sched_exec(void)
|
||||
int dest_cpu;
|
||||
|
||||
raw_spin_lock_irqsave(&p->pi_lock, flags);
|
||||
+#ifndef CONFIG_BLD
|
||||
dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), SD_BALANCE_EXEC, 0);
|
||||
+#else
|
||||
+ if (dl_task(p))
|
||||
+ dest_cpu = task_cpu(p);
|
||||
+ else
|
||||
+ dest_cpu = bld_get_cpu(p, SD_BALANCE_EXEC, 0);
|
||||
+#endif
|
||||
if (dest_cpu == smp_processor_id())
|
||||
goto unlock;
|
||||
|
||||
@@ -3078,8 +3106,10 @@ void scheduler_tick(void)
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
rq->idle_balance = idle_cpu(cpu);
|
||||
+#ifndef CONFIG_BLD
|
||||
trigger_load_balance(rq);
|
||||
#endif
|
||||
+#endif
|
||||
rq_last_tick_reset(rq);
|
||||
}
|
||||
|
||||
@@ -7313,7 +7343,9 @@ int sched_cpu_dying(unsigned int cpu)
|
||||
raw_spin_unlock_irqrestore(&rq->lock, flags);
|
||||
calc_load_migrate(rq);
|
||||
update_max_interval();
|
||||
+#ifndef CONFIG_BLD
|
||||
nohz_balance_exit_idle(cpu);
|
||||
+#endif
|
||||
hrtick_clear(rq);
|
||||
return 0;
|
||||
}
|
||||
@@ -7519,6 +7551,15 @@ void __init sched_init(void)
|
||||
#endif /* CONFIG_SMP */
|
||||
init_rq_hrtick(rq);
|
||||
atomic_set(&rq->nr_iowait, 0);
|
||||
+#ifdef CONFIG_BLD
|
||||
+ INIT_LIST_HEAD(&rq->cfs.bld_cfs_list);
|
||||
+ list_add_tail(&rq->cfs.bld_cfs_list, &cfs_rq_head);
|
||||
+ rq->cfs.pos = 0;
|
||||
+
|
||||
+ INIT_LIST_HEAD(&rq->rt.bld_rt_list);
|
||||
+ list_add_tail(&rq->rt.bld_rt_list, &rt_rq_head);
|
||||
+ rq->rt.lowbit = INT_MAX;
|
||||
+#endif
|
||||
}
|
||||
|
||||
set_load_weight(&init_task);
|
||||
@@ -7561,6 +7602,9 @@ void __init sched_init(void)
|
||||
init_schedstats();
|
||||
|
||||
scheduler_running = 1;
|
||||
+#ifdef CONFIG_BLD
|
||||
+ printk(KERN_INFO "BLD: An Alternate CPU load distributor activated.\n");
|
||||
+#endif
|
||||
}
|
||||
|
||||
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
|
||||
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
|
||||
index 039de34..f823e5b 100644
|
||||
--- a/kernel/sched/fair.c
|
||||
+++ b/kernel/sched/fair.c
|
||||
@@ -4924,6 +4924,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
|
||||
return 0;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
/*
|
||||
* effective_load() calculates the load change as seen from the root_task_group
|
||||
@@ -5455,6 +5456,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
|
||||
|
||||
return new_cpu;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
/*
|
||||
* Called immediately before a task is migrated to a new cpu; task_cpu(p) and
|
||||
@@ -5785,6 +5787,7 @@ idle:
|
||||
* further scheduler activity on it and we're being very careful to
|
||||
* re-start the picking loop.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
lockdep_unpin_lock(&rq->lock, cookie);
|
||||
new_tasks = idle_balance(rq);
|
||||
lockdep_repin_lock(&rq->lock, cookie);
|
||||
@@ -5798,7 +5801,7 @@ idle:
|
||||
|
||||
if (new_tasks > 0)
|
||||
goto again;
|
||||
-
|
||||
+#endif /* CONFIG_BLD */
|
||||
return NULL;
|
||||
}
|
||||
|
||||
@@ -6459,8 +6462,9 @@ static unsigned long task_h_load(struct task_struct *p)
|
||||
}
|
||||
#endif
|
||||
|
||||
-/********** Helpers for find_busiest_group ************************/
|
||||
+#ifndef CONFIG_BLD
|
||||
|
||||
+/********** Helpers for find_busiest_group ************************/
|
||||
enum group_type {
|
||||
group_other = 0,
|
||||
group_imbalanced,
|
||||
@@ -6551,6 +6555,7 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
|
||||
|
||||
return load_idx;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static unsigned long scale_rt_capacity(int cpu)
|
||||
{
|
||||
@@ -6659,6 +6664,7 @@ void update_group_capacity(struct sched_domain *sd, int cpu)
|
||||
sdg->sgc->capacity = capacity;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
/*
|
||||
* Check whether the capacity of the rq has been noticeably reduced by side
|
||||
* activity. The imbalance_pct is used for the threshold.
|
||||
@@ -7892,6 +7898,7 @@ static inline int on_null_domain(struct rq *rq)
|
||||
{
|
||||
return unlikely(!rcu_dereference_sched(rq->sd));
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
#ifdef CONFIG_NO_HZ_COMMON
|
||||
/*
|
||||
@@ -7900,12 +7907,39 @@ static inline int on_null_domain(struct rq *rq)
|
||||
* needed, they will kick the idle load balancer, which then does idle
|
||||
* load balancing for all the idle CPUs.
|
||||
*/
|
||||
+#ifndef CONFIG_BLD
|
||||
static struct {
|
||||
cpumask_var_t idle_cpus_mask;
|
||||
atomic_t nr_cpus;
|
||||
unsigned long next_balance; /* in jiffy units */
|
||||
} nohz ____cacheline_aligned;
|
||||
|
||||
+void nohz_balance_exit_idle(unsigned int cpu)
|
||||
+{
|
||||
+ if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
|
||||
+ /*
|
||||
+ * Completely isolated CPUs don't ever set, so we must test.
|
||||
+ */
|
||||
+ if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
|
||||
+ cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
|
||||
+ atomic_dec(&nohz.nr_cpus);
|
||||
+ }
|
||||
+ clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
+static int sched_ilb_notifier(struct notifier_block *nfb,
|
||||
+ unsigned long action, void *hcpu)
|
||||
+{
|
||||
+ switch (action & ~CPU_TASKS_FROZEN) {
|
||||
+ case CPU_DYING:
|
||||
+ nohz_balance_exit_idle(smp_processor_id());
|
||||
+ return NOTIFY_OK;
|
||||
+ default:
|
||||
+ return NOTIFY_DONE;
|
||||
+ }
|
||||
+}
|
||||
+
|
||||
static inline int find_new_ilb(void)
|
||||
{
|
||||
int ilb = cpumask_first(nohz.idle_cpus_mask);
|
||||
@@ -7944,20 +7978,6 @@ static void nohz_balancer_kick(void)
|
||||
return;
|
||||
}
|
||||
|
||||
-void nohz_balance_exit_idle(unsigned int cpu)
|
||||
-{
|
||||
- if (unlikely(test_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu)))) {
|
||||
- /*
|
||||
- * Completely isolated CPUs don't ever set, so we must test.
|
||||
- */
|
||||
- if (likely(cpumask_test_cpu(cpu, nohz.idle_cpus_mask))) {
|
||||
- cpumask_clear_cpu(cpu, nohz.idle_cpus_mask);
|
||||
- atomic_dec(&nohz.nr_cpus);
|
||||
- }
|
||||
- clear_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
- }
|
||||
-}
|
||||
-
|
||||
static inline void set_cpu_sd_state_busy(void)
|
||||
{
|
||||
struct sched_domain *sd;
|
||||
@@ -7974,6 +7994,8 @@ static inline void set_cpu_sd_state_busy(void)
|
||||
unlock:
|
||||
rcu_read_unlock();
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
+#endif /* NO_HZ_COMMON */
|
||||
|
||||
void set_cpu_sd_state_idle(void)
|
||||
{
|
||||
@@ -7998,6 +8020,7 @@ unlock:
|
||||
*/
|
||||
void nohz_balance_enter_idle(int cpu)
|
||||
{
|
||||
+#ifndef CONFIG_BLD
|
||||
/*
|
||||
* If this cpu is going down, then nothing needs to be done.
|
||||
*/
|
||||
@@ -8016,10 +8039,8 @@ void nohz_balance_enter_idle(int cpu)
|
||||
cpumask_set_cpu(cpu, nohz.idle_cpus_mask);
|
||||
atomic_inc(&nohz.nr_cpus);
|
||||
set_bit(NOHZ_TICK_STOPPED, nohz_flags(cpu));
|
||||
-}
|
||||
#endif
|
||||
-
|
||||
-static DEFINE_SPINLOCK(balancing);
|
||||
+}
|
||||
|
||||
/*
|
||||
* Scale the max load_balance interval with the number of CPUs in the system.
|
||||
@@ -8030,6 +8051,9 @@ void update_max_interval(void)
|
||||
max_load_balance_interval = HZ*num_online_cpus()/10;
|
||||
}
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
+static DEFINE_SPINLOCK(balancing);
|
||||
+
|
||||
/*
|
||||
* It checks each scheduling domain to see if it is due to be balanced,
|
||||
* and initiates a balancing operation if so.
|
||||
@@ -8317,6 +8341,7 @@ void trigger_load_balance(struct rq *rq)
|
||||
nohz_balancer_kick();
|
||||
#endif
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static void rq_online_fair(struct rq *rq)
|
||||
{
|
||||
@@ -8332,7 +8357,6 @@ static void rq_offline_fair(struct rq *rq)
|
||||
/* Ensure any throttled groups are reachable by pick_next_task */
|
||||
unthrottle_offline_cfs_rqs(rq);
|
||||
}
|
||||
-
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
/*
|
||||
@@ -8791,7 +8815,9 @@ const struct sched_class fair_sched_class = {
|
||||
.put_prev_task = put_prev_task_fair,
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
+#ifndef CONFIG_BLD
|
||||
.select_task_rq = select_task_rq_fair,
|
||||
+#endif
|
||||
.migrate_task_rq = migrate_task_rq_fair,
|
||||
|
||||
.rq_online = rq_online_fair,
|
||||
@@ -8852,6 +8878,7 @@ void show_numa_stats(struct task_struct *p, struct seq_file *m)
|
||||
|
||||
__init void init_sched_fair_class(void)
|
||||
{
|
||||
+#ifndef CONFIG_BLD
|
||||
#ifdef CONFIG_SMP
|
||||
open_softirq(SCHED_SOFTIRQ, run_rebalance_domains);
|
||||
|
||||
@@ -8860,5 +8887,5 @@ __init void init_sched_fair_class(void)
|
||||
zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
|
||||
#endif
|
||||
#endif /* SMP */
|
||||
-
|
||||
+#endif /* BLD */
|
||||
}
|
||||
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
|
||||
index d5690b7..6f3589e 100644
|
||||
--- a/kernel/sched/rt.c
|
||||
+++ b/kernel/sched/rt.c
|
||||
@@ -1375,6 +1375,7 @@ static void yield_task_rt(struct rq *rq)
|
||||
#ifdef CONFIG_SMP
|
||||
static int find_lowest_rq(struct task_struct *task);
|
||||
|
||||
+#ifndef CONFIG_BLD
|
||||
static int
|
||||
select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
|
||||
{
|
||||
@@ -1430,6 +1431,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
|
||||
out:
|
||||
return cpu;
|
||||
}
|
||||
+#endif /* CONFIG_BLD */
|
||||
|
||||
static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
|
||||
{
|
||||
@@ -2335,7 +2337,9 @@ const struct sched_class rt_sched_class = {
|
||||
.put_prev_task = put_prev_task_rt,
|
||||
|
||||
#ifdef CONFIG_SMP
|
||||
+#ifndef CONFIG_BLD
|
||||
.select_task_rq = select_task_rq_rt,
|
||||
+#endif
|
||||
|
||||
.set_cpus_allowed = set_cpus_allowed_common,
|
||||
.rq_online = rq_online_rt,
|
||||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
|
||||
index c64fc51..a1d329b 100644
|
||||
--- a/kernel/sched/sched.h
|
||||
+++ b/kernel/sched/sched.h
|
||||
@@ -416,9 +416,8 @@ struct cfs_rq {
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
#endif /* CONFIG_SMP */
|
||||
|
||||
-#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
struct rq *rq; /* cpu runqueue to which this cfs_rq is attached */
|
||||
-
|
||||
+#ifdef CONFIG_FAIR_GROUP_SCHED
|
||||
/*
|
||||
* leaf cfs_rqs are those that hold tasks (lowest schedulable entity in
|
||||
* a hierarchy). Non-leaf lrqs hold other higher schedulable entities
|
||||
@@ -442,6 +441,11 @@ struct cfs_rq {
|
||||
struct list_head throttled_list;
|
||||
#endif /* CONFIG_CFS_BANDWIDTH */
|
||||
#endif /* CONFIG_FAIR_GROUP_SCHED */
|
||||
+
|
||||
+#ifdef CONFIG_BLD
|
||||
+ struct list_head bld_cfs_list;
|
||||
+ char pos;
|
||||
+#endif
|
||||
};
|
||||
|
||||
static inline int rt_bandwidth_enabled(void)
|
||||
@@ -487,12 +491,16 @@ struct rt_rq {
|
||||
/* Nests inside the rq lock: */
|
||||
raw_spinlock_t rt_runtime_lock;
|
||||
|
||||
+ struct rq *rq;
|
||||
#ifdef CONFIG_RT_GROUP_SCHED
|
||||
unsigned long rt_nr_boosted;
|
||||
|
||||
- struct rq *rq;
|
||||
struct task_group *tg;
|
||||
#endif
|
||||
+#ifdef CONFIG_BLD
|
||||
+ struct list_head bld_rt_list;
|
||||
+ int lowbit;
|
||||
+#endif
|
||||
};
|
||||
|
||||
/* Deadline class' related fields in a runqueue */
|
|
@ -0,0 +1,24 @@
|
|||
Add -ck version in inconspicuous place where it will merge relatively easily
|
||||
with later kernel versions.
|
||||
|
||||
-ck
|
||||
|
||||
---
|
||||
Makefile | 4 ++++
|
||||
1 file changed, 4 insertions(+)
|
||||
|
||||
Index: linux-3.12-ck2/Makefile
|
||||
===================================================================
|
||||
--- linux-3.12-ck2.orig/Makefile 2013-12-03 20:12:24.295109675 +1100
|
||||
+++ linux-3.12-ck2/Makefile 2013-12-03 20:12:24.285109800 +1100
|
||||
@@ -10,6 +10,10 @@ NAME = One Giant Leap for Frogkind
|
||||
# Comments in this file are targeted only to the developer, do not
|
||||
# expect to learn how to build the kernel reading this file.
|
||||
|
||||
+CKVERSION = -ck2
|
||||
+CKNAME = BFS Powered
|
||||
+EXTRAVERSION := $(EXTRAVERSION)$(CKVERSION)
|
||||
+
|
||||
# Do not:
|
||||
# o use make's built-in rules and variables
|
||||
# (this increases performance and avoids hard-to-debug behaviour);
|
|
@ -0,0 +1,21 @@
|
|||
Set default HZ to 1000 which is what most desktop users should still be using.
|
||||
|
||||
-ck
|
||||
|
||||
---
|
||||
kernel/Kconfig.hz | 2 +-
|
||||
1 file changed, 1 insertion(+), 1 deletion(-)
|
||||
|
||||
Index: linux-3.12-ck2/kernel/Kconfig.hz
|
||||
===================================================================
|
||||
--- linux-3.12-ck2.orig/kernel/Kconfig.hz 2013-12-03 20:12:23.401120851 +1100
|
||||
+++ linux-3.12-ck2/kernel/Kconfig.hz 2013-12-03 20:12:23.390120989 +1100
|
||||
@@ -4,7 +4,7 @@
|
||||
|
||||
choice
|
||||
prompt "Timer frequency"
|
||||
- default HZ_250
|
||||
+ default HZ_1000
|
||||
help
|
||||
Allows the configuration of the timer frequency. It is customary
|
||||
to have the timer interrupt run at 1000 Hz but 100 Hz may be more
|
|
@ -0,0 +1,53 @@
|
|||
Make 250HZ not be the default to discourage desktop users from choosing this
|
||||
option since 1000 will provide better latencies with only miniscule amounts
|
||||
of extra overhead and power consumption.
|
||||
|
||||
-ck
|
||||
|
||||
---
|
||||
kernel/Kconfig.hz | 17 ++++++++++-------
|
||||
1 file changed, 10 insertions(+), 7 deletions(-)
|
||||
|
||||
Index: linux-3.12-ck2/kernel/Kconfig.hz
|
||||
===================================================================
|
||||
--- linux-3.12-ck2.orig/kernel/Kconfig.hz 2013-12-03 20:12:22.956126414 +1100
|
||||
+++ linux-3.12-ck2/kernel/Kconfig.hz 2013-12-03 20:12:22.946126539 +1100
|
||||
@@ -23,13 +23,14 @@ choice
|
||||
with lots of processors that may show reduced performance if
|
||||
too many timer interrupts are occurring.
|
||||
|
||||
- config HZ_250
|
||||
+ config HZ_250_NODEFAULT
|
||||
bool "250 HZ"
|
||||
help
|
||||
- 250 Hz is a good compromise choice allowing server performance
|
||||
- while also showing good interactive responsiveness even
|
||||
- on SMP and NUMA systems. If you are going to be using NTSC video
|
||||
- or multimedia, selected 300Hz instead.
|
||||
+ 250 HZ is a lousy compromise choice allowing server interactivity
|
||||
+ while also showing desktop throughput and no extra power saving on
|
||||
+ laptops. No good for anything.
|
||||
+
|
||||
+ Recommend 100 or 1000 instead.
|
||||
|
||||
config HZ_300
|
||||
bool "300 HZ"
|
||||
@@ -43,14 +44,16 @@ choice
|
||||
bool "1000 HZ"
|
||||
help
|
||||
1000 Hz is the preferred choice for desktop systems and other
|
||||
- systems requiring fast interactive responses to events.
|
||||
+ systems requiring fast interactive responses to events. Laptops
|
||||
+ can also benefit from this choice without sacrificing battery life
|
||||
+ if dynticks is also enabled.
|
||||
|
||||
endchoice
|
||||
|
||||
config HZ
|
||||
int
|
||||
default 100 if HZ_100
|
||||
- default 250 if HZ_250
|
||||
+ default 250 if HZ_250_NODEFAULT
|
||||
default 300 if HZ_300
|
||||
default 1000 if HZ_1000
|
||||
|
|
@ -0,0 +1,46 @@
|
|||
The options to alter the vmsplit to enable more lowmem are hidden behind the
|
||||
expert option. Make it more exposed for -ck users and make the help menu
|
||||
more explicit about what each option means.
|
||||
|
||||
-ck
|
||||
|
||||
---
|
||||
arch/x86/Kconfig | 12 ++++++------
|
||||
1 file changed, 6 insertions(+), 6 deletions(-)
|
||||
|
||||
Index: linux-3.12-ck2/arch/x86/Kconfig
|
||||
===================================================================
|
||||
--- linux-3.12-ck2.orig/arch/x86/Kconfig 2013-12-03 20:12:23.848115263 +1100
|
||||
+++ linux-3.12-ck2/arch/x86/Kconfig 2013-12-03 20:12:23.838115388 +1100
|
||||
@@ -1175,7 +1175,7 @@ config HIGHMEM64G
|
||||
endchoice
|
||||
|
||||
choice
|
||||
- prompt "Memory split" if EXPERT
|
||||
+ prompt "Memory split"
|
||||
default VMSPLIT_3G
|
||||
depends on X86_32
|
||||
---help---
|
||||
@@ -1195,17 +1195,17 @@ choice
|
||||
option alone!
|
||||
|
||||
config VMSPLIT_3G
|
||||
- bool "3G/1G user/kernel split"
|
||||
+ bool "Default 896MB lowmem (3G/1G user/kernel split)"
|
||||
config VMSPLIT_3G_OPT
|
||||
depends on !X86_PAE
|
||||
- bool "3G/1G user/kernel split (for full 1G low memory)"
|
||||
+ bool "1GB lowmem (3G/1G user/kernel split)"
|
||||
config VMSPLIT_2G
|
||||
- bool "2G/2G user/kernel split"
|
||||
+ bool "2GB lowmem (2G/2G user/kernel split)"
|
||||
config VMSPLIT_2G_OPT
|
||||
depends on !X86_PAE
|
||||
- bool "2G/2G user/kernel split (for full 2G low memory)"
|
||||
+ bool "2GB lowmem (2G/2G user/kernel split)"
|
||||
config VMSPLIT_1G
|
||||
- bool "1G/3G user/kernel split"
|
||||
+ bool "3GB lowmem (1G/3G user/kernel split)"
|
||||
endchoice
|
||||
|
||||
config PAGE_OFFSET
|
|
@ -0,0 +1,40 @@
|
|||
Enable preempt by default and make people steer away from voluntary.
|
||||
|
||||
-ck
|
||||
|
||||
---
|
||||
kernel/Kconfig.preempt | 7 ++++---
|
||||
1 file changed, 4 insertions(+), 3 deletions(-)
|
||||
|
||||
Index: linux-3.12-ck2/kernel/Kconfig.preempt
|
||||
===================================================================
|
||||
--- linux-3.12-ck2.orig/kernel/Kconfig.preempt 2013-12-03 20:12:22.511131977 +1100
|
||||
+++ linux-3.12-ck2/kernel/Kconfig.preempt 2013-12-03 20:12:22.500132115 +1100
|
||||
@@ -1,7 +1,7 @@
|
||||
|
||||
choice
|
||||
prompt "Preemption Model"
|
||||
- default PREEMPT_NONE
|
||||
+ default PREEMPT
|
||||
|
||||
config PREEMPT_NONE
|
||||
bool "No Forced Preemption (Server)"
|
||||
@@ -17,7 +17,7 @@ config PREEMPT_NONE
|
||||
latencies.
|
||||
|
||||
config PREEMPT_VOLUNTARY
|
||||
- bool "Voluntary Kernel Preemption (Desktop)"
|
||||
+ bool "Voluntary Kernel Preemption (Nothing)"
|
||||
help
|
||||
This option reduces the latency of the kernel by adding more
|
||||
"explicit preemption points" to the kernel code. These new
|
||||
@@ -31,7 +31,8 @@ config PREEMPT_VOLUNTARY
|
||||
applications to run more 'smoothly' even when the system is
|
||||
under load.
|
||||
|
||||
- Select this if you are building a kernel for a desktop system.
|
||||
+ Select this for no system in particular (choose Preemptible
|
||||
+ instead on a desktop if you know what's good for you).
|
||||
|
||||
config PREEMPT
|
||||
bool "Preemptible Kernel (Low-Latency Desktop)"
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Loading…
Reference in New Issue