mirror of
https://github.com/AuxXxilium/linux_dsm_epyc7002.git
synced 2024-12-24 02:37:15 +07:00
04646aebd3
Anything that walks all inodes on the sb->s_inodes list without rescheduling risks softlockups.

Previous efforts were made in 2 functions, see:

c27d82f fs/drop_caches.c: avoid softlockups in drop_pagecache_sb()
ac05fbb inode: don't softlockup when evicting inodes

but there hasn't been an audit of all walkers, so do that now. This also consistently moves the cond_resched() calls to the bottom of each loop in cases where it already exists.

One loop remains: remove_dquot_ref(), because I'm not quite sure how to deal with that one w/o taking the i_lock.

Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Reviewed-by: Jan Kara <jack@suse.cz>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
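In short, the rule the audit enforces is: skip inodes in unusual states, pin the current inode with __iget(), drop both spinlocks, do the per-inode work, release the previous inode's reference, and call cond_resched() at the bottom of the loop body before re-taking the list lock. A minimal sketch of that shape, using a hypothetical walker rather than any function touched by this commit (the real instance in this file is drop_pagecache_sb() below):

static void example_sb_inode_walker(struct super_block *sb)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		if (inode->i_state & (I_FREEING | I_WILL_FREE | I_NEW)) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);		/* pin the inode so it stays valid */
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);	/* no spinlocks held across the work */

		/* per-inode work goes here */

		iput(toput_inode);	/* drop the previous iteration's reference */
		toput_inode = inode;

		cond_resched();		/* always at the bottom of the loop body */
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(toput_inode);
}

Keeping a reference to the previous inode (toput_inode) while the list lock is re-taken keeps that inode on s_inodes, so the list cursor stays valid; its iput(), which may sleep, is deferred until the locks have been dropped again.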
77 lines
1.8 KiB
C
// SPDX-License-Identifier: GPL-2.0
/*
 * Implement the manual drop-all-pagecache function
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/sysctl.h>
#include <linux/gfp.h>
#include "internal.h"

/* A global variable is a bit ugly, but it keeps the code simple */
int sysctl_drop_caches;

static void drop_pagecache_sb(struct super_block *sb, void *unused)
{
	struct inode *inode, *toput_inode = NULL;

	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
		spin_lock(&inode->i_lock);
		/*
		 * We must skip inodes in unusual state. We may also skip
		 * inodes without pages but we deliberately won't in case
		 * we need to reschedule to avoid softlockups.
		 */
		if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
		    (inode->i_mapping->nrpages == 0 && !need_resched())) {
			spin_unlock(&inode->i_lock);
			continue;
		}
		__iget(inode);
		spin_unlock(&inode->i_lock);
		spin_unlock(&sb->s_inode_list_lock);

		invalidate_mapping_pages(inode->i_mapping, 0, -1);
		iput(toput_inode);
		toput_inode = inode;

		cond_resched();
		spin_lock(&sb->s_inode_list_lock);
	}
	spin_unlock(&sb->s_inode_list_lock);
	iput(toput_inode);
}

int drop_caches_sysctl_handler(struct ctl_table *table, int write,
	void __user *buffer, size_t *length, loff_t *ppos)
{
	int ret;

	ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
	if (ret)
		return ret;
	if (write) {
		static int stfu;

		if (sysctl_drop_caches & 1) {
			iterate_supers(drop_pagecache_sb, NULL);
			count_vm_event(DROP_PAGECACHE);
		}
		if (sysctl_drop_caches & 2) {
			drop_slab();
			count_vm_event(DROP_SLAB);
		}
		if (!stfu) {
			pr_info("%s (%d): drop_caches: %d\n",
				current->comm, task_pid_nr(current),
				sysctl_drop_caches);
		}
		stfu |= sysctl_drop_caches & 4;
	}
	return 0;
}
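For context on how drop_caches_sysctl_handler() is reached: writing to /proc/sys/vm/drop_caches (root only) invokes it; value 1 drops clean page cache, 2 drops reclaimable slab objects such as dentries and inodes, 3 does both, and adding 4 suppresses the pr_info() message. A small illustrative userspace trigger, equivalent to "sync; echo 3 > /proc/sys/vm/drop_caches" (a sketch, not part of the kernel source above):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd;

	sync();				/* only clean page cache can be dropped */
	fd = open("/proc/sys/vm/drop_caches", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "3", 1) != 1)	/* 3 = page cache + reclaimable slab */
		perror("write");
	close(fd);
	return 0;
}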