Reduce cpuset.c write_lock_irq() to read_lock()

cpuset.c:update_nodemask() uses a write_lock_irq() on tasklist_lock to
block concurrent forks while it scans the task list. Since fork() takes
tasklist_lock for writing, holding it for reading is enough to exclude
forks, and a read_lock() is less intrusive: it permits concurrent
readers and doesn't disable interrupts.

Signed-off-by: Paul Menage <menage@google.com>
Acked-by: Paul Jackson <pj@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit c2aef333c9
parent b2ff457b09
Author: Paul Menage <menage@google.com>
Date:   2007-07-15 23:40:11 -07:00
Committed-by: Linus Torvalds

@@ -981,10 +981,10 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
 		if (!mmarray)
 			goto done;
-		write_lock_irq(&tasklist_lock);		/* block fork */
+		read_lock(&tasklist_lock);		/* block fork */
 		if (atomic_read(&cs->count) <= ntasks)
 			break;				/* got enough */
-		write_unlock_irq(&tasklist_lock);	/* try again */
+		read_unlock(&tasklist_lock);		/* try again */
 		kfree(mmarray);
 	}
@@ -1006,7 +1006,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 			continue;
 		mmarray[n++] = mm;
 	} while_each_thread(g, p);
-	write_unlock_irq(&tasklist_lock);
+	read_unlock(&tasklist_lock);
 	/*
 	 * Now that we've dropped the tasklist spinlock, we can
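
For reference: tasklist_lock is a reader-writer lock, and fork() takes it
for writing (copy_process() does write_lock_irq(&tasklist_lock)), so any
reader excludes a concurrent fork. The same rwlock property can be shown
in a minimal userspace sketch using POSIX rwlocks instead of the kernel
API (illustrative only; the lock and thread names are made up):

#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for tasklist_lock: a reader-writer lock. */
static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;

/* Plays fork()'s role: it needs the lock exclusively, like
 * write_lock_irq(&tasklist_lock) in copy_process(), so it blocks
 * for as long as any reader holds the lock. */
static void *forker(void *unused)
{
	(void)unused;
	pthread_rwlock_wrlock(&list_lock);
	puts("writer: all readers gone, task list may change");
	pthread_rwlock_unlock(&list_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_rwlock_rdlock(&list_lock);	/* like read_lock(&tasklist_lock) */
	pthread_create(&t, NULL, forker, NULL);
	sleep(1);	/* the writer stays blocked for this whole window */
	puts("reader: scanned a stable task list");
	pthread_rwlock_unlock(&list_lock);	/* like read_unlock(&tasklist_lock) */

	pthread_join(t, NULL);
	return 0;
}

Holding the read side is enough to keep the list stable against forks
while still letting other readers proceed, which is exactly why the
patch can drop the heavier write_lock_irq()/write_unlock_irq() pair.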