[PATCH] ppc32: Fix pte_update for 64-bit PTEs

While the existing pte_update code handled atomically modifying a 64-bit PTE,
it did not return all 64 bits of the PTE before it was modified.  This causes
problems in some places that expect the full PTE to be returned, like
ptep_get_and_clear().

Created a new pte_update function that is conditional on CONFIG_PTE_64BIT.  It
atomically reads the low PTE word which all PTE flags are required to be in
and returns a premodified full 64-bit PTE.

Since we now have an explicit 64-bit PTE version of pte_update we can also
remove the hack that existed to get the low PTE word regardless of size.

Signed-off-by: Kumar Gala <kumar.gala@freescale.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
This commit is contained in:
Kumar Gala 2005-04-16 15:24:20 -07:00 committed by Linus Torvalds
parent 0c541b4406
commit 7a1e335085

View File

@@ -526,10 +526,10 @@ extern void add_hash_page(unsigned context, unsigned long va,
  * Atomic PTE updates.
  *
  * pte_update clears and sets bit atomically, and returns
- * the old pte value.
- * The ((unsigned long)(p+1) - 4) hack is to get to the least-significant
- * 32 bits of the PTE regardless of whether PTEs are 32 or 64 bits.
+ * the old pte value.  In the 64-bit PTE case we lock around the
+ * low PTE word since we expect ALL flag bits to be there
  */
#ifndef CONFIG_PTE_64BIT
 static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 				       unsigned long set)
 {
@@ -543,10 +543,31 @@ static inline unsigned long pte_update(pte_t *p, unsigned long clr,
 "	stwcx.	%1,0,%3\n\
 	bne-	1b"
 	: "=&r" (old), "=&r" (tmp), "=m" (*p)
-	: "r" ((unsigned long)(p+1) - 4), "r" (clr), "r" (set), "m" (*p)
+	: "r" (p), "r" (clr), "r" (set), "m" (*p)
 	: "cc" );
 	return old;
 }
#else
static inline unsigned long long pte_update(pte_t *p, unsigned long clr,
unsigned long set)
{
unsigned long long old;
unsigned long tmp;
__asm__ __volatile__("\
1: lwarx %L0,0,%4\n\
lwzx %0,0,%3\n\
andc %1,%L0,%5\n\
or %1,%1,%6\n"
PPC405_ERR77(0,%3)
" stwcx. %1,0,%4\n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*p)
: "r" (p), "r" ((unsigned long)(p) + 4), "r" (clr), "r" (set), "m" (*p)
: "cc" );
return old;
}
#endif
 /*
  * set_pte stores a linux PTE into the linux page table.