/*
 * linux/mm/filemap.h
 *
 * Copyright (C) 1994-1999  Linus Torvalds
 */

#ifndef __FILEMAP_H
#define __FILEMAP_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/uio.h>
#include <linux/uaccess.h>

size_t
__filemap_copy_from_user_iovec_inatomic(char *vaddr,
					const struct iovec *iov,
					size_t base,
					size_t bytes);

/*
 * Copy as much as we can into the page and return the number of bytes which
 * were successfully copied.  If a fault is encountered then clear the page
 * out to (offset+bytes) and return the number of bytes which were copied.
 *
 * NOTE: For this to work reliably we really want copy_from_user_inatomic_nocache
 * to *NOT* zero any tail of the buffer that it failed to copy.  If it does,
 * and if the following non-atomic copy succeeds, then there is a small window
 * where the target page contains neither the data before the write, nor the
 * data after the write (it contains zero).  A read at this time will see
 * data that is inconsistent with any ordering of the read and the write.
 * (This has been detected in practice).
 */
static inline size_t
filemap_copy_from_user(struct page *page, unsigned long offset,
			const char __user *buf, unsigned bytes)
{
	char *kaddr;
	int left;

	kaddr = kmap_atomic(page, KM_USER0);
	left = __copy_from_user_inatomic_nocache(kaddr + offset, buf, bytes);
	kunmap_atomic(kaddr, KM_USER0);

	if (left != 0) {
		/* Do it the slow way */
		kaddr = kmap(page);
		left = __copy_from_user_nocache(kaddr + offset, buf, bytes);
		kunmap(page);
	}
	return bytes - left;
}
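
/*
 * Illustrative sketch, not part of this header's API: roughly how a
 * write-path caller might use filemap_copy_from_user().  The helper
 * name and the error handling are assumptions made for the example;
 * a real caller (e.g. the generic write path) also locks the page and
 * commits the write.
 */
static inline int
example_write_into_page(struct page *page, unsigned long offset,
			const char __user *buf, unsigned bytes)
{
	size_t copied = filemap_copy_from_user(page, offset, buf, bytes);

	/* A short copy means even the non-atomic retry faulted; the
	 * caller would typically commit 'copied' bytes and report
	 * -EFAULT for the remainder. */
	return copied == bytes ? 0 : -EFAULT;
}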

/*
 * This has the same side effects and return value as filemap_copy_from_user().
 * The difference is that on a fault we need to memset the remainder of the
 * page (out to offset+bytes), to emulate filemap_copy_from_user()'s
 * single-segment behaviour.
 */
static inline size_t
filemap_copy_from_user_iovec(struct page *page, unsigned long offset,
			const struct iovec *iov, size_t base, size_t bytes)
{
	char *kaddr;
	size_t copied;

	kaddr = kmap_atomic(page, KM_USER0);
	copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
							 base, bytes);
	kunmap_atomic(kaddr, KM_USER0);
	if (copied != bytes) {
		kaddr = kmap(page);
		copied = __filemap_copy_from_user_iovec_inatomic(kaddr + offset, iov,
								 base, bytes);
		if (bytes - copied)
			memset(kaddr + offset + copied, 0, bytes - copied);
		kunmap(page);
	}
	return copied;
}

static inline void
filemap_set_next_iovec(const struct iovec **iovp, size_t *basep, size_t bytes)
{
	const struct iovec *iov = *iovp;
	size_t base = *basep;

	do {
		int copy = min(bytes, iov->iov_len - base);

		bytes -= copy;
		base += copy;
		if (iov->iov_len == base) {
			iov++;
			base = 0;
		}
	} while (bytes);
	*iovp = iov;
	*basep = base;
}
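
/*
 * Illustrative sketch, not part of this header's API: how the iovec
 * helpers above combine when a write spans several segments.  The
 * helper name and calling convention are assumptions made for the
 * example; it copies one chunk into 'page' and then advances the
 * (iov, base) cursor past the bytes actually copied (assumed non-zero,
 * as in the write path) so the next chunk resumes in the right segment.
 */
static inline size_t
example_copy_iovec_chunk(struct page *page, unsigned long offset,
			 const struct iovec **iovp, size_t *basep,
			 size_t bytes)
{
	/* Copy up to 'bytes' from the current iovec position. */
	size_t copied = filemap_copy_from_user_iovec(page, offset,
						     *iovp, *basep, bytes);

	/* Step the cursor forward so *iovp/*basep point at the first
	 * uncopied byte. */
	filemap_set_next_iovec(iovp, basep, copied);
	return copied;
}
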
#endif