/*
 * linux/fs/hfsplus/xattr.c
 *
 * Vyacheslav Dubeyko <slava@dubeyko.com>
 *
 * Logic of processing extended attributes
 */

#include "hfsplus_fs.h"
#include <linux/posix_acl_xattr.h>
#include <linux/nls.h>
#include "xattr.h"
#include "acl.h"

static int hfsplus_removexattr(struct inode *inode, const char *name);

const struct xattr_handler *hfsplus_xattr_handlers[] = {
	&hfsplus_xattr_osx_handler,
	&hfsplus_xattr_user_handler,
	&hfsplus_xattr_trusted_handler,
#ifdef CONFIG_HFSPLUS_FS_POSIX_ACL
	&posix_acl_access_xattr_handler,
	&posix_acl_default_xattr_handler,
#endif
	&hfsplus_xattr_security_handler,
	NULL
};

static int strcmp_xattr_finder_info(const char *name)
{
	if (name) {
		return strncmp(name, HFSPLUS_XATTR_FINDER_INFO_NAME,
				sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME));
	}
	return -1;
}

static int strcmp_xattr_acl(const char *name)
{
	if (name) {
		return strncmp(name, HFSPLUS_XATTR_ACL_NAME,
				sizeof(HFSPLUS_XATTR_ACL_NAME));
	}
	return -1;
}

static inline int is_known_namespace(const char *name)
{
	if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) &&
	    strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) &&
	    strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) &&
	    strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN))
		return false;

	return true;
}

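/*
 * Build the header node of a new, empty attributes B-tree in @buf:
 * node descriptor, header record, user-data record and allocation
 * bitmap record, with the record offsets written at the end of the
 * node.
 */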
static void hfsplus_init_header_node(struct inode *attr_file,
					u32 clump_size,
					char *buf, u16 node_size)
{
	struct hfs_bnode_desc *desc;
	struct hfs_btree_header_rec *head;
	u16 offset;
	__be16 *rec_offsets;
	u32 hdr_node_map_rec_bits;
	char *bmp;
	u32 used_nodes;
	u32 used_bmp_bytes;
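	/*
	 * u64 rather than loff_t so that do_div() can be used on all
	 * architectures; the attributes file size is never negative.
	 */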
	u64 tmp;

	hfs_dbg(ATTR_MOD, "init_hdr_attr_file: clump %u, node_size %u\n",
		clump_size, node_size);

	/* The end of the node contains the list of record offsets */
	rec_offsets = (__be16 *)(buf + node_size);

	desc = (struct hfs_bnode_desc *)buf;
	desc->type = HFS_NODE_HEADER;
	desc->num_recs = cpu_to_be16(HFSPLUS_BTREE_HDR_NODE_RECS_COUNT);
	offset = sizeof(struct hfs_bnode_desc);
	*--rec_offsets = cpu_to_be16(offset);

	head = (struct hfs_btree_header_rec *)(buf + offset);
	head->node_size = cpu_to_be16(node_size);
	tmp = i_size_read(attr_file);
	do_div(tmp, node_size);
	head->node_count = cpu_to_be32(tmp);
	head->free_nodes = cpu_to_be32(be32_to_cpu(head->node_count) - 1);
	head->clump_size = cpu_to_be32(clump_size);
	head->attributes |= cpu_to_be32(HFS_TREE_BIGKEYS | HFS_TREE_VARIDXKEYS);
	head->max_key_len = cpu_to_be16(HFSPLUS_ATTR_KEYLEN - sizeof(u16));
	offset += sizeof(struct hfs_btree_header_rec);
	*--rec_offsets = cpu_to_be16(offset);
	offset += HFSPLUS_BTREE_HDR_USER_BYTES;
	*--rec_offsets = cpu_to_be16(offset);

	hdr_node_map_rec_bits = 8 * (node_size - offset - (4 * sizeof(u16)));
	if (be32_to_cpu(head->node_count) > hdr_node_map_rec_bits) {
		u32 map_node_bits;
		u32 map_nodes;

		desc->next = cpu_to_be32(be32_to_cpu(head->leaf_tail) + 1);
		map_node_bits = 8 * (node_size - sizeof(struct hfs_bnode_desc) -
					(2 * sizeof(u16)) - 2);
		map_nodes = (be32_to_cpu(head->node_count) -
				hdr_node_map_rec_bits +
				(map_node_bits - 1)) / map_node_bits;
		be32_add_cpu(&head->free_nodes, 0 - map_nodes);
	}

	bmp = buf + offset;
	used_nodes =
		be32_to_cpu(head->node_count) - be32_to_cpu(head->free_nodes);
	used_bmp_bytes = used_nodes / 8;
	if (used_bmp_bytes) {
		memset(bmp, 0xFF, used_bmp_bytes);
		bmp += used_bmp_bytes;
		used_nodes %= 8;
	}
	*bmp = ~(0xFF >> used_nodes);
	offset += hdr_node_map_rec_bits / 8;
	*--rec_offsets = cpu_to_be16(offset);
}

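/*
 * Create and initialize the attributes B-tree file (HFSPLUS_ATTR_CNID)
 * on first use.  Only one thread may create it at a time; the
 * attr_tree_state atomic serializes the transition
 * EMPTY -> CREATING -> VALID/FAILED.
 */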
static int hfsplus_create_attributes_file(struct super_block *sb)
{
	int err = 0;
	struct hfsplus_sb_info *sbi = HFSPLUS_SB(sb);
	struct inode *attr_file;
	struct hfsplus_inode_info *hip;
	u32 clump_size;
	u16 node_size = HFSPLUS_ATTR_TREE_NODE_SIZE;
	char *buf;
	int index, written;
	struct address_space *mapping;
	struct page *page;
	int old_state = HFSPLUS_EMPTY_ATTR_TREE;

	hfs_dbg(ATTR_MOD, "create_attr_file: ino %d\n", HFSPLUS_ATTR_CNID);

check_attr_tree_state_again:
	switch (atomic_read(&sbi->attr_tree_state)) {
	case HFSPLUS_EMPTY_ATTR_TREE:
		if (old_state != atomic_cmpxchg(&sbi->attr_tree_state,
						old_state,
						HFSPLUS_CREATING_ATTR_TREE))
			goto check_attr_tree_state_again;
		break;
	case HFSPLUS_CREATING_ATTR_TREE:
		/*
		 * This state means that another thread is in the middle
		 * of creating the attributes file.  In theory we can get
		 * here, but in practice __hfsplus_setxattr() first calls
		 * hfs_find_init() for the catalog lookup, which takes the
		 * catalog B-tree mutex; while one thread is creating the
		 * attributes file, the others wait on that mutex.  Should
		 * the code ever change, we return -EAGAIN here, so the
		 * first attempt to set the xattr fails but a retry
		 * succeeds.
		 */
		return -EAGAIN;
	case HFSPLUS_VALID_ATTR_TREE:
		return 0;
	case HFSPLUS_FAILED_ATTR_TREE:
		return -EOPNOTSUPP;
	default:
		BUG();
	}

	attr_file = hfsplus_iget(sb, HFSPLUS_ATTR_CNID);
	if (IS_ERR(attr_file)) {
		pr_err("failed to load attributes file\n");
		return PTR_ERR(attr_file);
	}

	BUG_ON(i_size_read(attr_file) != 0);

	hip = HFSPLUS_I(attr_file);

	clump_size = hfsplus_calc_btree_clump_size(sb->s_blocksize,
						    node_size,
						    sbi->sect_count,
						    HFSPLUS_ATTR_CNID);

	mutex_lock(&hip->extents_lock);
	hip->clump_blocks = clump_size >> sbi->alloc_blksz_shift;
	mutex_unlock(&hip->extents_lock);

	if (sbi->free_blocks <= (hip->clump_blocks << 1)) {
		err = -ENOSPC;
		goto end_attr_file_creation;
	}

	while (hip->alloc_blocks < hip->clump_blocks) {
		err = hfsplus_file_extend(attr_file, false);
		if (unlikely(err)) {
			pr_err("failed to extend attributes file\n");
			goto end_attr_file_creation;
		}
		hip->phys_size = attr_file->i_size =
			(loff_t)hip->alloc_blocks << sbi->alloc_blksz_shift;
		hip->fs_blocks = hip->alloc_blocks << sbi->fs_shift;
		inode_set_bytes(attr_file, attr_file->i_size);
	}

	buf = kzalloc(node_size, GFP_NOFS);
	if (!buf) {
		pr_err("failed to allocate memory for header node\n");
		err = -ENOMEM;
		goto end_attr_file_creation;
	}

	hfsplus_init_header_node(attr_file, clump_size, buf, node_size);

	mapping = attr_file->i_mapping;

	index = 0;
	written = 0;
	for (; written < node_size; index++, written += PAGE_CACHE_SIZE) {
		void *kaddr;

		page = read_mapping_page(mapping, index, NULL);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto failed_header_node_init;
		}

		kaddr = kmap_atomic(page);
		memcpy(kaddr, buf + written,
			min_t(size_t, PAGE_CACHE_SIZE, node_size - written));
		kunmap_atomic(kaddr);

		set_page_dirty(page);
		page_cache_release(page);
	}

	hfsplus_mark_inode_dirty(attr_file, HFSPLUS_I_ATTR_DIRTY);

	sbi->attr_tree = hfs_btree_open(sb, HFSPLUS_ATTR_CNID);
	if (!sbi->attr_tree)
		pr_err("failed to load attributes file\n");

failed_header_node_init:
	kfree(buf);

end_attr_file_creation:
	iput(attr_file);

	if (!err)
		atomic_set(&sbi->attr_tree_state, HFSPLUS_VALID_ATTR_TREE);
	else if (err == -ENOSPC)
		atomic_set(&sbi->attr_tree_state, HFSPLUS_EMPTY_ATTR_TREE);
	else
		atomic_set(&sbi->attr_tree_state, HFSPLUS_FAILED_ATTR_TREE);

	return err;
}

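/*
 * Back end for setting (and, with a NULL value, removing) extended
 * attributes.  Finder info is stored directly in the catalog record;
 * everything else goes through the attributes B-tree, which is created
 * lazily on first use.
 */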
int __hfsplus_setxattr(struct inode *inode, const char *name,
			const void *value, size_t size, int flags)
{
	int err = 0;
	struct hfs_find_data cat_fd;
	hfsplus_cat_entry entry;
	u16 cat_entry_flags, cat_entry_type;
	u16 folder_finderinfo_len = sizeof(struct DInfo) +
				sizeof(struct DXInfo);
	u16 file_finderinfo_len = sizeof(struct FInfo) +
				sizeof(struct FXInfo);

	if ((!S_ISREG(inode->i_mode) &&
			!S_ISDIR(inode->i_mode)) ||
				HFSPLUS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	if (value == NULL)
		return hfsplus_removexattr(inode, name);

	err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
	if (err) {
		pr_err("can't init xattr find struct\n");
		return err;
	}

	err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
	if (err) {
		pr_err("catalog searching failed\n");
		goto end_setxattr;
	}

	if (!strcmp_xattr_finder_info(name)) {
		if (flags & XATTR_CREATE) {
			pr_err("xattr exists yet\n");
			err = -EOPNOTSUPP;
			goto end_setxattr;
		}
		hfs_bnode_read(cat_fd.bnode, &entry, cat_fd.entryoffset,
				sizeof(hfsplus_cat_entry));
		if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) {
			if (size == folder_finderinfo_len) {
				memcpy(&entry.folder.user_info, value,
						folder_finderinfo_len);
				hfs_bnode_write(cat_fd.bnode, &entry,
					cat_fd.entryoffset,
					sizeof(struct hfsplus_cat_folder));
				hfsplus_mark_inode_dirty(inode,
						HFSPLUS_I_CAT_DIRTY);
			} else {
				err = -ERANGE;
				goto end_setxattr;
			}
		} else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) {
			if (size == file_finderinfo_len) {
				memcpy(&entry.file.user_info, value,
						file_finderinfo_len);
				hfs_bnode_write(cat_fd.bnode, &entry,
					cat_fd.entryoffset,
					sizeof(struct hfsplus_cat_file));
				hfsplus_mark_inode_dirty(inode,
						HFSPLUS_I_CAT_DIRTY);
			} else {
				err = -ERANGE;
				goto end_setxattr;
			}
		} else {
			err = -EOPNOTSUPP;
			goto end_setxattr;
		}
		goto end_setxattr;
	}

	if (!HFSPLUS_SB(inode->i_sb)->attr_tree) {
		err = hfsplus_create_attributes_file(inode->i_sb);
		if (unlikely(err))
			goto end_setxattr;
	}

	if (hfsplus_attr_exists(inode, name)) {
		if (flags & XATTR_CREATE) {
			pr_err("xattr exists yet\n");
			err = -EOPNOTSUPP;
			goto end_setxattr;
		}
		err = hfsplus_delete_attr(inode, name);
		if (err)
			goto end_setxattr;
		err = hfsplus_create_attr(inode, name, value, size);
		if (err)
			goto end_setxattr;
	} else {
		if (flags & XATTR_REPLACE) {
			pr_err("cannot replace xattr\n");
			err = -EOPNOTSUPP;
			goto end_setxattr;
		}
		err = hfsplus_create_attr(inode, name, value, size);
		if (err)
			goto end_setxattr;
	}

	cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);
	if (cat_entry_type == HFSPLUS_FOLDER) {
		cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
				cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, flags));
		cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
		if (!strcmp_xattr_acl(name))
			cat_entry_flags |= HFSPLUS_ACL_EXISTS;
		hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, flags),
				cat_entry_flags);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
	} else if (cat_entry_type == HFSPLUS_FILE) {
		cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode,
				cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_file, flags));
		cat_entry_flags |= HFSPLUS_XATTR_EXISTS;
		if (!strcmp_xattr_acl(name))
			cat_entry_flags |= HFSPLUS_ACL_EXISTS;
		hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_file, flags),
				cat_entry_flags);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
	} else {
		pr_err("invalid catalog entry type\n");
		err = -EIO;
		goto end_setxattr;
	}

end_setxattr:
	hfs_find_exit(&cat_fd);
	return err;
}

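/*
 * Length that @xattr_name occupies in a listxattr buffer: the name, a
 * terminating NUL, and the "osx." prefix when the name does not belong
 * to one of the known namespaces.
 */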
static int name_len(const char *xattr_name, int xattr_name_len)
{
	int len = xattr_name_len + 1;

	if (!is_known_namespace(xattr_name))
		len += XATTR_MAC_OSX_PREFIX_LEN;

	return len;
}

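/*
 * Copy @xattr_name into @buffer, prepending the "osx." prefix for names
 * outside the known namespaces and NUL-terminating the result.  Returns
 * the number of bytes written, including the terminating NUL.
 */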
static int copy_name(char *buffer, const char *xattr_name, int name_len)
{
	int len = name_len;
	int offset = 0;

	if (!is_known_namespace(xattr_name)) {
		strncpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN);
		offset += XATTR_MAC_OSX_PREFIX_LEN;
		len += XATTR_MAC_OSX_PREFIX_LEN;
	}

	strncpy(buffer + offset, xattr_name, name_len);
	memset(buffer + offset + name_len, 0, 1);
	len += 1;

	return len;
}

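/*
 * Read the Finder info of a file or folder straight from its catalog
 * record.  With a zero @size the required buffer length is returned
 * instead, matching getxattr() semantics; a short buffer gives -ERANGE.
 */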
static ssize_t hfsplus_getxattr_finder_info(struct inode *inode,
						void *value, size_t size)
{
	ssize_t res = 0;
	struct hfs_find_data fd;
	u16 entry_type;
	u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo);
	u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo);
	u16 record_len = max(folder_rec_len, file_rec_len);
	u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
	u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];

	if (size >= record_len) {
		res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
		if (res) {
			pr_err("can't init xattr find struct\n");
			return res;
		}
		res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
		if (res)
			goto end_getxattr_finder_info;
		entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);

		if (entry_type == HFSPLUS_FOLDER) {
			hfs_bnode_read(fd.bnode, folder_finder_info,
				fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, user_info),
				folder_rec_len);
			memcpy(value, folder_finder_info, folder_rec_len);
			res = folder_rec_len;
		} else if (entry_type == HFSPLUS_FILE) {
			hfs_bnode_read(fd.bnode, file_finder_info,
				fd.entryoffset +
				offsetof(struct hfsplus_cat_file, user_info),
				file_rec_len);
			memcpy(value, file_finder_info, file_rec_len);
			res = file_rec_len;
		} else {
			res = -EOPNOTSUPP;
			goto end_getxattr_finder_info;
		}
	} else
		res = size ? -ERANGE : record_len;

end_getxattr_finder_info:
	if (size >= record_len)
		hfs_find_exit(&fd);
	return res;
}

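/*
 * Common getxattr back end: Finder info comes from the catalog record,
 * everything else from an inline-data record in the attributes B-tree.
 * A zero @size returns the value length, a short buffer gives -ERANGE.
 */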
ssize_t __hfsplus_getxattr(struct inode *inode, const char *name,
			 void *value, size_t size)
{
	struct hfs_find_data fd;
	hfsplus_attr_entry *entry;
	__be32 xattr_record_type;
	u32 record_type;
	u16 record_length = 0;
	ssize_t res = 0;

	if ((!S_ISREG(inode->i_mode) &&
			!S_ISDIR(inode->i_mode)) ||
				HFSPLUS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	if (!strcmp_xattr_finder_info(name))
		return hfsplus_getxattr_finder_info(inode, value, size);

	if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
		return -EOPNOTSUPP;

	entry = hfsplus_alloc_attr_entry();
	if (!entry) {
		pr_err("can't allocate xattr entry\n");
		return -ENOMEM;
	}

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
	if (res) {
		pr_err("can't init xattr find struct\n");
		goto failed_getxattr_init;
	}

	res = hfsplus_find_attr(inode->i_sb, inode->i_ino, name, &fd);
	if (res) {
		if (res == -ENOENT)
			res = -ENODATA;
		else
			pr_err("xattr searching failed\n");
		goto out;
	}

	hfs_bnode_read(fd.bnode, &xattr_record_type,
			fd.entryoffset, sizeof(xattr_record_type));
	record_type = be32_to_cpu(xattr_record_type);
	if (record_type == HFSPLUS_ATTR_INLINE_DATA) {
		record_length = hfs_bnode_read_u16(fd.bnode,
				fd.entryoffset +
				offsetof(struct hfsplus_attr_inline_data,
				length));
		if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) {
			pr_err("invalid xattr record size\n");
			res = -EIO;
			goto out;
		}
	} else if (record_type == HFSPLUS_ATTR_FORK_DATA ||
			record_type == HFSPLUS_ATTR_EXTENTS) {
		pr_err("only inline data xattr are supported\n");
		res = -EOPNOTSUPP;
		goto out;
	} else {
		pr_err("invalid xattr record\n");
		res = -EIO;
		goto out;
	}

	if (size) {
		hfs_bnode_read(fd.bnode, entry, fd.entryoffset,
				offsetof(struct hfsplus_attr_inline_data,
					raw_bytes) + record_length);
	}

	if (size >= record_length) {
		memcpy(value, entry->inline_data.raw_bytes, record_length);
		res = record_length;
	} else
		res = size ? -ERANGE : record_length;

out:
	hfs_find_exit(&fd);

failed_getxattr_init:
	hfsplus_destroy_attr_entry(entry);
	return res;
}

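/*
 * "trusted." attributes are only listed for CAP_SYS_ADMIN; all other
 * names are always listed.
 */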
static inline int can_list(const char *xattr_name)
{
	if (!xattr_name)
		return 0;

	return strncmp(xattr_name, XATTR_TRUSTED_PREFIX,
			XATTR_TRUSTED_PREFIX_LEN) ||
			capable(CAP_SYS_ADMIN);
}

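/*
 * Add HFSPLUS_XATTR_FINDER_INFO_NAME to the listing when the catalog
 * record's Finder info contains any non-zero byte; an all-zero record
 * is treated as having no Finder info attribute.
 */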
static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry,
						char *buffer, size_t size)
{
	ssize_t res = 0;
	struct inode *inode = dentry->d_inode;
	struct hfs_find_data fd;
	u16 entry_type;
	u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)];
	u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)];
	unsigned long len, found_bit;
	int xattr_name_len, symbols_count;

	res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd);
	if (res) {
		pr_err("can't init xattr find struct\n");
		return res;
	}

	res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd);
	if (res)
		goto end_listxattr_finder_info;

	entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset);
	if (entry_type == HFSPLUS_FOLDER) {
		len = sizeof(struct DInfo) + sizeof(struct DXInfo);
		hfs_bnode_read(fd.bnode, folder_finder_info,
				fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, user_info),
				len);
		found_bit = find_first_bit((void *)folder_finder_info, len*8);
	} else if (entry_type == HFSPLUS_FILE) {
		len = sizeof(struct FInfo) + sizeof(struct FXInfo);
		hfs_bnode_read(fd.bnode, file_finder_info,
				fd.entryoffset +
				offsetof(struct hfsplus_cat_file, user_info),
				len);
		found_bit = find_first_bit((void *)file_finder_info, len*8);
	} else {
		res = -EOPNOTSUPP;
		goto end_listxattr_finder_info;
	}

	if (found_bit >= (len*8))
		res = 0;
	else {
		symbols_count = sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME) - 1;
		xattr_name_len =
			name_len(HFSPLUS_XATTR_FINDER_INFO_NAME, symbols_count);
		if (!buffer || !size) {
			if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME))
				res = xattr_name_len;
		} else if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME)) {
			if (size < xattr_name_len)
				res = -ERANGE;
			else {
				res = copy_name(buffer,
					HFSPLUS_XATTR_FINDER_INFO_NAME,
					symbols_count);
			}
		}
	}

end_listxattr_finder_info:
	hfs_find_exit(&fd);

	return res;
}

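/*
 * List all attribute names of an inode: the Finder info pseudo-attribute
 * first, then every key with a matching CNID in the attributes B-tree.
 * The conversion buffer must hold the worst-case NLS expansion of an
 * on-disk name, i.e. NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
 * bytes plus room for the "osx." prefix and a terminating NUL.
 */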
ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size)
{
	ssize_t err;
	ssize_t res = 0;
	struct inode *inode = dentry->d_inode;
	struct hfs_find_data fd;
	u16 key_len = 0;
	struct hfsplus_attr_key attr_key;
	char *strbuf;
	int xattr_name_len;

	if ((!S_ISREG(inode->i_mode) &&
			!S_ISDIR(inode->i_mode)) ||
				HFSPLUS_IS_RSRC(inode))
		return -EOPNOTSUPP;

	res = hfsplus_listxattr_finder_info(dentry, buffer, size);
	if (res < 0)
		return res;
	else if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
		return (res == 0) ? -EOPNOTSUPP : res;

	err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd);
	if (err) {
		pr_err("can't init xattr find struct\n");
		return err;
	}

	strbuf = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN +
			XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
	if (!strbuf) {
		res = -ENOMEM;
		goto out;
	}

	err = hfsplus_find_attr(inode->i_sb, inode->i_ino, NULL, &fd);
	if (err) {
		if (err == -ENOENT) {
			if (res == 0)
				res = -ENODATA;
			goto end_listxattr;
		} else {
			res = err;
			goto end_listxattr;
		}
	}

	for (;;) {
		key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset);
		if (key_len == 0 || key_len > fd.tree->max_key_len) {
			pr_err("invalid xattr key length: %d\n", key_len);
			res = -EIO;
			goto end_listxattr;
		}

		hfs_bnode_read(fd.bnode, &attr_key,
				fd.keyoffset, key_len + sizeof(key_len));

		if (be32_to_cpu(attr_key.cnid) != inode->i_ino)
			goto end_listxattr;

		xattr_name_len = NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN;
		if (hfsplus_uni2asc(inode->i_sb,
			(const struct hfsplus_unistr *)&fd.key->attr.key_name,
			strbuf, &xattr_name_len)) {
			pr_err("unicode conversion failed\n");
			res = -EIO;
			goto end_listxattr;
		}

		if (!buffer || !size) {
			if (can_list(strbuf))
				res += name_len(strbuf, xattr_name_len);
		} else if (can_list(strbuf)) {
			if (size < (res + name_len(strbuf, xattr_name_len))) {
				res = -ERANGE;
				goto end_listxattr;
			} else
				res += copy_name(buffer + res,
						strbuf, xattr_name_len);
		}

		if (hfs_brec_goto(&fd, 1))
			goto end_listxattr;
	}

end_listxattr:
	kfree(strbuf);
out:
	hfs_find_exit(&fd);
	return res;
}

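/*
 * Delete one attribute from the attributes B-tree and clear the
 * HFSPLUS_ACL_EXISTS / HFSPLUS_XATTR_EXISTS catalog flags once the ACL
 * attribute, or the last attribute, is gone.  Finder info cannot be
 * removed this way.
 */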
static int hfsplus_removexattr(struct inode *inode, const char *name)
{
	int err = 0;
	struct hfs_find_data cat_fd;
	u16 flags;
	u16 cat_entry_type;
	int is_xattr_acl_deleted = 0;
	int is_all_xattrs_deleted = 0;

	if (!HFSPLUS_SB(inode->i_sb)->attr_tree)
		return -EOPNOTSUPP;

	if (!strcmp_xattr_finder_info(name))
		return -EOPNOTSUPP;

	err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd);
	if (err) {
		pr_err("can't init xattr find struct\n");
		return err;
	}

	err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd);
	if (err) {
		pr_err("catalog searching failed\n");
		goto end_removexattr;
	}

	err = hfsplus_delete_attr(inode, name);
	if (err)
		goto end_removexattr;

	is_xattr_acl_deleted = !strcmp_xattr_acl(name);
	is_all_xattrs_deleted = !hfsplus_attr_exists(inode, NULL);

	if (!is_xattr_acl_deleted && !is_all_xattrs_deleted)
		goto end_removexattr;

	cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset);

	if (cat_entry_type == HFSPLUS_FOLDER) {
		flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, flags));
		if (is_xattr_acl_deleted)
			flags &= ~HFSPLUS_ACL_EXISTS;
		if (is_all_xattrs_deleted)
			flags &= ~HFSPLUS_XATTR_EXISTS;
		hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_folder, flags),
				flags);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
	} else if (cat_entry_type == HFSPLUS_FILE) {
		flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_file, flags));
		if (is_xattr_acl_deleted)
			flags &= ~HFSPLUS_ACL_EXISTS;
		if (is_all_xattrs_deleted)
			flags &= ~HFSPLUS_XATTR_EXISTS;
		hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset +
				offsetof(struct hfsplus_cat_file, flags),
				flags);
		hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY);
	} else {
		pr_err("invalid catalog entry type\n");
		err = -EIO;
		goto end_removexattr;
	}

end_removexattr:
	hfs_find_exit(&cat_fd);
	return err;
}

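/*
 * "osx." namespace handler.  Names that already carry a known prefix
 * are rejected, so properly prefixed attributes cannot be reached
 * through an extra "osx." prefix; otherwise the prefix is re-attached
 * and the request forwarded to hfsplus_getxattr().  The name buffer is
 * sized for the worst-case NLS expansion of an on-disk attribute name.
 */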
static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name,
					void *buffer, size_t size, int type)
{
	char *xattr_name;
	int res;

	if (!strcmp(name, ""))
		return -EINVAL;

	/*
	 * Don't allow retrieving properly prefixed attributes
	 * by prepending them with "osx."
	 */
	if (is_known_namespace(name))
		return -EOPNOTSUPP;

	xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
		+ XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
	if (!xattr_name)
		return -ENOMEM;
	strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
	strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);

	res = hfsplus_getxattr(dentry, xattr_name, buffer, size);
	kfree(xattr_name);
	return res;
}

static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name,
		const void *buffer, size_t size, int flags, int type)
{
hfsplus: correct usage of HFSPLUS_ATTR_MAX_STRLEN for non-English attributes
HFSPLUS_ATTR_MAX_STRLEN (=127) is the limit of attribute names for the
number of unicode character (UTF-16BE) storable in the HFS+ file system.
Almost all the current usage of it is wrong, in relation to NLS to
on-disk conversion.
Except for one use in a call to hfsplus_asc2uni() (which should stay the
same) and the uses in calls to hfsplus_uni2asc() (which were corrected by
the earlier patch in this series on hfsplus_uni2asc usage), all the other
uses are of the forms:
- char buffer[size]
- bound check: "if (namespace_adjusted_input_length > size) return failure;"
Conversion between the on-disk unicode representation and NLS char
strings (in whichever direction) always needs to accommodate the
worst-case NLS conversion, so every char buffer of that size needs a
NLS_MAX_CHARSET_SIZE multiplier.
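As a concrete example, the allocation in hfsplus_osx_setxattr() further
down in this file becomes:

  xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
          + XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);

that is, one NLS_MAX_CHARSET_SIZE-byte slot for every possible on-disk
UTF-16 unit, plus room for the "osx." prefix and a terminating NUL; the
old char-array form counted HFSPLUS_ATTR_MAX_STRLEN as bytes rather than
UTF-16 units.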
The bound checks are all wrong, since they compare nls_length derived
from strlen() to a unicode length limit.
It turns out that all the bound-checks do is to protect
hfsplus_asc2uni(), which can fail if the input is too large.
There is only one usage of it as far as attributes are concerned, in
hfsplus_attr_build_key(). It is in turn used by hfsplus_find_attr(),
hfsplus_create_attr(), hfsplus_delete_attr(). Thus making sure that
errors from hfsplus_asc2uni() are caught in hfsplus_attr_build_key() and
propagated is sufficient to replace all the bound checks.
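To make the pattern concrete, here is a self-contained toy model in
user-space C (the toy_* names are hypothetical stand-ins, not the kernel
functions): the converter itself rejects over-long input, and the key
builder simply propagates that error instead of pre-checking strlen()
against a unicode-unit limit.

  #include <errno.h>
  #include <stdio.h>
  #include <string.h>

  #define MAX_UNITS 127   /* stand-in for HFSPLUS_ATTR_MAX_STRLEN */

  /* Toy stand-in for hfsplus_asc2uni(): pretend each input byte becomes
   * one output unit and fail when the result would not fit on disk. */
  static int toy_asc2uni(const char *name)
  {
          return strlen(name) > MAX_UNITS ? -ENAMETOOLONG : 0;
  }

  /* Toy stand-in for hfsplus_attr_build_key(): no byte-length bound
   * check of its own, just propagate the converter's verdict. */
  static int toy_build_key(const char *name)
  {
          int err = toy_asc2uni(name);

          if (err)
                  return err;
          /* ... a real implementation would build the b-tree key here ... */
          return 0;
  }

  int main(void)
  {
          char longname[200];

          memset(longname, 'a', sizeof(longname) - 1);
          longname[sizeof(longname) - 1] = '\0';
          printf("short name: %d, long name: %d\n",
                 toy_build_key("user.colour"), toy_build_key(longname));
          return 0;
  }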
Unpropagated errors from hfsplus_asc2uni() in the file catalog code were
addressed recently in an independent patch "hfsplus: fix longname
handling" by Sougata Santra.
Before this patch, trying to set an attribute with a 55 CJK character
name (in a UTF-8 locale, 55 > 127/3 = 42) plus the "user." prefix fails with:
$ setfattr -n user.`cat testing-string` -v `cat testing-string` \
testing-string
setfattr: testing-string: Operation not supported
and retrieving a stored long attribute is particularly ugly(!):
find /mnt/* -type f -exec getfattr -d {} \;
getfattr: /mnt/testing-string: Input/output error
with console log:
[268008.389781] hfsplus: unicode conversion failed
After the patch, both of the above work.
FYI, the test attribute string is prepared with:
echo -e -n \
"\xe9\x80\x99\xe6\x98\xaf\xe4\xb8\x80\xe5\x80\x8b\xe9\x9d\x9e\xe5" \
"\xb8\xb8\xe6\xbc\xab\xe9\x95\xb7\xe8\x80\x8c\xe6\xa5\xb5\xe5\x85" \
"\xb6\xe4\xb9\x8f\xe5\x91\xb3\xe5\x92\x8c\xe7\x9b\xb8\xe7\x95\xb6" \
"\xe7\x84\xa1\xe8\xb6\xa3\xe3\x80\x81\xe4\xbb\xa5\xe5\x8f\x8a\xe7" \
"\x84\xa1\xe7\x94\xa8\xe7\x9a\x84\xe3\x80\x81\xe5\x86\x8d\xe5\x8a" \
"\xa0\xe4\xb8\x8a\xe6\xaf\xab\xe7\x84\xa1\xe6\x84\x8f\xe7\xbe\xa9" \
"\xe7\x9a\x84\xe6\x93\xb4\xe5\xb1\x95\xe5\xb1\xac\xe6\x80\xa7\xef" \
"\xbc\x8c\xe8\x80\x8c\xe5\x85\xb6\xe5\x94\xaf\xe4\xb8\x80\xe5\x89" \
"\xb5\xe5\xbb\xba\xe7\x9b\xae\xe7\x9a\x84\xe5\x83\x85\xe6\x98\xaf" \
"\xe7\x82\xba\xe4\xba\x86\xe6\xb8\xac\xe8\xa9\xa6\xe4\xbd\x9c\xe7" \
"\x94\xa8\xe3\x80\x82" | tr -d ' '
(= "pointlessly long attribute for testing", elaborate Chinese in
UTF-8 enoding).
However, it is not possible to set double the size (110 + 5 is still
under 127) in a UTF-8 locale:
$ setfattr -n user.`cat testing-string testing-string` -v \
`cat testing-string testing-string` testing-string
setfattr: testing-string: Numerical result out of range
110 CJK characters in UTF-8 are 330 bytes - the generic get/set attribute
system call code in linux/fs/xattr.c imposes a 255-byte limit on names.
One can use a combination of iconv to encode content, a change of terminal
locale for viewing, and an nls=cp932/cp936/cp949/cp950 mount option to
make full use of 127-unicode-character attributes in a double-byte locale.
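Spelling the arithmetic out (255 is XATTR_NAME_MAX, the VFS-side byte
limit on attribute names; 127 is the HFS+ on-disk limit in UTF-16 units):

  110 CJK characters -> 110 UTF-16 units           (<= 127, fits on disk)
  110 CJK characters -> 110 * 3 = 330 UTF-8 bytes  (> 255, rejected by the VFS)
  110 CJK characters -> 110 * 2 = 220 bytes in a double-byte NLS
                        such as cp936              (<= 255, accepted)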
Also, as additional information, it is possible to (mis-)use unicode
half-width/full-width forms (U+FFxx) to write attributes which look
like English but are not actually ASCII.
Thanks Anton Altaparmakov for reviewing the earlier ideas behind this
change.
[akpm@linux-foundation.org: fix build]
Signed-off-by: Hin-Tak Leung <htl10@users.sourceforge.net>
Cc: Anton Altaparmakov <anton@tuxera.com>
Cc: Vyacheslav Dubeyko <slava@dubeyko.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Sougata Santra <sougata@tuxera.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
2014-06-07 04:36:22 +07:00
|
|
|
char *xattr_name;
|
|
|
|
int res;
|
2013-02-28 08:03:03 +07:00
|
|
|
|
|
|
|
if (!strcmp(name, ""))
|
|
|
|
return -EINVAL;
|
|
|
|
|
2014-01-30 14:59:19 +07:00
|
|
|
/*
|
|
|
|
* Don't allow setting properly prefixed attributes
|
|
|
|
* by prepending them with "osx."
|
|
|
|
*/
|
2013-12-20 20:16:56 +07:00
|
|
|
if (is_known_namespace(name))
|
|
|
|
return -EOPNOTSUPP;
|
2014-06-07 04:36:22 +07:00
|
|
|
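/* Worst case: each of up to HFSPLUS_ATTR_MAX_STRLEN on-disk UTF-16 units
 * may need NLS_MAX_CHARSET_SIZE bytes in the local encoding; also leave
 * room for the "osx." prefix and the terminating NUL. */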
xattr_name = kmalloc(NLS_MAX_CHARSET_SIZE * HFSPLUS_ATTR_MAX_STRLEN
|
|
|
|
+ XATTR_MAC_OSX_PREFIX_LEN + 1, GFP_KERNEL);
|
|
|
|
if (!xattr_name)
|
|
|
|
return -ENOMEM;
|
|
|
|
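/* Rebuild the full, prefixed name ("osx." + name) before handing it to
 * the common hfsplus_setxattr() helper below. */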
strcpy(xattr_name, XATTR_MAC_OSX_PREFIX);
|
|
|
|
strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name);
|
2013-12-20 20:16:56 +07:00
|
|
|
|
2014-06-07 04:36:22 +07:00
|
|
|
res = hfsplus_setxattr(dentry, xattr_name, buffer, size, flags);
|
|
|
|
kfree(xattr_name);
|
|
|
|
return res;
|
2013-02-28 08:03:03 +07:00
|
|
|
}
|
|
|
|
|
|
|
|
static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list,
|
|
|
|
size_t list_size, const char *name, size_t name_len, int type)
|
|
|
|
{
|
|
|
|
/*
|
|
|
|
* This method is not used.
|
|
|
|
* hfsplus_listxattr() is used instead of generic_listxattr().
|
|
|
|
*/
|
|
|
|
return -EOPNOTSUPP;
|
|
|
|
}
|
|
|
|
|
|
|
|
const struct xattr_handler hfsplus_xattr_osx_handler = {
|
|
|
|
.prefix = XATTR_MAC_OSX_PREFIX,
|
|
|
|
.list = hfsplus_osx_listxattr,
|
|
|
|
.get = hfsplus_osx_getxattr,
|
|
|
|
.set = hfsplus_osx_setxattr,
|
|
|
|
};
|