// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 */

#include <asm/unaligned.h>

#include "ctree.h"
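
/*
 * asm/unaligned.h provides get/put_unaligned_le16/32/64 but no 8-bit
 * variant.  These trivial helpers let DEFINE_BTRFS_SETGET_BITS(8) below
 * use the same get/put_unaligned_le##bits pattern as the wider sizes.
 */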
static inline u8 get_unaligned_le8(const void *p)
{
	return *(u8 *)p;
}

static inline void put_unaligned_le8(u8 val, void *p)
{
	*(u8 *)p = val;
}
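
/*
 * Verify that a member access at @off bytes past @ptr, of @size bytes,
 * stays within the extent buffer.  Used only inside the ASSERT()s of the
 * setget helpers defined below.
 */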
static bool check_setget_bounds(const struct extent_buffer *eb,
				const void *ptr, unsigned off, int size)
{
	const unsigned long member_offset = (unsigned long)ptr + off;

	if (member_offset > eb->len) {
		btrfs_warn(eb->fs_info,
	"bad eb member start: ptr 0x%lx start %llu member offset %lu size %d",
			   (unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}
	if (member_offset + size > eb->len) {
		btrfs_warn(eb->fs_info,
	"bad eb member end: ptr 0x%lx start %llu member offset %lu size %d",
			   (unsigned long)ptr, eb->start, member_offset, size);
		return false;
	}

	return true;
}

/*
 * This is some deeply nasty code.
 *
 * The end result is that anyone who #includes ctree.h gets a
 * declaration for the btrfs_set_foo functions and btrfs_foo functions,
 * which are wrappers of btrfs_set_token_#bits functions and
 * btrfs_get_token_#bits functions, which are defined in this file.
 *
 * These setget functions do all the extent_buffer related mapping
 * required to efficiently read and write specific fields in the extent
 * buffers.  Every pointer to metadata items in btrfs is really just
 * an unsigned long offset into the extent buffer which has been
 * cast to a specific type.  This gives us all the gcc type checking.
 *
 * The extent buffer API is used to do the page spanning work required to
 * have a metadata blocksize different from the page size.
 *
 * There are two variants defined: one with a token pointer and one without.
 */
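
/*
 * A minimal usage sketch of the token variant (illustrative only: the
 * btrfs_inode_item member and the btrfs_init_map_token() call are examples
 * of how callers typically drive these helpers, not code from this file):
 *
 *	struct btrfs_map_token token;
 *	u64 gen;
 *
 *	btrfs_init_map_token(&token, eb);
 *	gen = btrfs_get_token_64(&token, item,
 *			offsetof(struct btrfs_inode_item, generation));
 *	btrfs_set_token_64(&token, item,
 *			offsetof(struct btrfs_inode_item, generation), gen + 1);
 *
 * The token caches the mapped address and offset of the last page touched,
 * so back-to-back accesses to members in the same page avoid repeating the
 * page lookup.
 */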

#define DEFINE_BTRFS_SETGET_BITS(bits)					\
u##bits btrfs_get_token_##bits(struct btrfs_map_token *token,		\
			       const void *ptr, unsigned long off)	\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	const unsigned long oip = offset_in_page(member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		return get_unaligned_le##bits(token->kaddr + oip);	\
	}								\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (oip + size <= PAGE_SIZE)					\
		return get_unaligned_le##bits(token->kaddr + oip);	\
									\
	memcpy(lebytes, token->kaddr + oip, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);	\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(lebytes + part, token->kaddr, size - part);		\
	return get_unaligned_le##bits(lebytes);				\
}									\
u##bits btrfs_get_##bits(const struct extent_buffer *eb,		\
			 const void *ptr, unsigned long off)		\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = offset_in_page(member_offset);	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (oip + size <= PAGE_SIZE)					\
		return get_unaligned_le##bits(kaddr + oip);		\
									\
	memcpy(lebytes, kaddr + oip, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(lebytes + part, kaddr, size - part);			\
	return get_unaligned_le##bits(lebytes);				\
}									\
void btrfs_set_token_##bits(struct btrfs_map_token *token,		\
			    const void *ptr, unsigned long off,		\
			    u##bits val)				\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	const unsigned long oip = offset_in_page(member_offset);	\
	const int size = sizeof(u##bits);				\
	u8 lebytes[sizeof(u##bits)];					\
	const int part = PAGE_SIZE - oip;				\
									\
	ASSERT(token);							\
	ASSERT(token->kaddr);						\
	ASSERT(check_setget_bounds(token->eb, ptr, off, size));	\
	if (token->offset <= member_offset &&				\
	    member_offset + size <= token->offset + PAGE_SIZE) {	\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	token->kaddr = page_address(token->eb->pages[idx]);		\
	token->offset = idx << PAGE_SHIFT;				\
	if (oip + size <= PAGE_SIZE) {					\
		put_unaligned_le##bits(val, token->kaddr + oip);	\
		return;							\
	}								\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(token->kaddr + oip, lebytes, part);			\
	token->kaddr = page_address(token->eb->pages[idx + 1]);	\
	token->offset = (idx + 1) << PAGE_SHIFT;			\
	memcpy(token->kaddr, lebytes + part, size - part);		\
}									\
void btrfs_set_##bits(const struct extent_buffer *eb, void *ptr,	\
		      unsigned long off, u##bits val)			\
{									\
	const unsigned long member_offset = (unsigned long)ptr + off;	\
	const unsigned long oip = offset_in_page(member_offset);	\
	const unsigned long idx = member_offset >> PAGE_SHIFT;		\
	char *kaddr = page_address(eb->pages[idx]);			\
	const int size = sizeof(u##bits);				\
	const int part = PAGE_SIZE - oip;				\
	u8 lebytes[sizeof(u##bits)];					\
									\
	ASSERT(check_setget_bounds(eb, ptr, off, size));		\
	if (oip + size <= PAGE_SIZE) {					\
		put_unaligned_le##bits(val, kaddr + oip);		\
		return;							\
	}								\
									\
	put_unaligned_le##bits(val, lebytes);				\
	memcpy(kaddr + oip, lebytes, part);				\
	kaddr = page_address(eb->pages[idx + 1]);			\
	memcpy(kaddr, lebytes + part, size - part);			\
}

DEFINE_BTRFS_SETGET_BITS(8)
DEFINE_BTRFS_SETGET_BITS(16)
DEFINE_BTRFS_SETGET_BITS(32)
DEFINE_BTRFS_SETGET_BITS(64)
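
/*
 * Copy the disk key of the nr-th key pointer of a node into @disk_key.
 * read_eb_member() goes through the extent buffer helpers, so keys that
 * cross a page boundary are handled correctly.
 */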
void btrfs_node_key(const struct extent_buffer *eb,
		    struct btrfs_disk_key *disk_key, int nr)
{
	unsigned long ptr = btrfs_node_key_ptr_offset(nr);
	read_eb_member(eb, (struct btrfs_key_ptr *)ptr,
		       struct btrfs_key_ptr, key, disk_key);
}