journal: add ability to list values a specified field can take in all entries of the journal
The new 'unique' API allows listing all unique values that a field, specified by its name, takes across all entries of the journal. This allows answering queries such as "Which units have logged to the journal?", "Which hosts have logged to the journal?", or "Which boot IDs appear in the journal?". Ultimately, this makes it possible to implement tools similar to lastlog on top of journal data.

Note that listing these field values will not work for journal files created by an older journald, as the field values are not indexed in older files.
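For orientation, here is a minimal usage sketch of the public API this commit introduces (sd_journal_query_unique(), sd_journal_enumerate_unique() and the SD_JOURNAL_FOREACH_UNIQUE() macro, all added in the diff below). It assumes a libsystemd-journal build that already ships these entry points and the usual <systemd/sd-journal.h> include path; the field name "_SYSTEMD_UNIT" is only an illustrative choice, any indexed field works:

/* Minimal usage sketch (not part of the commit): list every unique value
 * one journal field takes, using the API introduced by this commit. */
#include <stdio.h>
#include <string.h>
#include <systemd/sd-journal.h>

int main(void) {
        sd_journal *j;
        const void *data;
        size_t l;
        int r;

        r = sd_journal_open(&j, SD_JOURNAL_LOCAL_ONLY);
        if (r < 0) {
                fprintf(stderr, "Failed to open journal: %s\n", strerror(-r));
                return 1;
        }

        /* Select the field whose unique values we want to enumerate. */
        r = sd_journal_query_unique(j, "_SYSTEMD_UNIT");
        if (r < 0) {
                fprintf(stderr, "Failed to query unique values: %s\n", strerror(-r));
                sd_journal_close(j);
                return 1;
        }

        /* Each iteration yields one "FIELD=value" payload exactly once. */
        SD_JOURNAL_FOREACH_UNIQUE(j, data, l)
                printf("%.*s\n", (int) l, (const char*) data);

        sd_journal_close(j);
        return 0;
}

The test-journal changes and the new arg_field variable in journalctl.c further down in the diff exercise the same entry points.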
This commit is contained in:
parent 86b2e20a5e
commit 3c1668da62

Makefile.am (11 lines changed)
@@ -513,7 +513,8 @@ MANPAGES = \
man/sd_journal_get_fd.3 \
man/sd_journal_get_usage.3 \
man/sd_journal_add_match.3 \
man/sd_journal_seek_head.3
man/sd_journal_seek_head.3 \
man/sd_journal_query_unique.3

MANPAGES_ALIAS = \
man/reboot.8 \
@@ -584,7 +585,10 @@ MANPAGES_ALIAS = \
man/sd_journal_seek_monotonic_usec.3 \
man/sd_journal_seek_realtime_usec.3 \
man/sd_journal_seek_cursor.3 \
man/sd_journal_test_cursor.3
man/sd_journal_test_cursor.3 \
man/sd_journal_enumerate_unique.3 \
man/sd_journal_restart_unique.3 \
man/SD_JOURNAL_FOREACH_UNIQUE.3

man/reboot.8: man/halt.8
man/poweroff.8: man/halt.8
@@ -655,6 +659,9 @@ man/sd_journal_seek_monotonic_usec.3: man/sd_journal_seek_head.3
man/sd_journal_seek_realtime_usec.3: man/sd_journal_seek_head.3
man/sd_journal_seek_cursor.3: man/sd_journal_seek_head.3
man/sd_journal_test_cursor.3: man/sd_journal_get_cursor.3
man/sd_journal_enumerate_unique.3: man/sd_journal_query_unique.3
man/sd_journal_restart_unique.3: man/sd_journal_query_unique.3
man/SD_JOURNAL_FOREACH_UNIQUE.3: man/sd_journal_query_unique.3

XML_FILES = \
${patsubst %.1,%.xml,${patsubst %.3,%.xml,${patsubst %.5,%.xml,${patsubst %.7,%.xml,${patsubst %.8,%.xml,$(MANPAGES)}}}}}
@@ -112,12 +112,15 @@
<citerefentry><refentrytitle>sd_journal_stream_fd</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_open</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_next</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_get_data</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_get_realtime_usec</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_add_match</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_seek_head</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_get_cursor</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_cutoff_realtime_usec</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_get_usage</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_get_fd</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_query_unique</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>journalctl</refentrytitle><manvolnum>1</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd-id128</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>pkg-config</refentrytitle><manvolnum>1</manvolnum></citerefentry>
@@ -70,7 +70,7 @@
</funcprototype>

<funcprototype>
<funcdef>int <function>sd_journal_restart_data</function></funcdef>
<funcdef>void <function>sd_journal_restart_data</function></funcdef>
<paramdef>sd_journal* <parameter>j</parameter></paramdef>
</funcprototype>

@@ -120,7 +120,7 @@

<para>Note that the
<function>SD_JOURNAL_FOREACH_DATA()</function> macro
may be used as a wrapper around
may be used as a handy wrapper around
<function>sd_journal_restart_data()</function> and
<function>sd_journal_enumerate_data()</function>.</para>

@@ -193,7 +193,8 @@ int print_fields(sd_journal *j) {
<citerefentry><refentrytitle>sd-journal</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_open</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_next</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_get_realtime_usec</refentrytitle><manvolnum>3</manvolnum></citerefentry>
<citerefentry><refentrytitle>sd_journal_get_realtime_usec</refentrytitle><manvolnum>3</manvolnum></citerefentry>,
<citerefentry><refentrytitle>sd_journal_query_unique</refentrytitle><manvolnum>3</manvolnum></citerefentry>
</para>
</refsect1>
@@ -260,6 +260,12 @@ int journal_file_hmac_put_object(JournalFile *f, int type, Object *o, uint64_t p
gcry_md_write(f->hmac, o->data.payload, le64toh(o->object.size) - offsetof(DataObject, payload));
break;

case OBJECT_FIELD:
/* Same here */
gcry_md_write(f->hmac, &o->field.hash, sizeof(o->field.hash));
gcry_md_write(f->hmac, o->field.payload, le64toh(o->object.size) - offsetof(FieldObject, payload));
break;

case OBJECT_ENTRY:
/* All */
gcry_md_write(f->hmac, &o->entry.seqnum, le64toh(o->object.size) - offsetof(EntryObject, seqnum));
@@ -484,7 +490,6 @@ int journal_file_append_first_tag(JournalFile *f) {
return 0;
}


int journal_file_parse_verification_key(JournalFile *f, const char *key) {
uint8_t *seed;
size_t seed_size, c;
@@ -399,7 +399,7 @@ int journal_file_move_to_object(JournalFile *f, int type, uint64_t offset, Objec
if (s < minimum_header_size(o))
return -EBADMSG;

if (type >= 0 && o->object.type != type)
if (type > 0 && o->object.type != type)
return -EBADMSG;

if (s > sizeof(ObjectHeader)) {
@@ -526,6 +526,9 @@ static int journal_file_setup_field_hash_table(JournalFile *f) {

assert(f);

/* We use a fixed size hash table for the fields as this
* number should grow very slowly only */

s = DEFAULT_FIELD_HASH_TABLE_SIZE;
r = journal_file_append_object(f,
OBJECT_FIELD_HASH_TABLE,
@@ -586,7 +589,52 @@ static int journal_file_map_field_hash_table(JournalFile *f) {
return 0;
}

static int journal_file_link_data(JournalFile *f, Object *o, uint64_t offset, uint64_t hash) {
static int journal_file_link_field(
JournalFile *f,
Object *o,
uint64_t offset,
uint64_t hash) {

uint64_t p, h;
int r;

assert(f);
assert(o);
assert(offset > 0);

if (o->object.type != OBJECT_FIELD)
return -EINVAL;

/* This might alter the window we are looking at */

o->field.next_hash_offset = o->field.head_data_offset = 0;

h = hash % (le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
p = le64toh(f->field_hash_table[h].tail_hash_offset);
if (p == 0)
f->field_hash_table[h].head_hash_offset = htole64(offset);
else {
r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
if (r < 0)
return r;

o->field.next_hash_offset = htole64(offset);
}

f->field_hash_table[h].tail_hash_offset = htole64(offset);

if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);

return 0;
}

static int journal_file_link_data(
JournalFile *f,
Object *o,
uint64_t offset,
uint64_t hash) {

uint64_t p, h;
int r;

@@ -605,10 +653,10 @@ static int journal_file_link_data(JournalFile *f, Object *o, uint64_t offset, ui

h = hash % (le64toh(f->header->data_hash_table_size) / sizeof(HashItem));
p = le64toh(f->data_hash_table[h].tail_hash_offset);
if (p == 0) {
if (p == 0)
/* Only entry in the hash table is easy */
f->data_hash_table[h].head_hash_offset = htole64(offset);
} else {
else {
/* Move back to the previous data object, to patch in
* pointer */

@@ -627,6 +675,67 @@ static int journal_file_link_data(JournalFile *f, Object *o, uint64_t offset, ui
return 0;
}

int journal_file_find_field_object_with_hash(
JournalFile *f,
const void *field, uint64_t size, uint64_t hash,
Object **ret, uint64_t *offset) {

uint64_t p, osize, h;
int r;

assert(f);
assert(field && size > 0);

osize = offsetof(Object, field.payload) + size;

if (f->header->field_hash_table_size == 0)
return -EBADMSG;

h = hash % (le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
p = le64toh(f->field_hash_table[h].head_hash_offset);

while (p > 0) {
Object *o;

r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
if (r < 0)
return r;

if (le64toh(o->field.hash) == hash &&
le64toh(o->object.size) == osize &&
memcmp(o->field.payload, field, size) == 0) {

if (ret)
*ret = o;
if (offset)
*offset = p;

return 1;
}

p = le64toh(o->field.next_hash_offset);
}

return 0;
}

int journal_file_find_field_object(
JournalFile *f,
const void *field, uint64_t size,
Object **ret, uint64_t *offset) {

uint64_t hash;

assert(f);
assert(field && size > 0);

hash = hash64(field, size);

return journal_file_find_field_object_with_hash(f,
field, size, hash,
ret, offset);
}

int journal_file_find_data_object_with_hash(
JournalFile *f,
const void *data, uint64_t size, uint64_t hash,
@@ -720,6 +829,66 @@ int journal_file_find_data_object(
ret, offset);
}

static int journal_file_append_field(
JournalFile *f,
const void *field, uint64_t size,
Object **ret, uint64_t *offset) {

uint64_t hash, p;
uint64_t osize;
Object *o;
int r;

assert(f);
assert(field && size > 0);

hash = hash64(field, size);

r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);
if (r < 0)
return r;
else if (r > 0) {

if (ret)
*ret = o;

if (offset)
*offset = p;

return 0;
}

osize = offsetof(Object, field.payload) + size;
r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);

o->field.hash = htole64(hash);
memcpy(o->field.payload, field, size);

r = journal_file_link_field(f, o, p, hash);
if (r < 0)
return r;

/* The linking might have altered the window, so let's
* refresh our pointer */
r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
if (r < 0)
return r;

#ifdef HAVE_GCRYPT
r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
if (r < 0)
return r;
#endif

if (ret)
*ret = o;

if (offset)
*offset = p;

return 0;
}

static int journal_file_append_data(
JournalFile *f,
const void *data, uint64_t size,
@@ -730,6 +899,7 @@ static int journal_file_append_data(
Object *o;
int r;
bool compressed = false;
const void *eq;

assert(f);
assert(data || size == 0);
@@ -786,6 +956,21 @@ static int journal_file_append_data(
if (r < 0)
return r;

eq = memchr(data, '=', size);
if (eq && eq > data) {
uint64_t fp;
Object *fo;

/* Create field object ... */
r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
if (r < 0)
return r;

/* ... and link it in. */
o->data.next_field_offset = fo->field.head_data_offset;
fo->field.head_data_offset = le64toh(p);
}

#ifdef HAVE_GCRYPT
r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
if (r < 0)
@@ -1899,6 +2084,10 @@ void journal_file_dump(JournalFile *f) {
printf("Type: OBJECT_DATA\n");
break;

case OBJECT_FIELD:
printf("Type: OBJECT_FIELD\n");
break;

case OBJECT_ENTRY:
printf("Type: OBJECT_ENTRY seqnum=%llu monotonic=%llu realtime=%llu\n",
(unsigned long long) le64toh(o->entry.seqnum),
@@ -1923,6 +2112,10 @@ void journal_file_dump(JournalFile *f) {
(unsigned long long) le64toh(o->tag.seqnum),
(unsigned long long) le64toh(o->tag.epoch));
break;

default:
printf("Type: unknown (%u)\n", o->object.type);
break;
}

if (o->object.flags & OBJECT_COMPRESSED)
@@ -155,6 +155,9 @@ int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const st
int journal_file_find_data_object(JournalFile *f, const void *data, uint64_t size, Object **ret, uint64_t *offset);
int journal_file_find_data_object_with_hash(JournalFile *f, const void *data, uint64_t size, uint64_t hash, Object **ret, uint64_t *offset);

int journal_file_find_field_object(JournalFile *f, const void *field, uint64_t size, Object **ret, uint64_t *offset);
int journal_file_find_field_object_with_hash(JournalFile *f, const void *field, uint64_t size, uint64_t hash, Object **ret, uint64_t *offset);

int journal_file_next_entry(JournalFile *f, Object *o, uint64_t p, direction_t direction, Object **ret, uint64_t *offset);
int journal_file_skip_entry(JournalFile *f, Object *o, uint64_t p, int64_t skip, Object **ret, uint64_t *offset);

@@ -115,6 +115,10 @@ struct sd_journal {
Match *level0, *level1;

unsigned current_invalidate_counter, last_invalidate_counter;

char *unique_field;
JournalFile *unique_file;
uint64_t unique_offset;
};

char *journal_make_match_string(sd_journal *j);
@@ -74,6 +74,7 @@ static usec_t arg_interval = DEFAULT_FSS_INTERVAL_USEC;
static usec_t arg_since, arg_until;
static bool arg_since_set = false, arg_until_set = false;
static const char *arg_unit = NULL;
static const char *arg_field = NULL;

static enum {
ACTION_SHOW,
@@ -79,4 +79,7 @@ global:
LIBSYSTEMD_JOURNAL_195 {
global:
sd_journal_test_cursor;
sd_journal_query_unique;
sd_journal_enumerate_unique;
sd_journal_restart_unique;
} LIBSYSTEMD_JOURNAL_190;
@@ -1261,6 +1261,16 @@ static int remove_file(sd_journal *j, const char *prefix, const char *filename)

log_debug("File %s got removed.", f->path);

if (j->current_file == f) {
j->current_file = NULL;
j->current_field = 0;
}

if (j->unique_file == f) {
j->unique_file = NULL;
j->unique_offset = 0;
}

journal_file_close(f);

j->current_invalidate_counter ++;
@@ -1641,6 +1651,7 @@ _public_ void sd_journal_close(sd_journal *j) {
mmap_cache_unref(j->mmap);

free(j->path);
free(j->unique_field);
free(j);
}

@@ -1828,13 +1839,43 @@ _public_ int sd_journal_get_data(sd_journal *j, const char *field, const void **
return -ENOENT;
}

static int return_data(JournalFile *f, Object *o, const void **data, size_t *size) {
size_t t;
uint64_t l;

l = le64toh(o->object.size) - offsetof(Object, data.payload);
t = (size_t) l;

/* We can't read objects larger than 4G on a 32bit machine */
if ((uint64_t) t != l)
return -E2BIG;

if (o->object.flags & OBJECT_COMPRESSED) {
#ifdef HAVE_XZ
uint64_t rsize;

if (!uncompress_blob(o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize))
return -EBADMSG;

*data = f->compress_buffer;
*size = (size_t) rsize;
#else
return -EPROTONOSUPPORT;
#endif
} else {
*data = o->data.payload;
*size = t;
}

return 0;
}

_public_ int sd_journal_enumerate_data(sd_journal *j, const void **data, size_t *size) {
JournalFile *f;
uint64_t p, l, n;
uint64_t p, n;
le64_t le_hash;
int r;
Object *o;
size_t t;

if (!j)
return -EINVAL;
@@ -1867,29 +1908,9 @@ _public_ int sd_journal_enumerate_data(sd_journal *j, const void **data, size_t
if (le_hash != o->data.hash)
return -EBADMSG;

l = le64toh(o->object.size) - offsetof(Object, data.payload);
t = (size_t) l;

/* We can't read objects larger than 4G on a 32bit machine */
if ((uint64_t) t != l)
return -E2BIG;

if (o->object.flags & OBJECT_COMPRESSED) {
#ifdef HAVE_XZ
uint64_t rsize;

if (!uncompress_blob(o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize))
return -EBADMSG;

*data = f->compress_buffer;
*size = (size_t) rsize;
#else
return -EPROTONOSUPPORT;
#endif
} else {
*data = o->data.payload;
*size = t;
}
r = return_data(f, o, data, size);
if (r < 0)
return r;

j->current_field ++;

@@ -2186,27 +2207,138 @@ _public_ int sd_journal_get_usage(sd_journal *j, uint64_t *bytes) {
return 0;
}

/* _public_ int sd_journal_query_unique(sd_journal *j, const char *field) { */
/* if (!j) */
/* return -EINVAL; */
/* if (!field) */
/* return -EINVAL; */
_public_ int sd_journal_query_unique(sd_journal *j, const char *field) {
char *f;

/* return -ENOTSUP; */
/* } */
if (!j)
return -EINVAL;
if (isempty(field))
return -EINVAL;

/* _public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_t *l) { */
/* if (!j) */
/* return -EINVAL; */
/* if (!data) */
/* return -EINVAL; */
/* if (!l) */
/* return -EINVAL; */
f = strdup(field);
if (!f)
return -ENOMEM;

/* return -ENOTSUP; */
/* } */
free(j->unique_field);
j->unique_field = f;
j->unique_file = NULL;
j->unique_offset = 0;

/* _public_ void sd_journal_restart_unique(sd_journal *j) { */
/* if (!j) */
/* return; */
/* } */
return 0;
}

_public_ int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_t *l) {
Object *o;
size_t k;
int r;

if (!j)
return -EINVAL;
if (!data)
return -EINVAL;
if (!l)
return -EINVAL;
if (!j->unique_field)
return -EINVAL;

k = strlen(j->unique_field);

if (!j->unique_file) {
j->unique_file = hashmap_first(j->files);
if (!j->unique_file)
return 0;
j->unique_offset = 0;
}

for (;;) {
JournalFile *of;
Iterator i;
const void *odata;
size_t ol;
bool found;

/* Proceed to next data object in the field's linked list */
if (j->unique_offset == 0) {
r = journal_file_find_field_object(j->unique_file, j->unique_field, k, &o, NULL);
if (r < 0)
return r;

j->unique_offset = r > 0 ? le64toh(o->field.head_data_offset) : 0;
} else {
r = journal_file_move_to_object(j->unique_file, OBJECT_DATA, j->unique_offset, &o);
if (r < 0)
return r;

j->unique_offset = le64toh(o->data.next_field_offset);
}

/* We reached the end of the list? Then start again, with the next file */
if (j->unique_offset == 0) {
JournalFile *n;

n = hashmap_next(j->files, j->unique_file->path);
if (!n)
return 0;

j->unique_file = n;
continue;
}

/* We do not use the type context here, but 0 instead,
* so that we can look at this data object at the same
* time as one on another file */
r = journal_file_move_to_object(j->unique_file, 0, j->unique_offset, &o);
if (r < 0)
return r;

/* Let's do the type check by hand, since we used 0 context above. */
if (o->object.type != OBJECT_DATA)
return -EBADMSG;

r = return_data(j->unique_file, o, &odata, &ol);
if (r < 0)
return r;

/* OK, now let's see if we already returned this data
* object by checking if it exists in the earlier
* traversed files. */
found = false;
HASHMAP_FOREACH(of, j->files, i) {
Object *oo;
uint64_t op;

if (of == j->unique_file)
break;

/* Skip this file if it didn't have any fields
* indexed */
if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) &&
le64toh(of->header->n_fields) <= 0)
continue;

r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), &oo, &op);
if (r < 0)
return r;

if (r > 0)
found = true;
}

if (found)
continue;

r = return_data(j->unique_file, o, data, l);
if (r < 0)
return r;

return 1;
}
}

void sd_journal_restart_unique(sd_journal *j) {
if (!j)
return;

j->unique_file = NULL;
j->unique_offset = 0;
}
@@ -77,6 +77,8 @@ int main(int argc, char *argv[]) {
unsigned i;
sd_journal *j;
char *z;
const void *data;
size_t l;

log_set_max_level(LOG_DEBUG);

@@ -124,12 +126,10 @@ int main(int argc, char *argv[]) {

assert_se(sd_journal_add_match(j, "MAGIC=quux", 0) >= 0);
SD_JOURNAL_FOREACH_BACKWARDS(j) {
const void *d;
size_t l;
char *c;

assert_se(sd_journal_get_data(j, "NUMBER", &d, &l) >= 0);
printf("\t%.*s\n", (int) l, (const char*) d);
assert_se(sd_journal_get_data(j, "NUMBER", &data, &l) >= 0);
printf("\t%.*s\n", (int) l, (const char*) data);

assert_se(sd_journal_get_cursor(j, &c) >= 0);
assert_se(sd_journal_test_cursor(j, c) > 0);
@@ -137,12 +137,10 @@ int main(int argc, char *argv[]) {
}

SD_JOURNAL_FOREACH(j) {
const void *d;
size_t l;
char *c;

assert_se(sd_journal_get_data(j, "NUMBER", &d, &l) >= 0);
printf("\t%.*s\n", (int) l, (const char*) d);
assert_se(sd_journal_get_data(j, "NUMBER", &data, &l) >= 0);
printf("\t%.*s\n", (int) l, (const char*) data);

assert_se(sd_journal_get_cursor(j, &c) >= 0);
assert_se(sd_journal_test_cursor(j, c) > 0);
@@ -175,6 +173,10 @@ int main(int argc, char *argv[]) {

verify_contents(j, 0);

assert_se(sd_journal_query_unique(j, "NUMBER") >= 0);
SD_JOURNAL_FOREACH_UNIQUE(j, data, l)
printf("%.*s\n", (int) l, (const char*) data);

sd_journal_close(j);

assert_se(rm_rf_dangerous(t, false, true, false) >= 0);
@@ -33,7 +33,7 @@ int main(int argc, char *argv[]) {
dual_timestamp ts;
JournalFile *f;
struct iovec iovec;
static const char test[] = "test", test2[] = "test2";
static const char test[] = "TEST1=1", test2[] = "TEST2=2";
Object *o;
uint64_t p;
char t[] = "/tmp/journal-XXXXXX";
@@ -758,3 +758,25 @@ char **hashmap_get_strv(Hashmap *h) {

return sv;
}

void *hashmap_next(Hashmap *h, const void *key) {
unsigned hash;
struct hashmap_entry *e;

assert(h);
assert(key);

if (!h)
return NULL;

hash = h->hash_func(key) % NBUCKETS;
e = hash_scan(h, hash, key);
if (!e)
return NULL;

e = e->iterate_next;
if (!e)
return NULL;

return e->value;
}
@@ -79,6 +79,8 @@ void* hashmap_first(Hashmap *h);
void* hashmap_first_key(Hashmap *h);
void* hashmap_last(Hashmap *h);

void *hashmap_next(Hashmap *h, const void *key);

char **hashmap_get_strv(Hashmap *h);

#define HASHMAP_FOREACH(e, h, i) \
@@ -70,12 +70,20 @@ int sd_journal_stream_fd(const char *identifier, int priority, int level_prefix)

typedef struct sd_journal sd_journal;

/* Open flags */
enum {
SD_JOURNAL_LOCAL_ONLY = 1,
SD_JOURNAL_RUNTIME_ONLY = 2,
SD_JOURNAL_SYSTEM_ONLY = 4
};

/* Wakeup event types */
enum {
SD_JOURNAL_NOP,
SD_JOURNAL_APPEND,
SD_JOURNAL_INVALIDATE
};

int sd_journal_open(sd_journal **ret, int flags);
int sd_journal_open_directory(sd_journal **ret, const char *path, int flags);
void sd_journal_close(sd_journal *j);
@@ -111,15 +119,9 @@ int sd_journal_get_cutoff_monotonic_usec(sd_journal *j, const sd_id128_t boot_id

int sd_journal_get_usage(sd_journal *j, uint64_t *bytes);

/* int sd_journal_query_unique(sd_journal *j, const char *field); /\* missing *\/ */
/* int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_t *l); /\* missing *\/ */
/* void sd_journal_restart_unique(sd_journal *j); /\* missing *\/ */

enum {
SD_JOURNAL_NOP,
SD_JOURNAL_APPEND,
SD_JOURNAL_INVALIDATE
};
int sd_journal_query_unique(sd_journal *j, const char *field);
int sd_journal_enumerate_unique(sd_journal *j, const void **data, size_t *l);
void sd_journal_restart_unique(sd_journal *j);

int sd_journal_get_fd(sd_journal *j);
int sd_journal_process(sd_journal *j);
@@ -136,8 +138,8 @@ int sd_journal_wait(sd_journal *j, uint64_t timeout_usec);
#define SD_JOURNAL_FOREACH_DATA(j, data, l) \
for (sd_journal_restart_data(j); sd_journal_enumerate_data((j), &(data), &(l)) > 0; )

/* #define SD_JOURNAL_FOREACH_UNIQUE(j, data, l) \ */
/* for (sd_journal_restart_unique(j); sd_journal_enumerate_data((j), &(data), &(l)) > 0; ) */
#define SD_JOURNAL_FOREACH_UNIQUE(j, data, l) \
for (sd_journal_restart_unique(j); sd_journal_enumerate_unique((j), &(data), &(l)) > 0; )

#ifdef __cplusplus
}