staging/lustre/fld: Adjust comments to better conform to coding style

This patch fixes "Block comments use a trailing */ on a separate line"
warnings from checkpatch.

Signed-off-by: Oleg Drokin <green@linuxhacker.ru>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
Authored by Oleg Drokin on 2016-02-24 22:00:26 -05:00; committed by Greg Kroah-Hartman
parent c56e256d50
commit 52581b893f
3 changed files with 20 additions and 22 deletions

View File

@ -183,7 +183,8 @@ static void fld_fix_new_list(struct fld_cache *cache)
}
/* we could have overlap over next
* range too. better restart. */
* range too. better restart.
*/
goto restart_fixup;
}
@ -302,7 +303,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
const u32 mdt = range->lsr_index;
/* this is overlap case, these case are checking overlapping with
* prev range only. fixup will handle overlapping with next range. */
* prev range only. fixup will handle overlapping with next range.
*/
if (f_curr->fce_range.lsr_index == mdt) {
f_curr->fce_range.lsr_start = min(f_curr->fce_range.lsr_start,
@ -317,7 +319,8 @@ static void fld_cache_overlap_handle(struct fld_cache *cache,
} else if (new_start <= f_curr->fce_range.lsr_start &&
f_curr->fce_range.lsr_end <= new_end) {
/* case 1: new range completely overshadowed existing range.
* e.g. whole range migrated. update fld cache entry */
* e.g. whole range migrated. update fld cache entry
*/
f_curr->fce_range = *range;
kfree(f_new);

View File

@ -67,8 +67,7 @@ struct lu_fld_hash {
struct fld_cache_entry {
struct list_head fce_lru;
struct list_head fce_list;
/**
* fld cache entries are sorted on range->lsr_start field. */
/** fld cache entries are sorted on range->lsr_start field. */
struct lu_seq_range fce_range;
};
@ -79,32 +78,25 @@ struct fld_cache {
*/
rwlock_t fci_lock;
/**
* Cache shrink threshold */
/** Cache shrink threshold */
int fci_threshold;
/**
* Preferred number of cached entries */
/** Preferred number of cached entries */
int fci_cache_size;
/**
* Current number of cached entries. Protected by \a fci_lock */
/** Current number of cached entries. Protected by \a fci_lock */
int fci_cache_count;
/**
* LRU list fld entries. */
/** LRU list fld entries. */
struct list_head fci_lru;
/**
* sorted fld entries. */
/** sorted fld entries. */
struct list_head fci_entries_head;
/**
* Cache statistics. */
/** Cache statistics. */
struct fld_stats fci_stat;
/**
* Cache name used for debug and messages. */
/** Cache name used for debug and messages. */
char fci_name[LUSTRE_MDT_MAXNAMELEN];
unsigned int fci_no_shrink:1;
};

View File

@ -58,7 +58,8 @@
#include "fld_internal.h"
/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
* It should be common thing. The same about mdc RPC lock */
* It should be common thing. The same about mdc RPC lock
*/
static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
@ -124,7 +125,8 @@ fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
* it should go to index 0 directly, instead of calculating
* hash again, and also if other MDTs is not being connected,
* the fld lookup requests(for seq on MDT0) should not be
* blocked because of other MDTs */
* blocked because of other MDTs
*/
if (fid_seq_is_norm(seq))
hash = fld_rrb_hash(fld, seq);
else
@ -139,7 +141,8 @@ fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
if (hash != 0) {
/* It is possible the remote target(MDT) are not connected to
* with client yet, so we will refer this to MDT0, which should
* be connected during mount */
* be connected during mount
*/
hash = 0;
goto again;
}