s390/checksum: coding style changes

Add some coding style changes which hopefully make the code
look a bit less odd.

Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
Heiko Carstens 2020-08-11 16:41:27 +02:00 committed by Vasily Gorbik
parent 612ad0785d
commit 98ad45fb58

View File

@@ -16,19 +16,18 @@
#include <linux/in6.h>
/*
 * Computes the checksum of a memory block at buff, length len,
 * and adds in "sum" (32-bit).
 *
 * Returns a 32-bit number suitable for feeding into itself
 * or csum_tcpudp_magic.
 *
 * This function must be called with even lengths, except
 * for the last fragment, which may be odd.
 *
 * It's best to have buff aligned on a 32-bit boundary.
 */
static inline __wsum
csum_partial(const void *buff, int len, __wsum sum)
static inline __wsum csum_partial(const void *buff, int len, __wsum sum)
{
register unsigned long reg2 asm("2") = (unsigned long) buff;
register unsigned long reg3 asm("3") = (unsigned long) len;
@@ -40,15 +39,15 @@ csum_partial(const void *buff, int len, __wsum sum)
return sum;
}
/*
 * Copy len bytes from src to dst and return the checksum of the
 * copied data with "sum" (32-bit) added in.
 *
 * "nocheck" variant: no access checking is performed on either
 * buffer; both must be valid kernel addresses.
 */
static inline __wsum csum_partial_copy_nocheck(const void *src, void *dst,
					       int len, __wsum sum)
{
	memcpy(dst, src, len);
	return csum_partial(dst, len, sum);
}
/*
 * Fold a partial checksum without adding pseudo headers.
 */
static inline __sum16 csum_fold(__wsum sum)
{
@@ -60,9 +59,8 @@ static inline __sum16 csum_fold(__wsum sum)
}
/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksums on 4 octet boundaries.
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
{
@@ -81,8 +79,8 @@ static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
}
/*
 * Computes the checksum of the TCP/UDP pseudo-header.
 * Returns a 32-bit checksum.
 */
static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
__u8 proto, __wsum sum)
@@ -98,22 +96,18 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr, __u32 len,
}
/*
 * Computes the checksum of the TCP/UDP pseudo-header.
 * Returns a 16-bit checksum, already complemented.
 *
 * Thin wrapper: builds the unfolded pseudo-header sum via
 * csum_tcpudp_nofold() and folds it to 16 bits with csum_fold().
 */
static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
					__u8 proto, __wsum sum)
{
	return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
}
/*
 * Used for miscellaneous IP-like checksums, mainly icmp.
 */
static inline __sum16 ip_compute_csum(const void *buff, int len)
{
return csum_fold(csum_partial(buff, len, 0));