#ifndef _LINUX_PIPE_FS_I_H
#define _LINUX_PIPE_FS_I_H

#define PIPE_DEF_BUFFERS	16

#define PIPE_BUF_FLAG_LRU	0x01	/* page is on the LRU */
#define PIPE_BUF_FLAG_ATOMIC	0x02	/* was atomically mapped */
#define PIPE_BUF_FLAG_GIFT	0x04	/* page is a gift */
#define PIPE_BUF_FLAG_PACKET	0x08	/* read() as a packet */
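
/*
 * Illustrative sketch (not part of the original header): PIPE_BUF_FLAG_PACKET
 * backs the "packetized" pipe mode.  A pipe created with O_DIRECT, roughly
 *
 *	int fds[2];
 *	pipe2(fds, O_DIRECT);
 *
 * marks each written buffer as a packet, and a read() then consumes at most
 * one packet's worth of data.
 */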
/**
 * struct pipe_buffer - a linux kernel pipe buffer
 * @page: the page containing the data for the pipe buffer
 * @offset: offset of data inside the @page
 * @len: length of data inside the @page
 * @ops: operations associated with this buffer. See @pipe_buf_operations.
 * @flags: pipe buffer flags. See above.
 * @private: private data owned by the ops.
 **/
struct pipe_buffer {
	struct page *page;
	unsigned int offset, len;
	const struct pipe_buf_operations *ops;
	unsigned int flags;
	unsigned long private;
};
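
/*
 * Illustrative sketch (not part of the original header): @offset and @len
 * describe the window of valid data inside @page, so a reader that only
 * consumes part of a buffer typically just advances the window:
 *
 *	buf->offset += chars;
 *	buf->len -= chars;
 *
 * and calls buf->ops->release() once @len reaches zero.
 */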
/**
 * struct pipe_inode_info - a linux kernel pipe
 * @wait: reader/writer wait point in case of empty/full pipe
 * @nrbufs: the number of non-empty pipe buffers in this pipe
 * @buffers: total number of buffers (should be a power of 2)
 * @curbuf: the current pipe buffer entry
 * @tmp_page: cached released page
 * @readers: number of current readers of this pipe
 * @writers: number of current writers of this pipe
 * @waiting_writers: number of writers blocked waiting for room
 * @r_counter: reader counter
 * @w_counter: writer counter
 * @fasync_readers: reader side fasync
 * @fasync_writers: writer side fasync
 * @inode: inode this pipe is attached to
 * @bufs: the circular array of pipe buffers
 **/
struct pipe_inode_info {
	wait_queue_head_t wait;
	unsigned int nrbufs, curbuf, buffers;
	unsigned int readers;
	unsigned int writers;
	unsigned int waiting_writers;
	unsigned int r_counter;
	unsigned int w_counter;
	struct page *tmp_page;
	struct fasync_struct *fasync_readers;
	struct fasync_struct *fasync_writers;
	struct inode *inode;
	struct pipe_buffer *bufs;
};
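
/*
 * Illustrative sketch (not part of the original header): @curbuf, @nrbufs
 * and @bufs form a ring.  Because @buffers is a power of 2, the i-th
 * occupied slot can be found with a mask instead of a modulo, roughly:
 *
 *	struct pipe_buffer *buf;
 *
 *	buf = pipe->bufs + ((pipe->curbuf + i) & (pipe->buffers - 1));
 */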
/*
 * Note on the nesting of these functions:
 *
 * ->confirm()
 *	->steal()
 *	...
 *	->map()
 *	...
 *	->unmap()
 *
 * That is, ->map() must be called on a confirmed buffer; the same goes
 * for ->steal(). See below for the meaning of each operation. Also see
 * kerneldoc in fs/pipe.c for the pipe and generic variants of these hooks.
 */
struct pipe_buf_operations {
	/*
	 * This is set to 1 if the generic pipe read/write may coalesce
	 * data into an existing buffer. If this is set to 0, a new pipe
	 * page segment is always used for new data.
	 */
	int can_merge;

	/*
	 * ->map() returns a virtual address mapping of the pipe buffer.
	 * The last integer flag reflects whether this should be an atomic
	 * mapping or not. The atomic map is faster, but you cannot take
	 * page faults before calling ->unmap() again. So if you need to,
	 * e.g., access user data through copy_to/from_user(), you must get
	 * a non-atomic map. ->map() uses the kmap_atomic slot for atomic
	 * maps, so be careful when mapping another page as the source or
	 * destination of a copy.
	 */
	void * (*map)(struct pipe_inode_info *, struct pipe_buffer *, int);

	/*
	 * Undoes ->map(), finishes the virtual mapping of the pipe buffer.
	 */
	void (*unmap)(struct pipe_inode_info *, struct pipe_buffer *, void *);

	/*
	 * ->confirm() verifies that the data in the pipe buffer is there
	 * and that the contents are good. If the pages in the pipe belong
	 * to a file system, we may need to wait for IO completion in this
	 * hook. Returns 0 for good, or a negative error value in case of
	 * error.
	 */
	int (*confirm)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * When the contents of this pipe buffer have been completely
	 * consumed by a reader, ->release() is called.
	 */
	void (*release)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Attempt to take ownership of the pipe buffer and its contents.
	 * ->steal() returns 0 for success, in which case the contents of
	 * the pipe (the buf->page) are locked and now completely owned by
	 * the caller. The page may then be transferred to a different
	 * mapping; the most common case is insertion into a different
	 * file's address space cache.
	 */
	int (*steal)(struct pipe_inode_info *, struct pipe_buffer *);

	/*
	 * Get a reference to the pipe buffer.
	 */
	void (*get)(struct pipe_inode_info *, struct pipe_buffer *);
};
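
/*
 * Illustrative sketch (not part of the original header): a consumer of a
 * pipe buffer typically chains these hooks roughly as follows:
 *
 *	if (!buf->ops->confirm(pipe, buf)) {
 *		void *addr = buf->ops->map(pipe, buf, 1);
 *
 *		memcpy(kbuf, addr + buf->offset, buf->len);
 *		buf->ops->unmap(pipe, buf, addr);
 *		buf->ops->release(pipe, buf);
 *	}
 *
 * where kbuf is some in-kernel destination buffer; copying to user space
 * with copy_to_user() would need a non-atomic map (third argument 0).
 */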
/* Differs from PIPE_BUF in that PIPE_SIZE is the length of the actual
   memory allocation, whereas PIPE_BUF makes atomicity guarantees.  */
#define PIPE_SIZE		PAGE_SIZE
/* Pipe lock and unlock operations */
void pipe_lock(struct pipe_inode_info *);
void pipe_unlock(struct pipe_inode_info *);
void pipe_double_lock(struct pipe_inode_info *, struct pipe_inode_info *);
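
/*
 * Illustrative sketch (not part of the original header): when buffers are
 * moved between two pipes (as splice/tee do), both pipes are locked with
 * pipe_double_lock(), which picks a consistent ordering to avoid deadlock:
 *
 *	pipe_double_lock(ipipe, opipe);
 *	... transfer or link pipe_buffers between the two rings ...
 *	pipe_unlock(ipipe);
 *	pipe_unlock(opipe);
 */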
extern unsigned int pipe_max_size, pipe_min_size;
int pipe_proc_fn(struct ctl_table *, int, void __user *, size_t *, loff_t *);
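
/*
 * Illustrative note (not part of the original header): pipe_max_size is the
 * upper bound enforced on unprivileged F_SETPIPE_SZ requests, and
 * pipe_proc_fn() is the handler behind the sysctl that tunes it, e.g.
 * /proc/sys/fs/pipe-max-size.
 */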
/* Drop the inode semaphore and wait for a pipe event, atomically */
void pipe_wait(struct pipe_inode_info *pipe);
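
/*
 * Illustrative sketch (not part of the original header): callers hold the
 * pipe lock, let pipe_wait() drop it while they sleep, and recheck their
 * condition once it returns.  A reader waiting for data looks roughly like:
 *
 *	while (!pipe->nrbufs) {
 *		if (!pipe->writers)
 *			break;
 *		pipe_wait(pipe);
 *	}
 *
 * where the break handles EOF (no writers left).
 */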
struct pipe_inode_info * alloc_pipe_info(struct inode * inode);
void free_pipe_info(struct inode * inode);
void __free_pipe_info(struct pipe_inode_info *);
/* Generic pipe buffer ops functions */
void *generic_pipe_buf_map(struct pipe_inode_info *, struct pipe_buffer *, int);
void generic_pipe_buf_unmap(struct pipe_inode_info *, struct pipe_buffer *, void *);
void generic_pipe_buf_get(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_confirm(struct pipe_inode_info *, struct pipe_buffer *);
int generic_pipe_buf_steal(struct pipe_inode_info *, struct pipe_buffer *);
void generic_pipe_buf_release(struct pipe_inode_info *, struct pipe_buffer *);
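
/*
 * Illustrative sketch (not part of the original header): a simple in-memory
 * pipe can wire its pipe_buf_operations almost entirely from the generic
 * helpers above, roughly:
 *
 *	static const struct pipe_buf_operations example_pipe_buf_ops = {
 *		.can_merge	= 1,
 *		.map		= generic_pipe_buf_map,
 *		.unmap		= generic_pipe_buf_unmap,
 *		.confirm	= generic_pipe_buf_confirm,
 *		.release	= generic_pipe_buf_release,
 *		.steal		= generic_pipe_buf_steal,
 *		.get		= generic_pipe_buf_get,
 *	};
 *
 * example_pipe_buf_ops is a made-up name; fs/pipe.c defines the real
 * anonymous-pipe ops table along these lines.
 */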
/* for F_SETPIPE_SZ and F_GETPIPE_SZ */
long pipe_fcntl(struct file *, unsigned int, unsigned long arg);
struct pipe_inode_info *get_pipe_info(struct file *file);
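
/*
 * Illustrative sketch (not part of the original header): pipe_fcntl() backs
 * the userspace pipe-resizing interface, e.g.
 *
 *	fcntl(pipefd[1], F_SETPIPE_SZ, 1 << 20);
 *	long size = fcntl(pipefd[1], F_GETPIPE_SZ, 0);
 *
 * where pipefd is a made-up name for descriptors from a prior pipe() call.
 */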
int create_pipe_files(struct file **, int);

#endif