/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */
|
|
|
|
|
|
|
|
#ifndef __BTRFS_ASYNC_THREAD_
#define __BTRFS_ASYNC_THREAD_

struct btrfs_worker_thread;

/*
 * This is similar to a workqueue, but it is meant to spread the operations
 * across all available cpus instead of just the CPU that was used to
 * queue the work.  There is also some batching introduced to try and
 * cut down on context switches.
 *
 * By default threads are added on demand up to 2 * the number of cpus.
 * Changing struct btrfs_workers->max_workers is one way to prevent
 * demand creation of kthreads.
 *
 * The basic model of these worker threads is to embed a btrfs_work
 * structure in your own data struct, and use container_of in a
 * work function to get back to your data struct.
 */
|
|
|
|
struct btrfs_work {
|
|
|
|
/*
|
Btrfs: Add ordered async work queues
Btrfs uses kernel threads to create async work queues for cpu intensive
operations such as checksumming and decompression. These work well,
but they make it difficult to keep IO order intact.
A single writepages call from pdflush or fsync will turn into a number
of bios, and each bio is checksummed in parallel. Once the checksum is
computed, the bio is sent down to the disk, and since we don't control
the order in which the parallel operations happen, they might go down to
the disk in almost any order.
The code deals with this somewhat by having deep work queues for a single
kernel thread, making it very likely that a single thread will process all
the bios for a single inode.
This patch introduces an explicitly ordered work queue. As work structs
are placed into the queue they are put onto the tail of a list. They have
three callbacks:
->func (cpu intensive processing here)
->ordered_func (order sensitive processing here)
->ordered_free (free the work struct, all processing is done)
The work struct has three callbacks. The func callback does the cpu intensive
work, and when it completes the work struct is marked as done.
Every time a work struct completes, the list is checked to see if the head
is marked as done. If so the ordered_func callback is used to do the
order sensitive processing and the ordered_free callback is used to do
any cleanup. Then we loop back and check the head of the list again.
This patch also changes the checksumming code to use the ordered workqueues.
One a 4 drive array, it increases streaming writes from 280MB/s to 350MB/s.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
2008-11-07 10:03:00 +07:00
|
|
|
* func should be set to the function you want called
|
2008-06-12 03:50:36 +07:00
|
|
|
* your work struct is passed as the only arg
|
Btrfs: Add ordered async work queues
Btrfs uses kernel threads to create async work queues for cpu intensive
operations such as checksumming and decompression. These work well,
but they make it difficult to keep IO order intact.
A single writepages call from pdflush or fsync will turn into a number
of bios, and each bio is checksummed in parallel. Once the checksum is
computed, the bio is sent down to the disk, and since we don't control
the order in which the parallel operations happen, they might go down to
the disk in almost any order.
The code deals with this somewhat by having deep work queues for a single
kernel thread, making it very likely that a single thread will process all
the bios for a single inode.
This patch introduces an explicitly ordered work queue. As work structs
are placed into the queue they are put onto the tail of a list. They have
three callbacks:
->func (cpu intensive processing here)
->ordered_func (order sensitive processing here)
->ordered_free (free the work struct, all processing is done)
The work struct has three callbacks. The func callback does the cpu intensive
work, and when it completes the work struct is marked as done.
Every time a work struct completes, the list is checked to see if the head
is marked as done. If so the ordered_func callback is used to do the
order sensitive processing and the ordered_free callback is used to do
any cleanup. Then we loop back and check the head of the list again.
This patch also changes the checksumming code to use the ordered workqueues.
One a 4 drive array, it increases streaming writes from 280MB/s to 350MB/s.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
2008-11-07 10:03:00 +07:00
|
|
|
*
|
|
|
|
* ordered_func must be set for work sent to an ordered work queue,
|
|
|
|
* and it is called to complete a given work item in the same
|
|
|
|
* order they were sent to the queue.
|
2008-06-12 03:50:36 +07:00
|
|
|
*/
|
|
|
|
void (*func)(struct btrfs_work *work);
|
Btrfs: Add ordered async work queues
Btrfs uses kernel threads to create async work queues for cpu intensive
operations such as checksumming and decompression. These work well,
but they make it difficult to keep IO order intact.
A single writepages call from pdflush or fsync will turn into a number
of bios, and each bio is checksummed in parallel. Once the checksum is
computed, the bio is sent down to the disk, and since we don't control
the order in which the parallel operations happen, they might go down to
the disk in almost any order.
The code deals with this somewhat by having deep work queues for a single
kernel thread, making it very likely that a single thread will process all
the bios for a single inode.
This patch introduces an explicitly ordered work queue. As work structs
are placed into the queue they are put onto the tail of a list. They have
three callbacks:
->func (cpu intensive processing here)
->ordered_func (order sensitive processing here)
->ordered_free (free the work struct, all processing is done)
The work struct has three callbacks. The func callback does the cpu intensive
work, and when it completes the work struct is marked as done.
Every time a work struct completes, the list is checked to see if the head
is marked as done. If so the ordered_func callback is used to do the
order sensitive processing and the ordered_free callback is used to do
any cleanup. Then we loop back and check the head of the list again.
This patch also changes the checksumming code to use the ordered workqueues.
One a 4 drive array, it increases streaming writes from 280MB/s to 350MB/s.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
2008-11-07 10:03:00 +07:00
|
|
|
void (*ordered_func)(struct btrfs_work *work);
|
|
|
|
void (*ordered_free)(struct btrfs_work *work);
|
2008-06-12 03:50:36 +07:00
|
|
|
|
|
|
|
/*
|
|
|
|
* flags should be set to zero. It is used to make sure the
|
|
|
|
* struct is only inserted once into the list.
|
|
|
|
*/
|
|
|
|
unsigned long flags;
|
|
|
|
|
|
|
|
/* don't touch these */
|
|
|
|
struct btrfs_worker_thread *worker;
|
|
|
|
struct list_head list;
|
Btrfs: Add ordered async work queues
Btrfs uses kernel threads to create async work queues for cpu intensive
operations such as checksumming and decompression. These work well,
but they make it difficult to keep IO order intact.
A single writepages call from pdflush or fsync will turn into a number
of bios, and each bio is checksummed in parallel. Once the checksum is
computed, the bio is sent down to the disk, and since we don't control
the order in which the parallel operations happen, they might go down to
the disk in almost any order.
The code deals with this somewhat by having deep work queues for a single
kernel thread, making it very likely that a single thread will process all
the bios for a single inode.
This patch introduces an explicitly ordered work queue. As work structs
are placed into the queue they are put onto the tail of a list. They have
three callbacks:
->func (cpu intensive processing here)
->ordered_func (order sensitive processing here)
->ordered_free (free the work struct, all processing is done)
The work struct has three callbacks. The func callback does the cpu intensive
work, and when it completes the work struct is marked as done.
Every time a work struct completes, the list is checked to see if the head
is marked as done. If so the ordered_func callback is used to do the
order sensitive processing and the ordered_free callback is used to do
any cleanup. Then we loop back and check the head of the list again.
This patch also changes the checksumming code to use the ordered workqueues.
One a 4 drive array, it increases streaming writes from 280MB/s to 350MB/s.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
2008-11-07 10:03:00 +07:00
|
|
|
struct list_head order_list;
|
2008-06-12 03:50:36 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
struct btrfs_workers {
|
|
|
|
/* current number of running workers */
|
|
|
|
int num_workers;
|
|
|
|
|
2009-10-03 06:11:56 +07:00
|
|
|
int num_workers_starting;
|
|
|
|
|
2008-06-12 03:50:36 +07:00
|
|
|
/* max number of workers allowed. changed by btrfs_start_workers */
|
|
|
|
int max_workers;
|
|
|
|
|
2008-06-12 07:21:24 +07:00
|
|
|
/* once a worker has this many requests or fewer, it is idle */
|
|
|
|
int idle_thresh;
|
|
|
|
|
Btrfs: Add ordered async work queues
Btrfs uses kernel threads to create async work queues for cpu intensive
operations such as checksumming and decompression. These work well,
but they make it difficult to keep IO order intact.
A single writepages call from pdflush or fsync will turn into a number
of bios, and each bio is checksummed in parallel. Once the checksum is
computed, the bio is sent down to the disk, and since we don't control
the order in which the parallel operations happen, they might go down to
the disk in almost any order.
The code deals with this somewhat by having deep work queues for a single
kernel thread, making it very likely that a single thread will process all
the bios for a single inode.
This patch introduces an explicitly ordered work queue. As work structs
are placed into the queue they are put onto the tail of a list. They have
three callbacks:
->func (cpu intensive processing here)
->ordered_func (order sensitive processing here)
->ordered_free (free the work struct, all processing is done)
The work struct has three callbacks. The func callback does the cpu intensive
work, and when it completes the work struct is marked as done.
Every time a work struct completes, the list is checked to see if the head
is marked as done. If so the ordered_func callback is used to do the
order sensitive processing and the ordered_free callback is used to do
any cleanup. Then we loop back and check the head of the list again.
This patch also changes the checksumming code to use the ordered workqueues.
One a 4 drive array, it increases streaming writes from 280MB/s to 350MB/s.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
2008-11-07 10:03:00 +07:00
|
|
|
/* force completions in the order they were queued */
|
|
|
|
int ordered;
|
|
|
|
|
2009-08-05 03:56:34 +07:00
|
|
|
/* more workers required, but in an interrupt handler */
|
|
|
|
int atomic_start_pending;
|
|
|
|
|
|
|
|
/*
|
|
|
|
* are we allowed to sleep while starting workers or are we required
|
2009-10-03 06:11:56 +07:00
|
|
|
* to start them at a later time? If we can't sleep, this indicates
|
|
|
|
* which queue we need to use to schedule thread creation.
|
2009-08-05 03:56:34 +07:00
|
|
|
*/
|
2009-10-03 06:11:56 +07:00
|
|
|
struct btrfs_workers *atomic_worker_start;
|
2009-08-05 03:56:34 +07:00
|
|
|
|
2008-09-30 02:18:18 +07:00
|
|
|
/* list with all the work threads. The workers on the idle thread
|
|
|
|
* may be actively servicing jobs, but they haven't yet hit the
|
|
|
|
* idle thresh limit above.
|
|
|
|
*/
|
2008-06-12 03:50:36 +07:00
|
|
|
struct list_head worker_list;
|
2008-06-12 07:21:24 +07:00
|
|
|
struct list_head idle_list;
|
2008-06-12 03:50:36 +07:00
|
|
|
|
Btrfs: Add ordered async work queues
Btrfs uses kernel threads to create async work queues for cpu intensive
operations such as checksumming and decompression. These work well,
but they make it difficult to keep IO order intact.
A single writepages call from pdflush or fsync will turn into a number
of bios, and each bio is checksummed in parallel. Once the checksum is
computed, the bio is sent down to the disk, and since we don't control
the order in which the parallel operations happen, they might go down to
the disk in almost any order.
The code deals with this somewhat by having deep work queues for a single
kernel thread, making it very likely that a single thread will process all
the bios for a single inode.
This patch introduces an explicitly ordered work queue. As work structs
are placed into the queue they are put onto the tail of a list. They have
three callbacks:
->func (cpu intensive processing here)
->ordered_func (order sensitive processing here)
->ordered_free (free the work struct, all processing is done)
The work struct has three callbacks. The func callback does the cpu intensive
work, and when it completes the work struct is marked as done.
Every time a work struct completes, the list is checked to see if the head
is marked as done. If so the ordered_func callback is used to do the
order sensitive processing and the ordered_free callback is used to do
any cleanup. Then we loop back and check the head of the list again.
This patch also changes the checksumming code to use the ordered workqueues.
One a 4 drive array, it increases streaming writes from 280MB/s to 350MB/s.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
2008-11-07 10:03:00 +07:00
|
|
|
/*
|
|
|
|
* when operating in ordered mode, this maintains the list
|
|
|
|
* of work items waiting for completion
|
|
|
|
*/
|
|
|
|
struct list_head order_list;
|
2009-04-21 02:50:09 +07:00
|
|
|
struct list_head prio_order_list;
|
Btrfs: Add ordered async work queues
Btrfs uses kernel threads to create async work queues for cpu intensive
operations such as checksumming and decompression. These work well,
but they make it difficult to keep IO order intact.
A single writepages call from pdflush or fsync will turn into a number
of bios, and each bio is checksummed in parallel. Once the checksum is
computed, the bio is sent down to the disk, and since we don't control
the order in which the parallel operations happen, they might go down to
the disk in almost any order.
The code deals with this somewhat by having deep work queues for a single
kernel thread, making it very likely that a single thread will process all
the bios for a single inode.
This patch introduces an explicitly ordered work queue. As work structs
are placed into the queue they are put onto the tail of a list. They have
three callbacks:
->func (cpu intensive processing here)
->ordered_func (order sensitive processing here)
->ordered_free (free the work struct, all processing is done)
The work struct has three callbacks. The func callback does the cpu intensive
work, and when it completes the work struct is marked as done.
Every time a work struct completes, the list is checked to see if the head
is marked as done. If so the ordered_func callback is used to do the
order sensitive processing and the ordered_free callback is used to do
any cleanup. Then we loop back and check the head of the list again.
This patch also changes the checksumming code to use the ordered workqueues.
One a 4 drive array, it increases streaming writes from 280MB/s to 350MB/s.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
2008-11-07 10:03:00 +07:00
|
|
|
|
2008-06-12 03:50:36 +07:00
|
|
|
/* lock for finding the next worker thread to queue on */
|
|
|
|
spinlock_t lock;
|
2008-08-16 02:34:16 +07:00
|
|
|
|
2009-08-06 03:36:45 +07:00
|
|
|
/* lock for the ordered lists */
|
|
|
|
spinlock_t order_lock;
|
|
|
|
|
2008-09-30 02:18:18 +07:00
|
|
|
/* extra name for this worker, used for current->name */
|
2008-08-16 02:34:16 +07:00
|
|
|
char *name;
|
2008-06-12 03:50:36 +07:00
|
|
|
};
|
|
|
|
|
|
|
|
/* place a work item on a worker's queue, waking/creating threads as needed */
int btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work);
/* spawn num_workers kthreads for this pool */
int btrfs_start_workers(struct btrfs_workers *workers, int num_workers);
/* stop all kthreads in the pool */
int btrfs_stop_workers(struct btrfs_workers *workers);
/* initialize a pool; async_starter handles deferred (atomic) thread creation */
void btrfs_init_workers(struct btrfs_workers *workers, char *name, int max,
			struct btrfs_workers *async_starter);
/* put an already-run work item back on its worker's queue */
int btrfs_requeue_work(struct btrfs_work *work);
/* mark a work item for the high priority ordered list */
void btrfs_set_work_high_prio(struct btrfs_work *work);
|
2008-06-12 03:50:36 +07:00
|
|
|
#endif
|