033d9959ed

Pull workqueue changes from Tejun Heo:
 "This is workqueue updates for v3.7-rc1. A lot of activities this round
  including considerable API and behavior cleanups.

  * delayed_work combines a timer and a work item. The handling of the
    timer part has always been a bit clunky leading to confusing
    cancelation API with weird corner-case behaviors. delayed_work is
    updated to use new IRQ safe timer and cancelation now works as
    expected.

  * Another deficiency of delayed_work was lack of the counterpart of
    mod_timer() which led to cancel+queue combinations or open-coded
    timer+work usages. mod_delayed_work[_on]() are added.

    These two delayed_work changes make delayed_work provide interface
    and behave like timer which is executed with process context.

  * A work item could be executed concurrently on multiple CPUs, which
    is rather unintuitive and made flush_work() behavior confusing and
    half-broken under certain circumstances. This problem doesn't exist
    for non-reentrant workqueues. While non-reentrancy check isn't free,
    the overhead is incurred only when a work item bounces across
    different CPUs and even in simulated pathological scenario the
    overhead isn't too high.

    All workqueues are made non-reentrant. This removes the distinction
    between flush_[delayed_]work() and flush_[delayed_]work_sync(). The
    former is now as strong as the latter and the specified work item is
    guaranteed to have finished execution of any previous queueing on
    return.

  * In addition to the various bug fixes, Lai redid and simplified CPU
    hotplug handling significantly.

  * Joonsoo introduced system_highpri_wq and used it during CPU hotplug.

  There are two merge commits - one to pull in IRQ safe timer from
  tip/timers/core and the other to pull in CPU hotplug fixes from
  wq/for-3.6-fixes as Lai's hotplug restructuring depended on them."

Fixed a number of trivial conflicts, but the more interesting conflicts
were silent ones where the deprecated interfaces had been used by new
code in the merge window, and thus didn't cause any real data conflicts.
Tejun pointed out a few of them, I fixed a couple more.

* 'for-3.7' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq: (46 commits)
  workqueue: remove spurious WARN_ON_ONCE(in_irq()) from try_to_grab_pending()
  workqueue: use cwq_set_max_active() helper for workqueue_set_max_active()
  workqueue: introduce cwq_set_max_active() helper for thaw_workqueues()
  workqueue: remove @delayed from cwq_dec_nr_in_flight()
  workqueue: fix possible stall on try_to_grab_pending() of a delayed work item
  workqueue: use hotcpu_notifier() for workqueue_cpu_down_callback()
  workqueue: use __cpuinit instead of __devinit for cpu callbacks
  workqueue: rename manager_mutex to assoc_mutex
  workqueue: WORKER_REBIND is no longer necessary for idle rebinding
  workqueue: WORKER_REBIND is no longer necessary for busy rebinding
  workqueue: reimplement idle worker rebinding
  workqueue: deprecate __cancel_delayed_work()
  workqueue: reimplement cancel_delayed_work() using try_to_grab_pending()
  workqueue: use mod_delayed_work() instead of __cancel + queue
  workqueue: use irqsafe timer for delayed_work
  workqueue: clean up delayed_work initializers and add missing one
  workqueue: make deferrable delayed_work initializer names consistent
  workqueue: cosmetic whitespace updates for macro definitions
  workqueue: deprecate system_nrt[_freezable]_wq
  workqueue: deprecate flush[_delayed]_work_sync()
  ...
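The delayed_work changes described above are easiest to see side by side. Below is a
minimal sketch (not taken from this tree; the work items and the 100 ms delay are made
up for illustration) of the old cancel-plus-queue pattern versus the new
mod_delayed_work() call, and of flush_work() now being as strong as the deprecated
flush_work_sync():

        /* hypothetical work items, for illustration only */
        static struct delayed_work example_dwork;
        static struct work_struct example_work;

        /* before: clunky two-step rearm of a delayed work item */
        cancel_delayed_work(&example_dwork);
        queue_delayed_work(system_wq, &example_dwork, msecs_to_jiffies(100));

        /* v3.7: single call that behaves like mod_timer() for work items */
        mod_delayed_work(system_wq, &example_dwork, msecs_to_jiffies(100));

        /* all workqueues are non-reentrant now, so plain flush_work()
         * guarantees the last queued instance has finished */
        flush_work(&example_work);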
/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/mISDNhw.h>

/*
 * D-channel bottom half: deliver queued receive frames to the upper
 * layer and run the PH state change callback if one is pending.
 */
static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel *dch = container_of(ws, struct dchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

/*
 * B-channel bottom half: deliver queued receive frames to the upper layer.
 */
static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel *bch = container_of(ws, struct bchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

int
mISDN_initbchannel(struct bchannel *ch, unsigned short maxlen,
		   unsigned short minlen)
{
	ch->Flags = 0;
	ch->minlen = minlen;
	ch->next_minlen = minlen;
	ch->init_minlen = minlen;
	ch->maxlen = maxlen;
	ch->next_maxlen = maxlen;
	ch->init_maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);

int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_work(&ch->workq);
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

void
mISDN_clear_bchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	ch->tx_idx = 0;
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
	test_and_clear_bit(FLG_FILLEMPTY, &ch->Flags);
	test_and_clear_bit(FLG_TX_EMPTY, &ch->Flags);
	test_and_clear_bit(FLG_RX_OFF, &ch->Flags);
	ch->dropcnt = 0;
	ch->minlen = ch->init_minlen;
	ch->next_minlen = ch->init_minlen;
	ch->maxlen = ch->init_maxlen;
	ch->next_maxlen = ch->init_maxlen;
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
}
EXPORT_SYMBOL(mISDN_clear_bchannel);

void
mISDN_freebchannel(struct bchannel *ch)
{
	cancel_work_sync(&ch->workq);
	mISDN_clear_bchannel(ch);
}
EXPORT_SYMBOL(mISDN_freebchannel);

int
mISDN_ctrl_bchannel(struct bchannel *bch, struct mISDN_ctrl_req *cq)
{
	int ret = 0;

	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_RX_BUFFER | MISDN_CTRL_FILL_EMPTY |
			 MISDN_CTRL_RX_OFF;
		break;
	case MISDN_CTRL_FILL_EMPTY:
		if (cq->p1) {
			memset(bch->fill, cq->p2 & 0xff, MISDN_BCH_FILL_SIZE);
			test_and_set_bit(FLG_FILLEMPTY, &bch->Flags);
		} else {
			test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
		}
		break;
	case MISDN_CTRL_RX_OFF:
		/* read back dropped byte count */
		cq->p2 = bch->dropcnt;
		if (cq->p1)
			test_and_set_bit(FLG_RX_OFF, &bch->Flags);
		else
			test_and_clear_bit(FLG_RX_OFF, &bch->Flags);
		bch->dropcnt = 0;
		break;
	case MISDN_CTRL_RX_BUFFER:
		if (cq->p2 > MISDN_CTRL_RX_SIZE_IGNORE)
			bch->next_maxlen = cq->p2;
		if (cq->p1 > MISDN_CTRL_RX_SIZE_IGNORE)
			bch->next_minlen = cq->p1;
		/* we return the old values */
		cq->p1 = bch->minlen;
		cq->p2 = bch->maxlen;
		break;
	default:
		pr_info("mISDN unhandled control %x operation\n", cq->op);
		ret = -EINVAL;
		break;
	}
	return ret;
}
EXPORT_SYMBOL(mISDN_ctrl_bchannel);

/* extract SAPI and TEI from the first two octets of a D-channel frame,
 * packed as sapi | (tei << 8) */
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

void
recv_Bchannel(struct bchannel *bch, unsigned int id, bool force)
{
	struct mISDNhead *hh;

	/* if allocation did fail upper functions still may call us */
	if (unlikely(!bch->rx_skb))
		return;
	if (unlikely(!bch->rx_skb->len)) {
		/* we have no data to deliver - this may happen after recovery
		 * from overflow or too small allocation.
		 * We need to free the buffer here */
		dev_kfree_skb(bch->rx_skb);
		bch->rx_skb = NULL;
	} else {
		if (test_bit(FLG_TRANSPARENT, &bch->Flags) &&
		    (bch->rx_skb->len < bch->minlen) && !force)
			return;
		hh = mISDN_HEAD_P(bch->rx_skb);
		hh->prim = PH_DATA_IND;
		hh->id = id;
		if (bch->rcount >= 64) {
			printk(KERN_WARNING
			       "B%d receive queue overflow - flushing!\n",
			       bch->nr);
			skb_queue_purge(&bch->rqueue);
		}
		bch->rcount++;
		skb_queue_tail(&bch->rqueue, bch->rx_skb);
		bch->rx_skb = NULL;
		schedule_event(bch, FLG_RECVQUEUE);
	}
}
EXPORT_SYMBOL(recv_Bchannel);

void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
		       "flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);

/* queue a PH_DATA_CNF confirmation for the D-channel frame just taken
 * for transmission */
static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff *skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		       mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

/* queue a PH_DATA_CNF confirmation for the B-channel frame just taken
 * for transmission */
static void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff *skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
		       "flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
			       0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		       mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}

int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			/* confirm immediately to allow next data */
			confirm_Bsend(bch);
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);

void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check packet size limits */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{

	/* check packet size limits */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		       __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
		       "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
		       __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		confirm_Bsend(ch);
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);

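/*
 * Usage sketch (illustration only; hypothetical_hw_fill_fifo() is a
 * placeholder for a driver's real FIFO routine and not part of this
 * file): how a card driver's transmit path typically consumes the
 * return value of bchannel_senddata().
 *
 *	ret = bchannel_senddata(bch, skb);
 *	if (ret > 0)
 *		hypothetical_hw_fill_fifo(bch);	// channel idle: start TX now
 *	else if (ret == 0)
 *		return;		// parked as next_skb, sent after the current frame
 *	else
 *		dev_kfree_skb(skb);	// rejected: caller still owns the skb
 */
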
/* The function allocates a new receive skb on demand with a size for the
 * requirements of the current protocol. It returns the tailroom of the
 * receive skb or an error.
 */
int
bchannel_get_rxbuf(struct bchannel *bch, int reqlen)
{
	int len;

	if (bch->rx_skb) {
		len = skb_tailroom(bch->rx_skb);
		if (len < reqlen) {
			pr_warning("B%d no space for %d (only %d) bytes\n",
				   bch->nr, reqlen, len);
			if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
				/* send what we have now and try a new buffer */
				recv_Bchannel(bch, 0, true);
			} else {
				/* on HDLC we have to drop too big frames */
				return -EMSGSIZE;
			}
		} else {
			return len;
		}
	}
	/* update current min/max length first */
	if (unlikely(bch->maxlen != bch->next_maxlen))
		bch->maxlen = bch->next_maxlen;
	if (unlikely(bch->minlen != bch->next_minlen))
		bch->minlen = bch->next_minlen;
	if (unlikely(reqlen > bch->maxlen))
		return -EMSGSIZE;
	if (test_bit(FLG_TRANSPARENT, &bch->Flags)) {
		if (reqlen >= bch->minlen) {
			len = reqlen;
		} else {
			len = 2 * bch->minlen;
			if (len > bch->maxlen)
				len = bch->maxlen;
		}
	} else {
		/* with HDLC we do not know the length yet */
		len = bch->maxlen;
	}
	bch->rx_skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!bch->rx_skb) {
		pr_warning("B%d receive no memory for %d bytes\n",
			   bch->nr, len);
		len = -ENOMEM;
	}
	return len;
}
EXPORT_SYMBOL(bchannel_get_rxbuf);
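
/*
 * Usage sketch (illustration only; hypothetical_hw_read_fifo() and
 * "count" stand in for a driver's real FIFO access): a typical receive
 * interrupt path built on bchannel_get_rxbuf() and recv_Bchannel().
 *
 *	len = bchannel_get_rxbuf(bch, count);
 *	if (len < 0) {
 *		// no receive buffer could be provided, data is lost
 *		return;
 *	}
 *	hypothetical_hw_read_fifo(skb_put(bch->rx_skb, count), count);
 *	recv_Bchannel(bch, 0, false);	// queue rx_skb (once complete)
 *					// and schedule the bottom half
 */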