Remove support for kernels older than 2.6.27

No major distributions still support kernels older than 2.6.27, so many
of the compatibility typedefs and version checks can be removed. The
primary motivation for this change is that kernel 5.0 is dropping
support for struct timeval, and ideally the in-kernel time
representation can standardize on ktime_t; kernel 2.6.18, however, did
not provide the ktime interface that is needed.
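The ktime_t direction is easier to see with a short sketch of the kind
of code this enables; every name below is hypothetical and not taken
from this tree:

/* A minimal sketch, assuming a driver that used to keep a struct timeval
 * timestamp; all identifiers here are hypothetical. */
#include <linux/ktime.h>

struct example_span_stats {
	ktime_t last_interrupt;		/* previously: struct timeval */
};

static void example_mark_interrupt(struct example_span_stats *stats)
{
	/* Monotonic timestamp; no timeval and no y2038 concerns. */
	stats->last_interrupt = ktime_get();
}

static s64 example_usecs_since_interrupt(const struct example_span_stats *stats)
{
	return ktime_us_delta(ktime_get(), stats->last_interrupt);
}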

Signed-off-by: Shaun Ruffell <sruffell@sruffell.net>
Shaun Ruffell 2019-01-11 03:59:45 +00:00
parent 8c8b9b6df0
commit 02d30ab799
28 changed files with 15 additions and 708 deletions
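Many of the deletions that follow remove the same compatibility
pattern: a #if branch for the pre-2.6.20 workqueue API, whose callbacks
received a void * context instead of a struct work_struct *. With
2.6.27 as the floor, only the container_of() form remains. A minimal
sketch of that surviving form, with hypothetical names:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct example_dev {
	struct work_struct work;
	unsigned long pending_events;
};

/* Post-2.6.20 callbacks take the work_struct itself and recover the
 * containing structure, rather than receiving a context pointer. */
static void example_work_fn(struct work_struct *work)
{
	struct example_dev *dev = container_of(work, struct example_dev, work);

	dev->pending_events = 0;
}

static void example_setup(struct example_dev *dev)
{
	/* Two-argument INIT_WORK; the old three-argument form is gone. */
	INIT_WORK(&dev->work, example_work_fn);
	schedule_work(&dev->work);
}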

View File

@ -103,11 +103,7 @@
#define chan_to_netdev(h) ((h)->hdlcnetdev->netdev)
/* macro-oni for determining a unit (channel) number */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#define UNIT(file) MINOR(file->f_dentry->d_inode->i_rdev)
#else
#define UNIT(file) MINOR(file->f_path.dentry->d_inode->i_rdev)
#endif
EXPORT_SYMBOL(dahdi_transcode_fops);
EXPORT_SYMBOL(dahdi_init_tone_state);
@ -1973,12 +1969,10 @@ static inline void print_debug_writebuf(struct dahdi_chan* ss, struct sk_buff *s
#endif
#ifdef CONFIG_DAHDI_NET
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 26)
static inline struct net_device_stats *hdlc_stats(struct net_device *dev)
{
return &dev->stats;
}
#endif
static int dahdi_net_open(struct net_device *dev)
{
@ -3938,19 +3932,11 @@ static void __dahdi_find_master_span(void)
module_printk(KERN_NOTICE, "Master changed to %s\n", s->name);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void _dahdi_find_master_span(void *work)
{
__dahdi_find_master_span();
}
static DECLARE_WORK(find_master_work, _dahdi_find_master_span, NULL);
#else
static void _dahdi_find_master_span(struct work_struct *work)
{
__dahdi_find_master_span();
}
static DECLARE_WORK(find_master_work, _dahdi_find_master_span);
#endif
static void dahdi_find_master_span(void)
{
@ -4970,9 +4956,6 @@ static int dahdi_ioctl_chanconfig(struct file *file, unsigned long data)
chan->hdlcnetdev->netdev = alloc_hdlcdev(chan->hdlcnetdev);
if (chan->hdlcnetdev->netdev) {
chan->hdlcnetdev->chan = chan;
#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 23)
SET_MODULE_OWNER(chan->hdlcnetdev->netdev);
#endif
chan->hdlcnetdev->netdev->tx_queue_len = 50;
#ifdef HAVE_NET_DEVICE_OPS
chan->hdlcnetdev->netdev->netdev_ops = &dahdi_netdev_ops;
@ -9384,11 +9367,7 @@ that the waitqueue is empty. */
#ifdef CONFIG_DAHDI_NET
if (skb && dahdi_have_netdev(ms))
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
skb->mac.raw = skb->data;
#else
skb_reset_mac_header(skb);
#endif
skb->dev = chan_to_netdev(ms);
#ifdef DAHDI_HDLC_TYPE_TRANS
skb->protocol = hdlc_type_trans(skb,
@ -10550,35 +10529,10 @@ failed_driver_init:
return res;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
#ifdef CONFIG_PCI
void dahdi_pci_disable_link_state(struct pci_dev *pdev, int state)
{
u16 reg16;
int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
PCIE_LINK_STATE_CLKPM);
if (!pos)
return;
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~(state);
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
EXPORT_SYMBOL(dahdi_pci_disable_link_state);
#endif /* CONFIG_PCI */
#endif /* 2.6.25 */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
static inline void flush_find_master_work(void)
{
flush_scheduled_work();
}
#else
static inline void flush_find_master_work(void)
{
cancel_work_sync(&find_master_work);
}
#endif
static void __exit dahdi_cleanup(void)
{

View File

@ -61,38 +61,6 @@ static inline struct dahdi_span *dev_to_span(struct device *dev)
DAHDI_ADD_UEVENT_VAR("SPAN_NAME=%s", span->name); \
} while (0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
#define DAHDI_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(envp, num_envp, &i, \
buffer, buffer_size, &len, \
fmt, val); \
if (err) \
return err; \
} while (0)
static int span_uevent(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
struct dahdi_span *span;
int i = 0;
int len = 0;
if (!dev)
return -ENODEV;
span = dev_to_span(dev);
if (!span)
return -ENODEV;
dahdi_dbg(GENERAL, "SYFS dev_name=%s span=%s\n",
dev_name(dev), span->name);
SPAN_VAR_BLOCK;
envp[i] = NULL;
return 0;
}
#else
#define DAHDI_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(kenv, fmt, val); \
@ -115,8 +83,6 @@ static int span_uevent(struct device *dev, struct kobj_uevent_env *kenv)
return 0;
}
#endif
#define span_attr(field, format_string) \
static BUS_ATTR_READER(field##_show, dev, buf) \
{ \
@ -465,37 +431,6 @@ static inline struct dahdi_device *to_ddev(struct device *dev)
ddev->location); \
} while (0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
#define DAHDI_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(envp, num_envp, &i, \
buffer, buffer_size, &len, \
fmt, val); \
if (err) \
return err; \
} while (0)
static int device_uevent(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
struct dahdi_device *ddev;
int i = 0;
int len = 0;
if (!dev)
return -ENODEV;
ddev = to_ddev(dev);
if (!ddev)
return -ENODEV;
dahdi_dbg(GENERAL, "SYFS dev_name=%s\n", dev_name(dev));
DEVICE_VAR_BLOCK;
envp[i] = NULL;
return 0;
}
#else
#define DAHDI_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(kenv, fmt, val); \
@ -517,8 +452,6 @@ static int device_uevent(struct device *dev, struct kobj_uevent_env *kenv)
return 0;
}
#endif
static ssize_t
manufacturer_show(struct device *dev,
struct device_attribute *attr, char *buf)

View File

@ -22,27 +22,12 @@
ssize_t name(struct device_driver *drv, char * buf)
/* Device file creation macros */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
#define CLASS_DEV_CREATE(class, devt, device, name) \
device_create(class, device, devt, NULL, "%s", name)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
#define CLASS_DEV_CREATE(class, devt, device, name) \
device_create(class, device, devt, name)
#else
#define CLASS_DEV_CREATE(class, devt, device, name) \
class_device_create(class, NULL, devt, device, name)
#endif
/* Device file destruction macros */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
#define CLASS_DEV_DESTROY(class, devt) \
device_destroy(class, devt)
#else
#define CLASS_DEV_DESTROY(class, devt) \
class_device_destroy(class, devt)
#endif
/* Global */
int __init dahdi_sysfs_chan_init(const struct file_operations *fops);

View File

@ -38,8 +38,7 @@
#include <linux/version.h>
#if defined(CONFIG_HIGH_RES_TIMERS) && \
LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
#if defined(CONFIG_HIGH_RES_TIMERS)
#define USE_HIGHRESTIMER
#endif

View File

@ -80,11 +80,7 @@ static int ztdeth_rcv(struct sk_buff *skb, struct net_device *dev, struct packet
{
struct dahdi_span *span;
struct ztdeth_header *zh;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
zh = (struct ztdeth_header *)skb_network_header(skb);
#else
zh = (struct ztdeth_header *)skb->nh.raw;
#endif
span = ztdeth_getspan(eth_hdr(skb)->h_source, zh->subaddr);
if (span) {
skb_pull(skb, sizeof(struct ztdeth_header));
@ -166,18 +162,9 @@ static void ztdeth_transmit(struct dahdi_dynamic *dyn, u8 *msg, size_t msglen)
/* Setup protocol and such */
skb->protocol = __constant_htons(ETH_P_DAHDI_DETH);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22)
skb_set_network_header(skb, 0);
#else
skb->nh.raw = skb->data;
#endif
skb->dev = dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
dev_hard_header(skb, dev, ETH_P_DAHDI_DETH, addr, dev->dev_addr, skb->len);
#else
if (dev->hard_header)
dev->hard_header(skb, dev, ETH_P_DAHDI_DETH, addr, dev->dev_addr, skb->len);
#endif
skb_queue_tail(&skbs, skb);
}
}
@ -368,11 +355,7 @@ static int ztdeth_create(struct dahdi_dynamic *dyn, const char *addr)
}
z->subaddr = htons(sub);
}
z->dev = dev_get_by_name(
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
&init_net,
#endif
z->ethdev);
z->dev = dev_get_by_name(&init_net, z->ethdev);
if (!z->dev) {
printk(KERN_NOTICE "TDMoE: Invalid device '%s'\n", z->ethdev);
kfree(z);

View File

@ -236,11 +236,7 @@ static int ztdethmf_rcv(struct sk_buff *skb, struct net_device *dev,
unsigned int samples, channels, rbslen, flags;
unsigned int skip = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
zh = (struct ztdeth_header *) skb_network_header(skb);
#else
zh = (struct ztdeth_header *) skb->nh.raw;
#endif
if (ntohs(zh->subaddr) & 0x8000) {
/* got a multi-span frame */
num_spans = ntohs(zh->subaddr) & 0xFF;
@ -484,19 +480,9 @@ static void ztdethmf_transmit(struct dahdi_dynamic *dyn, u8 *msg, size_t msglen)
/* Setup protocol type */
skb->protocol = __constant_htons(ETH_P_ZTDETH);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
skb_set_network_header(skb, 0);
#else
skb->nh.raw = skb->data;
#endif
skb->dev = dev;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
dev_hard_header(skb, dev, ETH_P_ZTDETH, addr, dev->dev_addr, skb->len);
#else
if (dev->hard_header)
dev->hard_header(skb, dev, ETH_P_ZTDETH, addr,
dev->dev_addr, skb->len);
#endif
/* queue frame for delivery */
if (dev) {
skb_queue_tail(&skbs, skb);
@ -603,11 +589,7 @@ static int ztdethmf_create(struct dahdi_dynamic *dyn, const char *addr)
kfree(z);
return -EINVAL;
}
z->dev = dev_get_by_name(
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
&init_net,
#endif
z->ethdev);
z->dev = dev_get_by_name(&init_net, z->ethdev);
if (!z->dev) {
printk(KERN_ERR "TDMoE Multiframe: Invalid device '%s'\n", z->ethdev);
kfree(z);

View File

@ -33,11 +33,7 @@
#include <linux/version.h>
#include <linux/types.h>
#include <linux/pci.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif
#include <linux/slab.h>
#include <dahdi/kernel.h>
@ -380,15 +376,9 @@ static void update_channel_config(struct vpmadt032 *vpm, unsigned int channel,
* the hardware can take some time while messages are sent to the VPMADT032
* module and the driver waits for the responses.
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void vpmadt032_bh(void *data)
{
struct vpmadt032 *vpm = data;
#else
static void vpmadt032_bh(struct work_struct *data)
{
struct vpmadt032 *vpm = container_of(data, struct vpmadt032, work);
#endif
struct change_order *order;
while ((order = get_next_order(vpm))) {
@ -514,11 +504,7 @@ vpmadt032_alloc(struct vpmadt032_options *options)
sema_init(&vpm->sem, 1);
vpm->curpage = 0x80;
vpm->dspid = -1;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&vpm->work, vpmadt032_bh, vpm);
#else
INIT_WORK(&vpm->work, vpmadt032_bh);
#endif
/* Do not use the global workqueue for processing these events. Some of
* the operations can take 100s of ms, most of that time spent sleeping.

View File

@ -42,10 +42,7 @@
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25)
#include <linux/semaphore.h>
#endif
#include "gpakenum.h"
#include "adt_lec.h"

View File

@ -1197,11 +1197,8 @@ voicebus_release(struct voicebus *vb)
#endif
/* Make sure the underrun_work isn't running or going to run. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
flush_scheduled_work();
#else
cancel_work_sync(&vb->underrun_work);
#endif
/* quiesce the hardware */
voicebus_stop(vb);
@ -1236,9 +1233,6 @@ vb_increase_latency(struct voicebus *vb, unsigned int increase,
struct list_head *buffers)
{
struct vbb *vbb;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
struct vbb *n;
#endif
int i;
LIST_HEAD(local);
@ -1274,12 +1268,7 @@ vb_increase_latency(struct voicebus *vb, unsigned int increase,
}
handle_transmit(vb, &local);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
list_for_each_entry_safe(vbb, n, &local, entry)
list_move_tail(&vbb->entry, buffers);
#else
list_splice_tail(&local, buffers);
#endif
/* Set the new latency (but we want to ensure that there aren't any
* printks to the console, so we don't call the function) */
@ -1633,16 +1622,10 @@ tx_error_exit:
* @work: The work_struct used to queue this function.
*
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void handle_hardunderrun(void *data)
{
struct voicebus *vb = data;
#else
static void handle_hardunderrun(struct work_struct *work)
{
struct voicebus *vb = container_of(work, struct voicebus,
underrun_work);
#endif
if (test_bit(VOICEBUS_STOP, &vb->flags) ||
test_bit(VOICEBUS_STOPPED, &vb->flags))
return;
@ -1677,11 +1660,7 @@ static void handle_hardunderrun(struct work_struct *work)
* since it doesn't employ any locking on the voicebus interface.
*/
static irqreturn_t
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
vb_isr(int irq, void *dev_id, struct pt_regs *regs)
#else
vb_isr(int irq, void *dev_id)
#endif
{
struct voicebus *vb = dev_id;
unsigned long flags;
@ -1751,11 +1730,7 @@ vb_timer(TIMER_DATA_TYPE timer)
{
unsigned long start = jiffies;
struct voicebus *vb = from_timer(vb, timer, timer);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
vb_isr(0, vb, 0);
#else
vb_isr(0, vb);
#endif
if (!test_bit(VOICEBUS_STOPPED, &vb->flags)) {
mod_timer(&vb->timer, start + HZ/1000);
}
@ -1803,11 +1778,7 @@ __voicebus_init(struct voicebus *vb, const char *board_name,
timer_setup(&vb->timer, vb_timer, 0);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&vb->underrun_work, handle_hardunderrun, vb);
#else
INIT_WORK(&vb->underrun_work, handle_hardunderrun);
#endif
/* ----------------------------------------------------------------
Configure the hardware / kernel module interfaces.
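The interrupt-handler hunks in this file (vb_isr) likewise drop the
pre-2.6.19 three-argument signature that carried a struct pt_regs *;
only the two-argument form remains. A minimal sketch with hypothetical
names, not the driver's actual handler:

#include <linux/interrupt.h>

struct example_card {
	void __iomem *membase;
};

static irqreturn_t example_isr(int irq, void *dev_id)
{
	struct example_card *card = dev_id;

	/* In a real driver: read and acknowledge the interrupt status here,
	 * returning IRQ_NONE when the device did not raise the interrupt. */
	if (!card || !card->membase)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

/* Registration is unchanged, e.g.:
 *   request_irq(pdev->irq, example_isr, IRQF_SHARED, "example", card);
 */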

View File

@ -173,9 +173,7 @@ struct voicebus {
struct sk_buff_head captured_packets;
struct net_device *netdev;
struct net_device_stats net_stats;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
struct napi_struct napi;
#endif
atomic_t tx_seqnum;
atomic_t rx_seqnum;
#endif

View File

@ -104,26 +104,6 @@ static int vb_net_receive(struct voicebus *vb, int max)
return count;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
static int vb_net_poll(struct net_device *netdev, int *budget)
{
struct voicebus *vb = voicebus_from_netdev(netdev);
int count = 0;
int quota = min(netdev->quota, *budget);
count = vb_net_receive(vb, quota);
*budget -= count;
netdev->quota -= count;
if (!skb_queue_len(&vb->captured_packets)) {
netif_rx_complete(netdev);
return 0;
} else {
return -1;
}
}
#else
static int vb_net_poll(struct napi_struct *napi, int budget)
{
struct voicebus *vb = container_of(napi, struct voicebus, napi);
@ -142,7 +122,6 @@ static int vb_net_poll(struct napi_struct *napi, int budget)
}
return count;
}
#endif
static void vb_net_set_multi(struct net_device *netdev)
{
@ -155,11 +134,7 @@ static int vb_net_up(struct net_device *netdev)
{
struct voicebus *vb = voicebus_from_netdev(netdev);
dev_dbg(&vb->pdev->dev, "%s\n", __func__);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
netif_poll_enable(netdev);
#else
napi_enable(&vb->napi);
#endif
return 0;
}
@ -167,11 +142,7 @@ static int vb_net_down(struct net_device *netdev)
{
struct voicebus *vb = voicebus_from_netdev(netdev);
dev_dbg(&vb->pdev->dev, "%s\n", __func__);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
netif_poll_disable(netdev);
#else
napi_disable(&vb->napi);
#endif
return 0;
}
@ -231,12 +202,7 @@ int vb_net_register(struct voicebus *vb, const char *board_name)
netdev->promiscuity = 0;
netdev->flags |= IFF_NOARP;
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
netdev->poll = vb_net_poll;
netdev->weight = 64;
# else
netif_napi_add(netdev, &vb->napi, vb_net_poll, 64);
# endif
skb_queue_head_init(&vb->captured_packets);
res = register_netdev(netdev);
@ -359,9 +325,7 @@ void vb_net_capture_vbb(struct voicebus *vb, const void *vbb, const int tx,
return;
skb_queue_tail(&vb->captured_packets, skb);
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
netif_rx_schedule(netdev);
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
netif_rx_schedule(netdev, &vb->napi);
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
netif_rx_schedule(&vb->napi);

View File

@ -393,16 +393,10 @@ struct vpmoct_load_work {
* long running firmware load.
*
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void vpmoct_load_complete_fn(void *data)
{
struct vpmoct_load_work *work = data;
#else
static void vpmoct_load_complete_fn(struct work_struct *data)
{
struct vpmoct_load_work *work =
container_of(data, struct vpmoct_load_work, work);
#endif
/* Do not touch work->vpm after calling load complete. It may have
* been freed in the function by the board driver. */
work->load_complete(work->vpm->dev, work->operational);
@ -421,11 +415,7 @@ vpmoct_load_complete(struct vpmoct_load_work *work, bool operational)
{
work->operational = operational;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&work->work, vpmoct_load_complete_fn, work);
#else
INIT_WORK(&work->work, vpmoct_load_complete_fn);
#endif
schedule_work(&work->work);
}
@ -490,16 +480,10 @@ static void vpmoct_release_firmware(const struct firmware *fw)
* @vpm: The VPMOCT032 module to check / load.
*
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void vpmoct_load_flash(void *data)
{
struct vpmoct_load_work *work = data;
#else
static void vpmoct_load_flash(struct work_struct *data)
{
struct vpmoct_load_work *work =
container_of(data, struct vpmoct_load_work, work);
#endif
int res;
struct vpmoct *const vpm = work->vpm;
const struct firmware *fw;
@ -686,11 +670,7 @@ int vpmoct_init(struct vpmoct *vpm, load_complete_func_t load_complete)
return -ENOMEM;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&work->work, vpmoct_load_flash, work);
#else
INIT_WORK(&work->work, vpmoct_load_flash);
#endif
work->vpm = vpm;
work->load_complete = load_complete;

View File

@ -48,15 +48,12 @@
#include "wct4xxp.h"
#include "vpm450m.h"
/* Work queues are a way to better distribute load on SMP systems */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20))
/*
* Work queues can significantly improve performance and scalability
* on multi-processor machines, but requires bypassing some kernel
* API's, so it's not guaranteed to be compatible with all kernels.
*/
/* #define ENABLE_WORKQUEUES */
#endif
/* Support first generation cards? */
#define SUPPORT_GEN1
@ -4048,15 +4045,9 @@ static void t4_increase_latency(struct t4 *wc, int newlatency)
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void t4_work_func(void *data)
{
struct t4 *wc = data;
#else
static void t4_work_func(struct work_struct *work)
{
struct t4 *wc = container_of(work, struct t4, bh_work);
#endif
if (test_bit(T4_CHANGE_LATENCY, &wc->checkflag)) {
if (wc->needed_latency != wc->numbufs) {
@ -5144,11 +5135,7 @@ static int __devinit t4_launch(struct t4 *wc)
&wc->ddev->spans);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&wc->bh_work, t4_work_func, wc);
#else
INIT_WORK(&wc->bh_work, t4_work_func);
#endif
res = dahdi_register_device(wc->ddev, &wc->dev->dev);
if (res) {

View File

@ -239,13 +239,8 @@ initialize_cmd(struct tcb *cmd, unsigned long cmd_flags)
cmd->flags = cmd_flags;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
/*! Used to allocate commands to submit to the dte. */
kmem_cache_t *cmd_cache;
#else
/*! Used to allocate commands to submit to the dte. */
static struct kmem_cache *cmd_cache;
#endif
static inline struct tcb *
__alloc_cmd(size_t size, gfp_t alloc_flags, unsigned long cmd_flags)
@ -376,9 +371,7 @@ struct wcdte {
struct sk_buff_head captured_packets;
struct net_device *netdev;
struct net_device_stats net_stats;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24)
struct napi_struct napi;
#endif
struct timer_list watchdog;
u16 open_channels;
unsigned long reported_packet_errors;
@ -501,11 +494,7 @@ wctc4xxp_net_up(struct net_device *netdev)
{
struct wcdte *wc = wcdte_from_netdev(netdev);
DTE_DEBUG(DTE_DEBUG_GENERAL, "%s\n", __func__);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
netif_poll_enable(netdev);
#else
napi_enable(&wc->napi);
#endif
return 0;
}
@ -514,11 +503,7 @@ wctc4xxp_net_down(struct net_device *netdev)
{
struct wcdte *wc = wcdte_from_netdev(netdev);
DTE_DEBUG(DTE_DEBUG_GENERAL, "%s\n", __func__);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
netif_poll_disable(netdev);
#else
napi_disable(&wc->napi);
#endif
return 0;
}
@ -558,27 +543,6 @@ wctc4xxp_net_receive(struct wcdte *wc, int max)
return count;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
static int
wctc4xxp_poll(struct net_device *netdev, int *budget)
{
struct wcdte *wc = wcdte_from_netdev(netdev);
int count = 0;
int quota = min(netdev->quota, *budget);
count = wctc4xxp_net_receive(wc, quota);
*budget -= count;
netdev->quota -= count;
if (!skb_queue_len(&wc->captured_packets)) {
netif_rx_complete(netdev);
return 0;
} else {
return -1;
}
}
#else
static int
wctc4xxp_poll(struct napi_struct *napi, int budget)
{
@ -598,7 +562,6 @@ wctc4xxp_poll(struct napi_struct *napi, int budget)
}
return count;
}
#endif
static struct net_device_stats *
wctc4xxp_net_get_stats(struct net_device *netdev)
@ -690,12 +653,7 @@ wctc4xxp_net_register(struct wcdte *wc)
netdev->promiscuity = 0;
netdev->flags |= IFF_NOARP;
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
netdev->poll = &wctc4xxp_poll;
netdev->weight = 64;
# else
netif_napi_add(netdev, &wc->napi, &wctc4xxp_poll, 64);
# endif
res = register_netdev(netdev);
if (res) {
@ -765,9 +723,7 @@ wctc4xxp_net_capture_cmd(struct wcdte *wc, const struct tcb *cmd)
return;
skb_queue_tail(&wc->captured_packets, skb);
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
netif_rx_schedule(netdev);
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29)
netif_rx_schedule(netdev, &wc->napi);
# elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
netif_rx_schedule(&wc->napi);
@ -1977,13 +1933,9 @@ wctc4xxp_operation_allocate(struct dahdi_transcoder_channel *dtc)
int res = 0;
struct wcdte *wc = ((struct channel_pvt *)(dtc->pvt))->wc;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
mutex_lock(&wc->chanlock);
#else
res = mutex_lock_killable(&wc->chanlock);
if (res)
return res;
#endif
++wc->open_channels;
@ -2056,13 +2008,9 @@ wctc4xxp_operation_release(struct dahdi_transcoder_channel *dtc)
BUG_ON(!cpvt);
BUG_ON(!wc);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
mutex_lock(&wc->chanlock);
#else
res = mutex_lock_killable(&wc->chanlock);
if (res)
return res;
#endif
if (test_bit(DTE_SHUTDOWN, &wc->flags)) {
/* On shutdown, if we reload the firmware we will reset the
@ -2777,15 +2725,9 @@ static void service_rx_ring(struct wcdte *wc)
wctc4xxp_receive_demand_poll(wc);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void deferred_work_func(void *param)
{
struct wcdte *wc = param;
#else
static void deferred_work_func(struct work_struct *work)
{
struct wcdte *wc = container_of(work, struct wcdte, deferred_work);
#endif
service_rx_ring(wc);
}
@ -3902,16 +3844,11 @@ static ssize_t wctc4xxp_force_alert_store(struct device *dev,
}
dev_info(&wc->pdev->dev, "Forcing alert type: 0x%x\n", alert_type);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
mutex_lock(&wc->chanlock);
#else
res = mutex_lock_killable(&wc->chanlock);
if (res) {
free_cmd(cmd);
return -EAGAIN;
}
#endif
parameters[0] = alert_type;
@ -4000,11 +3937,7 @@ wctc4xxp_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
INIT_LIST_HEAD(&wc->cmd_list);
INIT_LIST_HEAD(&wc->waiting_for_response_list);
INIT_LIST_HEAD(&wc->rx_list);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&wc->deferred_work, deferred_work_func, wc);
#else
INIT_WORK(&wc->deferred_work, deferred_work_func);
#endif
init_waitqueue_head(&wc->waitq);
if (pci_set_dma_mask(wc->pdev, DMA_BIT_MASK(32))) {
@ -4295,20 +4228,10 @@ static int __init wctc4xxp_init(void)
int res;
unsigned long cache_flags;
#if defined(CONFIG_SLUB) && (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 22))
cache_flags = SLAB_HWCACHE_ALIGN | SLAB_STORE_USER | SLAB_DEBUG_FREE;
#else
cache_flags = SLAB_HWCACHE_ALIGN;
#endif
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
cmd_cache = kmem_cache_create(THIS_MODULE->name, sizeof(struct tcb),
0, cache_flags, NULL, NULL);
# else
cmd_cache = kmem_cache_create(THIS_MODULE->name, sizeof(struct tcb),
0, cache_flags, NULL);
# endif
if (!cmd_cache)
return -ENOMEM;
spin_lock_init(&wctc4xxp_list_lock);

View File

@ -50,11 +50,7 @@ Tx Gain - W/Pre-Emphasis: -23.99 to 0.00 db
#include <linux/delay.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif
#include <linux/crc32.h>
#include <linux/slab.h>
@ -332,15 +328,9 @@ struct bg {
int ret;
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void bg_work_func(void *data)
{
struct bg *bg = data;
#else
static void bg_work_func(struct work_struct *work)
{
struct bg *bg = container_of(work, struct bg, work);
#endif
bg->ret = bg->fn(bg->wc, bg->param);
complete(&bg->complete);
}
@ -377,11 +367,7 @@ bg_create(struct wctdm *wc, bg_work_func_t fn, unsigned long param)
}
init_completion(&bg->complete);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&bg->work, bg_work_func, bg);
#else
INIT_WORK(&bg->work, bg_work_func);
#endif
bg->wc = wc;
bg->fn = fn;
@ -463,16 +449,10 @@ struct vpmadt032_channel_setup {
struct wctdm *wc;
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void vpm_setup_work_func(void *data)
{
struct vpmadt032_channel_setup *setup = data;
#else
static void vpm_setup_work_func(struct work_struct *work)
{
struct vpmadt032_channel_setup *setup =
container_of(work, struct vpmadt032_channel_setup, work);
#endif
int i;
int res;
GpakChannelConfig_t chanconfig;
@ -634,13 +614,7 @@ static int config_vpmadt032(struct vpmadt032 *vpm, struct wctdm *wc)
return -ENOMEM;
setup->wc = wc;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&setup->work, vpm_setup_work_func, setup);
#else
INIT_WORK(&setup->work, vpm_setup_work_func);
#endif
queue_work(vpm->wq, &setup->work);
return 0;

View File

@ -29,11 +29,7 @@
#include <dahdi/kernel.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif
#include "voicebus/voicebus.h"

View File

@ -2386,15 +2386,9 @@ int b400m_dchan(struct dahdi_span *span)
/*
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void xhfc_work(void *data)
{
struct b400m *b4 = data;
#else
static void xhfc_work(struct work_struct *work)
{
struct b400m *b4 = container_of(work, struct b400m, xhfc_wq);
#endif
int i, j, k, fifo;
unsigned char b, b2;
@ -2630,11 +2624,7 @@ void b400m_post_init(struct b400m *b4)
snprintf(b4->name, sizeof(b4->name) - 1, "b400m-%d",
b4->b400m_no);
b4->xhfc_ws = create_singlethread_workqueue(b4->name);
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&b4->xhfc_wq, xhfc_work, b4);
# else
INIT_WORK(&b4->xhfc_wq, xhfc_work);
# endif
b4->inited = 1;
}

View File

@ -2272,15 +2272,9 @@ static void te13x_handle_transmit(struct wcxb *xb, void *vfp)
#define SPAN_ALARMS \
(wc->span.alarms & ~DAHDI_ALARM_NOTOPEN)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void timer_work_func(void *param)
{
struct t13x *wc = param;
#else
static void timer_work_func(struct work_struct *work)
{
struct t13x *wc = container_of(work, struct t13x, timer_work);
#endif
static int work_count;
if (debug)
@ -2585,11 +2579,7 @@ static int __devinit te13xp_init_one(struct pci_dev *pdev,
mutex_init(&wc->lock);
timer_setup(&wc->timer, te13xp_timer, 0);
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&wc->timer_work, timer_work_func, wc);
# else
INIT_WORK(&wc->timer_work, timer_work_func);
# endif
wc->ddev = dahdi_create_device();
if (!wc->ddev) {

View File

@ -55,19 +55,6 @@
#include "wcxb_spi.h"
#include "wcxb_flash.h"
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
# ifdef RHEL_RELEASE_VERSION
# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5, 6)
# define T43X_HAVE_CANCEL_WORK_SYNC
# endif
# else
static inline int delayed_work_pending(struct work_struct *work)
{
return test_bit(0, &work->pending);
}
# endif
#endif
static const char *TE435_FW_FILENAME = "dahdi-fw-te435.bin";
static const char *TE436_FW_FILENAME = "dahdi-fw-te436.bin";
static const u32 TE435_VERSION = 0x13001e;
@ -120,11 +107,7 @@ struct t43x_span {
};
struct t43x_clksrc_work {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
struct work_struct work;
#else
struct delayed_work work;
#endif
spinlock_t lock;
enum wcxb_clock_sources clksrc;
bool is_timing_master;
@ -1191,16 +1174,10 @@ static void __t43x_set_rclk_src(struct t43x *wc, int span)
/* This is called from the workqueue to wait for the TDM engine stop */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void t43x_clksrc_work_fn(void *data)
{
struct t43x_clksrc_work *work = data;
#else
static void t43x_clksrc_work_fn(struct work_struct *data)
{
struct t43x_clksrc_work *work = container_of(to_delayed_work(data),
struct t43x_clksrc_work, work);
#endif
struct t43x_clksrc_work *work = container_of(data,
struct t43x_clksrc_work, work.work);
struct t43x *wc = container_of(work, struct t43x, clksrc_work);
if (debug) {
@ -2029,16 +2006,10 @@ struct maint_work_struct {
struct dahdi_span *span;
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void t43x_maint_work(void *data)
{
struct maint_work_struct *w = data;
#else
static void t43x_maint_work(struct work_struct *work)
{
struct maint_work_struct *w = container_of(work,
struct maint_work_struct, work);
#endif
struct t43x *wc = w->wc;
struct dahdi_span *span = w->span;
@ -2289,11 +2260,7 @@ static int t43x_maint(struct dahdi_span *span, int cmd)
work->wc = wc;
work->cmd = cmd;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&work->work, t43x_maint_work, work);
#else
INIT_WORK(&work->work, t43x_maint_work);
#endif
queue_work(wc->wq, &work->work);
return 0;
}
@ -3075,15 +3042,9 @@ static void t43x_handle_transmit(struct wcxb *xb, void *vfp)
#define SPAN_ALARMS \
(ts->span.alarms & ~DAHDI_ALARM_NOTOPEN)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void timer_work_func(void *param)
{
struct t43x *wc = param;
#else
static void timer_work_func(struct work_struct *work)
{
struct t43x *wc = container_of(work, struct t43x, timer_work);
#endif
struct t43x_span *ts;
int x;
bool start_timer = false;
@ -3434,18 +3395,8 @@ static int __devinit t43x_init_one(struct pci_dev *pdev,
mutex_init(&wc->lock);
timer_setup(&wc->timer, t43x_timer, 0);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&wc->timer_work, timer_work_func, wc);
#else
INIT_WORK(&wc->timer_work, timer_work_func);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
INIT_WORK(&wc->clksrc_work.work, t43x_clksrc_work_fn,
&wc->clksrc_work.work);
#else
INIT_DELAYED_WORK(&wc->clksrc_work.work, t43x_clksrc_work_fn);
#endif
spin_lock_init(&wc->clksrc_work.lock);
wc->ddev = dahdi_create_device();
@ -3601,16 +3552,7 @@ static void __devexit t43x_remove_one(struct pci_dev *pdev)
release_vpm450m(wc->vpm);
wc->vpm = NULL;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
# ifdef T43X_HAVE_CANCEL_WORK_SYNC
cancel_work_sync(&wc->clksrc_work.work);
# else
cancel_delayed_work(&wc->clksrc_work.work);
flush_workqueue(wc->wq);
# endif
#else
cancel_delayed_work_sync(&wc->clksrc_work.work);
#endif
del_timer_sync(&wc->timer);
flush_workqueue(wc->wq);

View File

@ -31,10 +31,8 @@
#include <linux/version.h>
#include <linux/slab.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 26)
#define HAVE_RATELIMIT
#include <linux/ratelimit.h>
#endif
#include <dahdi/kernel.h>
@ -129,11 +127,7 @@ static inline bool wcxb_is_pcie(const struct wcxb *xb)
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 33)
return pci_is_pcie(xb->pdev);
#else
#ifndef WCXB_PCI_DEV_DOES_NOT_HAVE_IS_PCIE
return (xb->pdev->is_pcie > 0);
#else
return (xb->flags.is_pcie > 0);
#endif
#endif
}
@ -653,10 +647,6 @@ int wcxb_init(struct wcxb *xb, const char *board_name, u32 int_mode)
pci_set_master(pdev);
#ifdef WCXB_PCI_DEV_DOES_NOT_HAVE_IS_PCIE
xb->flags.is_pcie = pci_find_capability(pdev, PCI_CAP_ID_EXP) ? 1 : 0;
#endif
WARN_ON(!pdev);
if (!pdev)
return -EINVAL;

View File

@ -27,14 +27,6 @@
#define WCXB_DEFAULT_MAXLATENCY 12U
#define WCXB_DMA_CHAN_SIZE 128
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
/* The is_pcie member was backported but I'm not sure in which version. */
# ifndef RHEL_RELEASE_VERSION
#define WCXB_PCI_DEV_DOES_NOT_HAVE_IS_PCIE
# endif
#else
#endif
struct wcxb;
struct wcxb_operations {
@ -63,9 +55,6 @@ struct wcxb {
u32 have_msi:1;
u32 latency_locked:1;
u32 drive_timing_cable:1;
#ifdef WCXB_PCI_DEV_DOES_NOT_HAVE_IS_PCIE
u32 is_pcie:1;
#endif
u32 dma_ins:1;
} flags;
void __iomem *membase;

View File

@ -561,9 +561,6 @@ static int __init xpp_mmap_init(void)
xframe_cache =
kmem_cache_create("xframe_cache",
sizeof(xframe_t) + XFRAME_DATASIZE, 0, 0,
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
NULL,
#endif
NULL);
if (!xframe_cache) {
ret = -ENOMEM;

View File

@ -1152,16 +1152,10 @@ err:
* it returns only when all XPD's on the bus are detected and
* initialized.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
static void xbus_populate(struct work_struct *work)
{
struct xbus_workqueue *worker =
container_of(work, struct xbus_workqueue, xpds_init_work);
#else
void xbus_populate(void *data)
{
struct xbus_workqueue *worker = data;
#endif
xbus_t *xbus;
struct list_head *card;
struct list_head *next_card;
@ -1238,11 +1232,7 @@ int xbus_process_worker(xbus_t *xbus)
}
XBUS_DBG(DEVICES, xbus, "\n");
/* Initialize the work. (adapt to kernel API changes). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
INIT_WORK(&worker->xpds_init_work, xbus_populate);
#else
INIT_WORK(&worker->xpds_init_work, xbus_populate, worker);
#endif
BUG_ON(!xbus);
/* Now send it */
if (!queue_work(worker->wq, &worker->xpds_init_work)) {

View File

@ -407,34 +407,6 @@ static int astribank_match(struct device *dev, struct device_driver *driver)
XBUS_ADD_UEVENT_VAR("XBUS_NAME=%s", xbus->busname); \
} while (0)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)
#define XBUS_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(envp, num_envp, &i, \
buffer, buffer_size, &len, \
fmt, val); \
if (err) \
return err; \
} while (0)
static int astribank_uevent(struct device *dev, char **envp, int num_envp,
char *buffer, int buffer_size)
{
xbus_t *xbus;
int i = 0;
int len = 0;
extern char *initdir;
if (!dev)
return -ENODEV;
xbus = dev_to_xbus(dev);
DBG(GENERAL, "SYFS bus_id=%s xbus=%s\n", dev_name(dev), xbus->busname);
XBUS_VAR_BLOCK;
envp[i] = NULL;
return 0;
}
#else
#define XBUS_ADD_UEVENT_VAR(fmt, val...) \
do { \
int err = add_uevent_var(kenv, fmt, val); \
@ -455,8 +427,6 @@ static int astribank_uevent(struct device *dev, struct kobj_uevent_env *kenv)
return 0;
}
#endif
void astribank_uevent_send(xbus_t *xbus, enum kobject_action act)
{
struct kobject *kobj;

View File

@ -103,11 +103,7 @@ typedef char *charp;
#ifdef __KERNEL__
/* Kernel versions... */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
#define KMEM_CACHE_T kmem_cache_t
#else
#define KMEM_CACHE_T struct kmem_cache
#endif
#define KZALLOC(size, gfp) my_kzalloc(size, gfp)
#define KZFREE(p) \
@ -116,7 +112,6 @@ typedef char *charp;
kfree(p); \
} while (0);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
#define DEVICE_ATTR_READER(name, dev, buf) \
ssize_t name(struct device *dev, \
struct device_attribute *attr, char *buf)
@ -124,12 +119,6 @@ typedef char *charp;
ssize_t name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count)
#else
#define DEVICE_ATTR_READER(name, dev, buf) \
ssize_t name(struct device *dev, char *buf)
#define DEVICE_ATTR_WRITER(name, dev, buf, count) \
ssize_t name(struct device *dev, const char *buf, size_t count)
#endif
#define DRIVER_ATTR_READER(name, drv, buf) \
ssize_t name(struct device_driver *drv, char * buf)
@ -142,19 +131,9 @@ typedef char *charp;
#define SET_PROC_DIRENTRY_OWNER(p) do { } while (0);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
/* Also don't define this for later RHEL >= 5.2. */
#if defined(RHEL_RELEASE_CODE) && defined(RHEL_RELEASE_VERSION)
#if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 3)
typedef int bool;
#endif
#else
typedef int bool;
#endif
#endif
#else
typedef int bool;
#endif
#endif /* ifdef __KERNEL__ */
typedef struct xbus xbus_t;
typedef struct xpd xpd_t;
typedef struct xframe xframe_t;

View File

@ -32,11 +32,7 @@
#include <linux/version.h>
#include <asm/atomic.h>
#include <linux/slab.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
#include <linux/semaphore.h>
#else
#include <asm/semaphore.h>
#endif
#include <linux/moduleparam.h>
#endif /* __KERNEL__ */

View File

@ -222,20 +222,8 @@ static unsigned bus_count;
/* prevent races between open() and disconnect() */
static DEFINE_MUTEX(protect_xusb_devices);
/*
* AsteriskNow kernel has backported the "lean" callback from 2.6.20
* to 2.6.19 without any macro to notify of this fact -- how lovely.
* Debian-Etch and Centos5 are using 2.6.18 for now (lucky for us).
* Fedora6 jumped from 2.6.18 to 2.6.20. So far luck is on our side ;-)
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
#define USB_PASS_CB(u) struct urb *u, struct pt_regs *regs
#else
#define USB_PASS_CB(u) struct urb *u
#endif
static void xpp_send_callback(USB_PASS_CB(urb));
static void xpp_receive_callback(USB_PASS_CB(urb));
static void xpp_send_callback(struct urb *urb);
static void xpp_receive_callback(struct urb *urb);
static int xusb_probe(struct usb_interface *interface,
const struct usb_device_id *id);
static void xusb_disconnect(struct usb_interface *interface);
@ -869,7 +857,7 @@ static void xusb_disconnect(struct usb_interface *interface)
mutex_unlock(&protect_xusb_devices);
}
static void xpp_send_callback(USB_PASS_CB(urb))
static void xpp_send_callback(struct urb *urb)
{
struct uframe *uframe = urb_to_uframe(urb);
xframe_t *xframe = &uframe->xframe;
@ -935,7 +923,7 @@ static void xpp_send_callback(USB_PASS_CB(urb))
XUSB_ERR(xusb, "A urb from non-connected device?\n");
}
static void xpp_receive_callback(USB_PASS_CB(urb))
static void xpp_receive_callback(struct urb *urb)
{
struct uframe *uframe = urb_to_uframe(urb);
xframe_t *xframe = &uframe->xframe;
@ -1026,15 +1014,7 @@ static int __init xpp_usb_init(void)
xusb_cache =
kmem_cache_create("xusb_cache", sizeof(xframe_t) + XFRAME_DATASIZE,
#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 22)) && defined(CONFIG_SLUB)
0, SLAB_STORE_USER,
#else
0, 0,
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23)
NULL,
#endif
NULL);
0, 0, NULL);
if (!xusb_cache) {
ret = -ENOMEM;
goto failure;

View File

@ -60,43 +60,16 @@
#define dahdi_pci_module pci_register_driver
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
#define DAHDI_IRQ_HANDLER(a) static irqreturn_t a(int irq, void *dev_id)
#else
#define DAHDI_IRQ_HANDLER(a) static irqreturn_t a(int irq, void *dev_id, struct pt_regs *regs)
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26)
#ifdef CONFIG_PCI
#include <linux/pci-aspm.h>
#endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29)
#define HAVE_NET_DEVICE_OPS
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
# ifdef RHEL_RELEASE_VERSION
# if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5, 6)
#define dev_name(dev) ((dev)->bus_id)
#define dev_set_name(dev, format, ...) \
snprintf((dev)->bus_id, BUS_ID_SIZE, format, ## __VA_ARGS__)
# else
#define dev_set_name(dev, format, ...) \
do { \
kobject_set_name(&(dev)->kobj, format, ## __VA_ARGS__); \
snprintf((dev)->bus_id, BUS_ID_SIZE, \
kobject_name(&(dev)->kobj)); \
} while (0)
# endif
# else
#define dev_name(dev) ((dev)->bus_id)
#define dev_set_name(dev, format, ...) \
snprintf((dev)->bus_id, BUS_ID_SIZE, format, ## __VA_ARGS__)
# endif
#endif
/* __dev* were removed in 3.8. They still have effect in 2.6.18. */
#ifndef __devinit
# define __devinit
@ -1476,100 +1449,9 @@ static inline void *PDE_DATA(const struct inode *inode)
#endif
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 31)
#define KERN_CONT ""
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
# ifndef RHEL_RELEASE_VERSION
/* I'm not sure which 5.x release this was backported into. */
static inline int try_wait_for_completion(struct completion *x)
{
unsigned long flags;
int ret = 1;
spin_lock_irqsave(&x->wait.lock, flags);
if (!x->done)
ret = 0;
else
x->done--;
spin_unlock_irqrestore(&x->wait.lock, flags);
return ret;
}
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26)
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
static inline struct proc_dir_entry *proc_create_data(const char *name,
mode_t mode,
struct proc_dir_entry *parent,
const struct file_operations *proc_fops,
void *data)
{
struct proc_dir_entry *pde = create_proc_entry(name, mode, parent);
if (!pde)
return NULL;
pde->proc_fops = proc_fops;
pde->data = data;
return pde;
}
#endif /* CONFIG_PROC_FS */
#ifndef clamp
#define clamp(x, low, high) min(max(low, x), high)
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 25)
/* Some distributions backported fatal_signal_pending so we'll use a macro to
* override the inline function definition. */
#define fatal_signal_pending(p) \
(signal_pending((p)) && sigismember(&(p)->pending.signal, SIGKILL))
#ifdef CONFIG_PCI
#ifndef PCIE_LINK_STATE_L0S
#define PCIE_LINK_STATE_L0S 1
#define PCIE_LINK_STATE_L1 2
#define PCIE_LINK_STATE_CLKPM 4
#endif
#define pci_disable_link_state dahdi_pci_disable_link_state
void dahdi_pci_disable_link_state(struct pci_dev *pdev, int state);
#endif /* CONFIG_PCI */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22)
#define list_first_entry(ptr, type, member) \
list_entry((ptr)->next, type, member)
#define strncasecmp strnicmp
#ifndef __packed
#define __packed __attribute__((packed))
#endif
#include <linux/ctype.h>
/* A define of 'clamp_val' happened to be added in the patch
* linux-2.6-sata-prep-work-for-rhel5-3.patch kernel-2.6.spec that also
* backported support for strcasecmp to some later RHEL/Centos kernels.
* If you have an older kernel that breaks because strcasecmp is already
* defined, somebody out-smarted us. In that case, replace the line below
* with '#if 0' to get the code building, and file a bug report at
* https://issues.asterisk.org/ .
*/
#ifndef clamp_val
static inline int strcasecmp(const char *s1, const char *s2)
{
int c1, c2;
do {
c1 = tolower(*s1++);
c2 = tolower(*s2++);
} while (c1 == c2 && c1 != 0);
return c1 - c2;
}
#endif /* clamp_val */
#endif /* 2.6.22 */
#endif /* 2.6.25 */
#endif /* 2.6.26 */
#endif /* 2.6.27 */
#endif /* 2.6.31 */
#endif /* 3.10.0 */
#endif /* 3.16.0 */