您的位置:首页 > 运维架构 > Linux

Linux Kernel中的C语言技巧(1)

2011-12-07 00:00 190 查看
先想想以下几个问题:
1. 如果给你n个B对象,你如何创建一个链表将这些数据组织起来?
常用方法是:
/* Example: a doubly linked list node carrying its own link pointers.
 * Fixes vs. the original snippet: the struct definition lacked its
 * required trailing semicolon, and the "data part" placeholder was
 * not valid C — it is now a comment so the example actually compiles. */
struct B
{
	/* data part */
	struct B *next;
	struct B *prev;		/* for doubly linked list */
};

2. 如果给你n个B和一个A,将A作为head将B组织成链表,如何实现?
/* Example: a dedicated head type 'A' chaining 'B' nodes. The head
 * type differs from the node type, which is exactly why naive
 * intrusive lists need one implementation per (head, node) pair —
 * the problem list_head later solves.
 * Fixes vs. the original snippet: missing trailing semicolon, and
 * the "data part" placeholder was not valid C. */
struct A
{
	/* data part for A */
	struct B *next;
	struct B *prev;		/* for doubly linked list */
};

3. 如果在一个项目中,有很多如上的应用场景,An, Bn.....我们如何组织我们的代码呢?
如果如上实现,我们会发现,对于每一种链表,我们都需要写相应的代码,并且,代码虽然相似,却不同。

/* First of two near-identical node types, illustrating the code
 * duplication that per-type intrusive lists force on you.
 * Fixes vs. the original snippet: missing trailing semicolon, and
 * the "data part" placeholder was not valid C. */
struct B1
{
	/* data part */
	struct B1 *next;
	struct B1 *prev;	/* for doubly linked list */
};

/* Second duplicated node type — byte-for-byte the same shape as B1
 * except for the tag, demonstrating the maintenance burden.
 * Fixes vs. the original snippet: missing trailing semicolon, and
 * the "data part" placeholder was not valid C. */
struct B2
{
	/* data part */
	struct B2 *next;
	struct B2 *prev;	/* for doubly linked list */
};

这样做的结果,可想而知:大量重复的类似代码,维护和扩展的负担非常沉重。

如何解决这个问题呢？以下简单介绍 Linux Kernel 中的一种解决办法。首先看以下几个数据结构定义和宏定义。
/* The generic circular doubly linked list link. Embed one of these in
 * any structure that needs to live on a list; the list machinery only
 * ever touches this member, never the surrounding structure. */
struct list_head {
	struct list_head *next;
	struct list_head *prev;
};

/* Initialise @list as an empty circular list: by convention an empty
 * list is the sentinel node pointing at itself in both directions. */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
	list->prev = list->next = list;
}

/* Splice @new in between two known-adjacent nodes @prev and @next,
 * updating all four affected link pointers. The four stores are
 * independent, so their order does not matter for a single writer. */
static inline void __list_add(struct list_head *new,
			      struct list_head *prev,
			      struct list_head *next)
{
	new->prev = prev;
	new->next = next;
	prev->next = new;
	next->prev = new;
}

/*
 * Iterate over a list, with the cursor typed as the *containing* entry
 * rather than the raw list_head.
 * @pos:    loop cursor — a pointer to the entry type
 * @head:   the sentinel list_head of the list (not an entry itself)
 * @member: name of the embedded list_head field inside the entry type
 *
 * list_entry() maps each link back to its container; the comma
 * expression issues a prefetch() hint for the next node while the
 * actual loop condition is "&pos->member != (head)" — i.e. stop when
 * the cursor's link wraps back around to the sentinel.
 */
#define list_for_each_entry(pos, head, member) \
for (pos = list_entry((head)->next, typeof(*pos), member); \
prefetch(pos->member.next), &pos->member != (head); \
pos = list_entry(pos->member.next, typeof(*pos), member))

/* Map a struct list_head pointer back to the entry that embeds it.
 * @ptr:    pointer to the embedded list_head
 * @type:   type of the containing structure
 * @member: name of the list_head field within @type
 * Thin alias for container_of (explained in the author's previous post). */
#define list_entry(ptr, type, member) \
container_of(ptr, type, member)

/* Append transfer @t to the end of message @m's transfer list.
 * Fix vs. the original snippet: the return type was omitted (implicit
 * int is invalid since C99); the kernel declares this helper as
 * "static inline void". */
static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
	/* tail insertion into the circular doubly linked list */
	list_add_tail(&t->transfer_list, &m->transfers);
}

基础好一点的看了以上几个定义应该就明白了,它实现了一个通用的循环双向链表的操作。
如果不了解container_of这个宏,可以参考我上篇博文。
有了以上工具,我们就可以如下解决文章开头的几个问题了:
/* The node type rewritten to embed a generic list_head instead of
 * hand-rolled next/prev pointers — the same list code now serves
 * every such type.
 * Fixes vs. the original snippet: missing trailing semicolon, and
 * the "data part" placeholder was not valid C. */
struct B
{
	/* data part */
	struct list_head B_list;	/* link into whatever list B lives on */
};

/* The head type rewritten the same way: A_head acts as the sentinel
 * list_head from which the chained B nodes hang.
 * Fixes vs. the original snippet: missing trailing semicolon, and
 * the "data part" placeholder was not valid C. */
struct A
{
	/* data part */
	struct list_head A_head;	/* sentinel/head of the list of B nodes */
};
这样,通过上面的几个宏,就可以很容易的将A和B组织成一个双向链表,可以进行数据插入和迭代遍历。
以下举个linux中的实例:(spi传输实例,以下的两个结构体将处理和传输实体分开,减少耦合,这是一种良好的设计方法)
/* One read/write segment of an SPI message; segments are chained onto
 * spi_message.transfers through the embedded transfer_list link
 * (see spi_message_add_tail above). */
struct spi_transfer {
/* it's ok if tx_buf == rx_buf (right?)
* for MicroWire, one buffer must be null
* buffers must work with dma_*map_single() calls, unless
* spi_message.is_dma_mapped reports a pre-existing mapping
*/
const void *tx_buf; /* transmit buffer; per the comment above, may be NULL */
void *rx_buf; /* receive buffer; may be NULL */
unsigned len; /* transfer length — presumably bytes; confirm against spi.h */

dma_addr_t tx_dma; /* DMA addresses; per the comment above, meaningful when */
dma_addr_t rx_dma; /* spi_message.is_dma_mapped reports a pre-existing mapping */

unsigned cs_change:1; /* NOTE(review): presumably alters chip-select behavior after this transfer — confirm */
u8 bits_per_word; /* per-transfer word size */
u16 delay_usecs; /* delay after this transfer — assumed microseconds from the name */
u32 speed_hz; /* per-transfer clock rate */

struct list_head transfer_list; /* link in spi_message.transfers */
};

/* A group of spi_transfer segments submitted as one unit. Keeping the
 * message (bookkeeping/completion) separate from the transfers
 * (payload) is the decoupling the article highlights. */
struct spi_message {
struct list_head transfers; /* head of the chained spi_transfer.transfer_list links */

struct spi_device *spi; /* target device; filled in by spi_async() below */

unsigned is_dma_mapped:1; /* if set, the transfers' tx_dma/rx_dma are pre-mapped */

/* REVISIT: we might want a flag affecting the behavior of the
* last transfer ... allowing things like "read 16 bit length L"
* immediately followed by "read L bytes". Basically imposing
* a specific message scheduling algorithm.
*
* Some controller drivers (message-at-a-time queue processing)
* could provide that as their default scheduling algorithm. But
* others (with multi-message pipelines) could need a flag to
* tell them about such special cases.
*/

/* completion is reported through a callback */
void (*complete)(void *context);
void *context; /* opaque argument handed back to complete() */
unsigned actual_length; /* presumably bytes actually transferred — confirm */
int status; /* set to -EINPROGRESS by spi_async(); final value reported at completion */

/* for optional use by whatever driver currently owns the
* spi_message ... between calls to spi_async and then later
* complete(), that's the spi_master controller driver.
*/
struct list_head queue;
void *state;
};

/* Queue @message for asynchronous transfer on @spi's bus.
 * Validates each transfer against the controller's duplex
 * capabilities, stamps the message, then hands it to the controller
 * driver's transfer() hook. Returns 0/driver status on submission,
 * -EINVAL when a transfer conflicts with a half-duplex link. */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
	struct spi_master *master = spi->master;

	/* Half-duplex links include original MicroWire, and ones with
	 * only one data pin like SPI_3WIRE (switches direction) or where
	 * either MOSI or MISO is missing. They can also be caused by
	 * software limitations.
	 */
	if ((master->flags & SPI_MASTER_HALF_DUPLEX) || (spi->mode & SPI_3WIRE)) {
		unsigned flags = master->flags;
		struct spi_transfer *xfer;

		/* reject any transfer the link cannot physically carry */
		list_for_each_entry(xfer, &message->transfers, transfer_list) {
			if (xfer->tx_buf && xfer->rx_buf)
				return -EINVAL;
			if (xfer->tx_buf && (flags & SPI_MASTER_NO_TX))
				return -EINVAL;
			if (xfer->rx_buf && (flags & SPI_MASTER_NO_RX))
				return -EINVAL;
		}
	}

	message->spi = spi;
	message->status = -EINPROGRESS;
	return master->transfer(spi, message);
}

结束。
内容来自用户分享和网络整理,不保证内容的准确性,如有侵权内容,可联系管理员处理 点击这里给我发消息