# HG changeset patch
# User Matt Mackall <mpm@selenic.com>
# Date 1249942084 18000
# Node ID ee915ff91fca36aea1c05d4e054f5bf530a34c62
# Parent  9b3b2b2a5478a20ffbf5fa1b36156c6c1533c5cf
imported patch comcerto-skb-header

diff -r 9b3b2b2a5478 -r ee915ff91fca include/linux/skbuff.h
--- a/include/linux/skbuff.h	Mon Aug 10 16:58:57 2009 -0500
+++ b/include/linux/skbuff.h	Mon Aug 10 17:08:04 2009 -0500
@@ -469,6 +469,16 @@
 				     void *here);
 extern void	      skb_under_panic(struct sk_buff *skb, int len,
 				      void *here);
+#if defined(CONFIG_ARCH_COMCERTO)
+extern struct sk_buff *__alloc_skb_header(unsigned int size, u8* data, gfp_t gfp_mask,
+			    int fclone, int node);
+static inline struct sk_buff *alloc_skb_header(unsigned int size, 
+					u8* data,
+					gfp_t priority)
+{
+	return __alloc_skb_header(size, data, priority, 0, -1);
+}
+#endif
 
 extern int skb_append_datato_frags(struct sock *sk, struct sk_buff *skb,
 			int getfrag(void *from, char *to, int offset,
diff -r 9b3b2b2a5478 -r ee915ff91fca net/core/skbuff.c
--- a/net/core/skbuff.c	Mon Aug 10 16:58:57 2009 -0500
+++ b/net/core/skbuff.c	Mon Aug 10 17:08:04 2009 -0500
@@ -231,6 +231,80 @@
 }
 EXPORT_SYMBOL(__alloc_skb);
 
+#if defined(CONFIG_ARCH_COMCERTO)
+/**
+ *	__alloc_skb_header	-	allocate an sk_buff head for external data
+ *	@size: size of the data buffer, including struct skb_shared_info
+ *	@data: externally allocated data buffer to attach to the skb
+ *	@gfp_mask: allocation mask
+ *	@fclone: allocate from fclone cache instead of head cache
+ *	@node: NUMA node to allocate on (-1 for no preference)
+ *
+ *	Allocate a new &sk_buff head around @data. The returned buffer has no
+ *	headroom; tail room is @size minus the skb_shared_info overhead. The
+ *	object has a reference count of one; on failure the return is %NULL.
+ *	Buffers may only be allocated from interrupts using a @gfp_mask of
+ *	%GFP_ATOMIC.
+ */
+struct sk_buff *__alloc_skb_header(unsigned int size, u8* data, gfp_t gfp_mask,
+			    int fclone, int node)
+{
+	struct kmem_cache *cache;
+	struct skb_shared_info *shinfo;
+	struct sk_buff *skb;
+
+	cache = fclone ? skbuff_fclone_cache : skbuff_head_cache;
+
+	if (size <= sizeof(struct skb_shared_info)) {
+		skb = NULL;
+		goto out;
+	}
+
+	size -=  sizeof(struct skb_shared_info)	;
+
+	/* Get the HEAD */
+	skb = kmem_cache_alloc_node(cache, gfp_mask & ~__GFP_DMA, node);
+	if (!skb)
+		goto out;
+	/* prefetch the sk_buff header range into the cache */
+	asm(	"mcrr	p15, 2, %1, %0, c12\n"
+	    :
+	    : "r" ((u32)skb), "r" ((u32)skb +  offsetof(struct sk_buff, truesize))
+	    : "cc");
+
+
+	memset(skb, 0, offsetof(struct sk_buff, truesize));
+	skb->truesize = size + sizeof(struct sk_buff);
+	atomic_set(&skb->users, 1);
+	skb->head = data;
+	skb->data = data;
+	skb->tail = data;
+	skb->end  = data + size;
+	/* make sure we initialize shinfo sequentially */
+	shinfo = skb_shinfo(skb);
+	prefetch((void*)shinfo);
+	atomic_set(&shinfo->dataref, 1);
+	shinfo->nr_frags  = 0;
+	shinfo->gso_size = 0;
+	shinfo->gso_segs = 0;
+	shinfo->gso_type = 0;
+	shinfo->ip6_frag_id = 0;
+	shinfo->frag_list = NULL;
+
+	if (fclone) {
+		struct sk_buff *child = skb + 1;
+		atomic_t *fclone_ref = (atomic_t *) (child + 1);
+
+		skb->fclone = SKB_FCLONE_ORIG;
+		atomic_set(fclone_ref, 1);
+
+		child->fclone = SKB_FCLONE_UNAVAILABLE;
+	}
+out:
+	return skb;
+}
+EXPORT_SYMBOL(__alloc_skb_header);
+#endif
 /**
  *	__netdev_alloc_skb - allocate an skbuff for rx on a specific device
  *	@dev: network device to receive on
