diff -Naur rtnet/README.ipfragmentation rtnet-pre-0-2-10-patched/README.ipfragmentation
--- rtnet/README.ipfragmentation	Thu Jan  1 01:00:00 1970
+++ rtnet-pre-0-2-10-patched/README.ipfragmentation	Mon May 19 14:24:31 2003
@@ -0,0 +1,43 @@
+README.ipfragmentation
+=======================
+19-May-2003 - Mathias Koehrer (mathias_koehrer@yahoo.de)
+
+This file documents the patch against rtnet (pre-)0.2.10.
+-------------------------------------------------------------------
+
+Standard "rtnet" handles only IP (UDP) messages that fit into one
+IP frame (about 1400 byte UDP data).
+
+With this patch, rtnet is extended to handle longer IP (UDP) packets,
+both when sending and when receiving.
+
+The maximum size of a complete IP message (including headers!) has to be
+specified via the module parameter "MAX_RTSKB_SIZE" of the rtnet module.
+If MAX_RTSKB_SIZE is not set, the standard size (one Ethernet frame) is used,
+in which case IP fragmentation makes no sense.
+Note: The IP specification allows messages of up to 64 kByte.
+
+Example: 
+If your IP messages are at most 8 kByte in size, load rtnet with
+something like:
+# insmod rtnet  MAX_RTSKB_SIZE=8300
+
+(Note: Add a few extra bytes so that the headers can be stored as well...)
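+
+As a rough illustration (assuming a standard 1500 byte Ethernet MTU and the
+MTU_CORRECTION of 14 bytes used in ip_output.c): each fragment then carries
+up to (1500 - 20 - 14) & ~7 = 1464 data bytes, so an 8 kByte UDP message
+(8192 data bytes + 8 byte UDP header) is sent as 6 IP fragments. The
+reassembled message occupies 20 + 8 + 8192 = 8220 bytes in a single rtskb,
+which is why MAX_RTSKB_SIZE=8300 leaves some headroom.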
+
+
+Restrictions:
+-------------
+The drawback of the approach used in this patch is that ALL buffers used
+by rtnet have the size specified with the "MAX_RTSKB_SIZE" parameter.
+So if your system does not have enough memory, this approach can fail...
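+
+For instance (purely illustrative numbers): a pool of 16 rtskbs with
+MAX_RTSKB_SIZE=8300 already pins roughly 16 * 8300 = 130 kByte of kernel
+memory, and with the full 64 kByte IP message size the same pool would
+need about 1 MByte.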
+
+
+Possible Bugs:
+--------------
+Sending UDP messages via the rt*socket_writev() function may fail
+(possibly even with non-fragmented IP messages...).
+
+IP fragmentation has only been tested with the IPv4/UDP protocol.
+
+
diff -Naur rtnet/include/rtnet_internal.h rtnet-pre-0-2-10-patched/include/rtnet_internal.h
--- rtnet/include/rtnet_internal.h	Wed Jan 29 17:09:02 2003
+++ rtnet-pre-0-2-10-patched/include/rtnet_internal.h	Mon May 19 13:46:03 2003
@@ -29,6 +29,11 @@
 extern void rt_memcpy_tokerneliovec(struct iovec *iov, unsigned char *kdata, int len);
 extern int  rt_memcpy_fromkerneliovec(unsigned char *kdata,struct iovec *iov,int len);
 
+/****************************************************************************************
+ * IP fragmentation
+ ****************************************************************************************/
+extern void rt_ip_fragment_cleanup(void);
+extern void rt_ip_fragment_init(void);
 
 #endif //__RTNET_INTERNAL_H__
 
diff -Naur rtnet/ipv4/ip_fragment.c rtnet-pre-0-2-10-patched/ipv4/ip_fragment.c
--- rtnet/ipv4/ip_fragment.c	Wed Jan 29 17:09:02 2003
+++ rtnet-pre-0-2-10-patched/ipv4/ip_fragment.c	Mon May 19 13:34:14 2003
@@ -1,6 +1,7 @@
 /* ip_fragment.c
  *
  * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ * Extended 2003 by Mathias Koehrer <mathias_koehrer@yahoo.de>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -16,44 +17,267 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
+
+#include <net/checksum.h>
 #include <rtnet.h>
+#include <rtnet_internal.h>
+
+#include <linux/ip.h>
+#include <linux/in.h>
+
+
+/* This define sets the number of incoming fragmented IP messages that
+ * can be handled in parallel.
+ */
+#define COLLECTOR_COUNT 4
+
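+/* A collector that has not been touched for this many calls to rt_ip_defrag()
+ * is considered stale and is freed by garbage_collect(). */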
+#define GARBAGE_COLLECT_LIMIT 50
+
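+/* One reassembly context ("collector") per fragmented IP datagram in flight.
+ * Incoming fragments are matched to their collector by the
+ * (saddr, daddr, id, protocol) tuple of the IP header. */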
+struct collector_str 
+{
+    
+    int   in_use;
+    __u32 saddr;
+    __u32 daddr;
+    __u16 id;
+    __u8  protocol;
+
+    struct rtskb *skb;
+    int   collected;
+    unsigned int   last_access;
+};
+static struct collector_str collector[COLLECTOR_COUNT];
+
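+/* Incremented on every call to rt_ip_defrag(); serves as a coarse timestamp
+ * for the garbage collector below. */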
+static unsigned int counter = 0;
+
+static SEM collector_sem;
+
+extern int MAX_RTSKB_SIZE;
+
+
+
+/*
+ * Return a pointer to the collector that holds the message matching
+ * the given IP header (param iph).
+ * If no matching collector is found, a new one is created.
+ */
+static struct collector_str * get_collector(struct iphdr *iph)
+{
+    int i;
+    struct collector_str *p_coll;
+
+    rt_sem_wait(&collector_sem);
+
+    /* Search in existing collectors... */
+    for (i=0; i < COLLECTOR_COUNT; i++)
+    {
+        p_coll = &collector[i];
+        if ( p_coll->in_use  &&    
+             iph->saddr == p_coll->saddr  &&
+             iph->daddr == p_coll->daddr  &&
+             iph->id    == p_coll->id     && 
+             iph->protocol == p_coll->protocol )
+        {
+            goto success_out;
+        }
+    }
+
+    /* Nothing found. Create a new one... */
+    for (i=0; i < COLLECTOR_COUNT; i++)
+    {
+        if ( ! collector[i].in_use )
+        {
+            collector[i].in_use = 1;
+            
+            p_coll = &collector[i];
+            p_coll->last_access = counter;
+            p_coll->collected = 0;
+            if (!(p_coll->skb = alloc_rtskb(MAX_RTSKB_SIZE)))
+            {
+                collector[i].in_use = 0;
+                goto error_out;
+            }
+            p_coll->saddr = iph->saddr;
+            p_coll->daddr = iph->daddr;
+            p_coll->id    = iph->id;
+            p_coll->protocol = iph->protocol;
+            goto success_out;
+        }
+    }
+
+error_out:
+    rt_sem_signal(&collector_sem);
+    rt_printk("RTnet: IP fragmentation - no collector available\n");
+    return NULL;
+
+success_out:
+    rt_sem_signal(&collector_sem);
+    return p_coll;
+
+
+}
 
+
+static void cleanup_collector(void)
+{
+    int i;
+    for (i=0; i < COLLECTOR_COUNT; i++)
+    {
+        if ( collector[i].in_use &&
+             collector[i].skb )
+        {
+            collector[i].in_use = 0;
+            kfree_rtskb(collector[i].skb);
+            collector[i].skb = NULL;
+        }
+    }
+}
+
+/*
+ * This is a very simple garbage collector.
+ * Whenever the last access to one of the collectors was a while ago,
+ * that collector is freed...
+ * Under normal conditions it should never be necessary to collect
+ * any garbage.
+ */
+static void garbage_collect(void)
+{
+    /* Kick off all collectors that are not in use anymore... */
+    int i;
+    for (i=0; i < COLLECTOR_COUNT; i++)
+    {
+        if ( collector[i].in_use && 
+             (counter - collector[i].last_access > GARBAGE_COLLECT_LIMIT))
+        {
+            rt_printk("RTnet: IP fragmentation garbage collection (saddr:%x, daddr:%x)\n",
+                        collector[i].saddr, collector[i].daddr);
+            kfree_rtskb(collector[i].skb);
+            collector[i].skb = NULL;
+            collector[i].in_use = 0;
+        }
+    }
+}
+
+
+/* 
+ * This function returns an rtskb that contains the complete, reassembled IP message.
+ * If not all fragments of the IP message have been received yet, it returns NULL.
+ */
 struct rtskb *rt_ip_defrag(struct rtskb *skb)
 {
-	return skb;
-#if 0
+        int offset;
+        int end;
+        int flags;
+        int ihl;
+
 	struct iphdr *iph = skb->nh.iph;
-	struct ipq *qp;
-	struct net_device *dev;
-	
-	IP_INC_STATS_BH(IpReasmReqds);
+        struct collector_str *p_coll = 0;
+
+
+        counter++;
 
-	/* Start by cleaning up the memory. */
-	if (atomic_read(&ip_frag_mem) > sysctl_ipfrag_high_thresh)
-		ip_evictor();
+        /* Check whether there is already a "collector" for this datagram: */
+        p_coll = get_collector(iph);
+        if (! p_coll )
+        {
+            /* Not able to create a collector.
+             * Stop and discard skb */
+            kfree_rtskb(skb);
+            return NULL;
+        }
+        p_coll->last_access = counter;
+        
 
-	dev = skb->dev;
+        /* Write the data to the collector */
+        offset = ntohs(iph->frag_off);
+        flags = offset & ~IP_OFFSET;
+        offset &= IP_OFFSET;
+        offset <<= 3;   /* offset is in 8-byte chunks */
+        ihl = iph->ihl * 4;
+        end = offset + skb->len ;
 
-	/* Lookup (or create) queue header */
-	if ((qp = ip_find(iph)) != NULL) {
-		struct sk_buff *ret = NULL;
+        if (end > MAX_RTSKB_SIZE)
+        {
+            struct rtskb *temp_skb = p_coll->skb;
+            
+            rt_printk("RTnet: discarding incoming IP fragment (offset %i, end:%i)\n", 
+                       offset, end);
+            kfree_rtskb(skb);
+            p_coll->skb = NULL;
+            p_coll->in_use = 0;
+            kfree_rtskb(temp_skb);
+            return NULL;
+        }
 
-		spin_lock(&qp->lock);
+        /* Copy data: */
+        memcpy(p_coll->skb->buf_start + ihl + offset, 
+                     skb->data + ihl,
+                     skb->len - ihl);
+        p_coll->collected += skb->len - ihl;
 
-		ip_frag_queue(qp, skb);
+        
 
-		if (qp->last_in == (FIRST_IN|LAST_IN) &&
-		    qp->meat == qp->len)
-			ret = ip_frag_reasm(qp, dev);
+        /* Is this the final fragment? */
+        if ((flags & IP_MF) == 0)
+        {
+            /* Determine complete skb length (including header) */
+            p_coll->skb->data_len = offset + skb->len;
+        }
 
-		spin_unlock(&qp->lock);
-		ipq_put(qp);
-		return ret;
-	}
+        /* Is this the first fragment? */
+        if (offset == 0)
+        {
+            /* Copy the header to the collector skb */
+            memcpy(p_coll->skb->buf_start, skb->data, ihl);
+            p_coll->collected += ihl;
+           
+            /* Set the pointers in the collector skb: */
+            p_coll->skb->data =   p_coll->skb->buf_start;
+            
+            p_coll->skb->nh.iph = (struct iphdr*) p_coll->skb->buf_start;
+            p_coll->skb->h.raw  = p_coll->skb->buf_start + ihl;
+        }
 
-	IP_INC_STATS_BH(IpReasmFails);
-#endif /* 0 */
-	kfree_rtskb(skb);
+
+        /* skb is no longer needed. Free it. */
+        kfree_rtskb(skb);
+
+        
+        /* All data bytes received? */
+        if (p_coll->collected == p_coll->skb->data_len)
+        {
+            /* Return p_coll->skb */
+            struct rtskb *ret_skb = p_coll->skb;
+            ret_skb->nh.iph->tot_len = htons(ret_skb->data_len);
+            p_coll->skb = NULL;
+            p_coll->in_use = 0;
+
+            garbage_collect();
+            return ret_skb;
+        }
+
+        
+        /* Not all bytes received, return NULL */
 	return NULL;
 }
 
+void rt_ip_fragment_init(void)
+{
+    int i;
+    rt_typed_sem_init(&collector_sem, 1, BIN_SEM);
+
+    for (i=0; i < COLLECTOR_COUNT; i++)
+    {
+        collector[i].in_use = 0;
+        collector[i].skb = NULL;
+    }
+    
+}
+
+void rt_ip_fragment_cleanup(void)
+{
+    rt_sem_delete(&collector_sem);
+    cleanup_collector();
+}
+
diff -Naur rtnet/ipv4/ip_output.c rtnet-pre-0-2-10-patched/ipv4/ip_output.c
--- rtnet/ipv4/ip_output.c	Sat May 17 21:28:11 2003
+++ rtnet-pre-0-2-10-patched/ipv4/ip_output.c	Mon May 19 14:07:15 2003
@@ -52,6 +52,118 @@
 
 static u16 rt_ip_id_count = 0;
 
+/* The MTU_CORRECTION is necessary at least with my test setup (eepro100, hub),
+ * as the reported MTU value is too large.
+ */
+#define MTU_CORRECTION 14
+
+/*
+ * Slow path for fragmented packets...
+ * */
+int rt_ip_build_xmit_slow(struct rtsocket *sk, 
+	            int getfrag (const void *, char *, unsigned int, unsigned int),
+		    const void *frag, 
+		    unsigned length, 
+		    struct rt_rtable *rt, 
+		    int flags)
+{
+	int	err=0;
+	struct	rtskb *skb;
+	struct	iphdr *iph;
+	
+	struct	rtnet_device *rtdev=rt->rt_dev;
+        int mtu = rtdev->mtu;
+        int fragheaderlen, fragdatalen;
+        int offset = 0;
+        u16 msg_rt_ip_id;
+
+
+
+        fragheaderlen = sizeof(struct iphdr); /* 20 byte... */
+        
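+        /* Each fragment's payload size must be a multiple of 8 bytes, because
+         * the IP fragment offset field is expressed in 8-byte units. */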
+        fragdatalen  = ((mtu - fragheaderlen - MTU_CORRECTION) & ~7 ); 
+
+        /* Store id in local variable */
+        msg_rt_ip_id = ++rt_ip_id_count;
+       
+        for (offset = 0; offset < length; offset += fragdatalen)
+        {
+            int fraglen; /* length of this particular fragment, including the IP header */
+            __u16 frag_off = offset >> 3 ;
+
+            if (offset >= length - fragdatalen)
+            {
+                /* last fragment */
+                fraglen = fragheaderlen + length - offset ;
+            }
+            else
+            {
+                fraglen = fragheaderlen + fragdatalen;
+                frag_off |= IP_MF;
+            }
+
+            {
+                int hh_len = (rtdev->hard_header_len+15)&~15;
+
+                skb = alloc_rtskb(fraglen + hh_len + 15);
+                if (skb==NULL)
+                        goto no_rtskb; 
+                rtskb_reserve(skb, hh_len);
+            }
+            
+            skb->dst=rt; 
+            skb->rtdev=rt->rt_dev;
+            skb->nh.iph = iph = (struct iphdr *) rtskb_put(skb, fraglen);
+            
+            
+            iph->version=4;
+            iph->ihl=5;    /* 20 byte header - no options */
+            iph->tos=sk->tos;
+            iph->tot_len = htons(fraglen);
+            iph->id=htons(msg_rt_ip_id);
+            iph->frag_off = htons(frag_off);
+            iph->ttl=255;
+            iph->protocol=sk->protocol;
+            iph->saddr=rt->rt_src;
+            iph->daddr=rt->rt_dst;
+            iph->check=0;
+            iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+            
+            if ( (err=getfrag(frag, ((char *)iph)+iph->ihl*4, offset, fraglen - fragheaderlen)) )
+                    goto error;
+
+            if ( !(rtdev->hard_header) ) {
+                 goto error;
+            } else if (rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->rt_dst_mac_addr, rtdev->dev_addr, skb->len)<0) {
+
+                    goto error;
+            }
+
+            err = rtdev_xmit(skb);
+            if (err) {
+                    return -EAGAIN;
+            } 
+
+        }
+        return 0;
+	
+error:
+	kfree_rtskb(skb);
+no_rtskb:
+	return err; 
+}
 /*
  *	Fast path for unfragmented packets.
  */
@@ -74,6 +186,12 @@
 	 *	choice RAW frames within 20 bytes of maximum size(rare) to the long path
 	 */
 	length += sizeof(struct iphdr);
+
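+        /* Messages that no longer fit into one (corrected) MTU are handed to
+         * the fragmenting slow path. */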
+        if (length > (rtdev->mtu - MTU_CORRECTION)) 
+        {
+            return rt_ip_build_xmit_slow(sk, getfrag, frag, 
+                            length - sizeof(struct iphdr), rt, flags);
+        }
 	
 	df = htons(IP_DF);
 
@@ -161,7 +279,7 @@
 void rt_ip_init(void)
 {
 	rtdev_add_pack(&ip_packet_type);
-
+        rt_ip_fragment_init();
 }
 
 
@@ -171,6 +289,7 @@
  */
 void rt_ip_release(void)
 {
+        rt_ip_fragment_cleanup();
 	rtdev_remove_pack(&ip_packet_type);
 }
 
diff -Naur rtnet/ipv4/udp.c rtnet-pre-0-2-10-patched/ipv4/udp.c
--- rtnet/ipv4/udp.c	Fri May 16 20:34:27 2003
+++ rtnet-pre-0-2-10-patched/ipv4/udp.c	Mon May 19 13:51:54 2003
@@ -187,6 +187,12 @@
 
 		/* copy the data */
 		copied = skb->len-sizeof(struct udphdr);
+                /* The data must not be longer than the value of the parameter "len" in
+                 * the socket recvmsg call */
+                if (copied > msg->msg_iov->iov_len)
+                {
+                    copied = msg->msg_iov->iov_len;
+                }
 		rt_memcpy_tokerneliovec(msg->msg_iov, skb->h.raw+sizeof(struct udphdr), copied);
 
 		kfree_rtskb(skb);
@@ -216,20 +222,28 @@
 static int rt_udp_getfrag(const void *p, char * to, unsigned int offset, unsigned int fraglen) 
 {
 	struct udpfakehdr *ufh = (struct udpfakehdr *)p;
-	
+
 	if (offset==0) {
+
+                /* Checksum over the complete data part of the UDP message
+                 * (computed from the iovec up front, since getfrag() sees only
+                 * one fragment at a time): */
+ 		ufh->wcheck = csum_partial(ufh->iov->iov_base, ufh->iov->iov_len, ufh->wcheck);
 	
 		rt_memcpy_fromkerneliovec(to+sizeof(struct udphdr), ufh->iov,fraglen-sizeof(struct udphdr));
 		
- 		ufh->wcheck = csum_partial(to+sizeof(struct udphdr), fraglen-sizeof(struct udphdr), ufh->wcheck);
+                /* Checksum of the udp header: */
  		ufh->wcheck = csum_partial((char *)ufh, sizeof(struct udphdr),ufh->wcheck);
+
 		ufh->uh.check = csum_tcpudp_magic(ufh->saddr, ufh->daddr, ntohs(ufh->uh.len), IPPROTO_UDP, ufh->wcheck);
 		
 		if (ufh->uh.check == 0)
 			ufh->uh.check = -1;
+
 		memcpy(to, ufh, sizeof(struct udphdr));
 		return 0;
 	}
+        
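+        /* Fragments after the first: copy the next chunk of payload from the
+         * iovec (these fragments carry no UDP header). */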
+        rt_memcpy_fromkerneliovec(to, ufh->iov, fraglen);
+        
 	
 	return 0;
 }
diff -Naur rtnet/rtskb.c rtnet-pre-0-2-10-patched/rtskb.c
--- rtnet/rtskb.c	Fri May 16 21:31:45 2003
+++ rtnet-pre-0-2-10-patched/rtskb.c	Tue May 20 09:32:18 2003
@@ -29,12 +29,15 @@
 static int rtskb_pool_default = DEFAULT_RTSKB_POOL_DEF;
 static int rtskb_pool_min = DEFAULT_MIN_RTSKB_DEF;
 static int rtskb_pool_max = DEFAULT_MAX_RTSKB_DEF;
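+/* Size of every rtskb data buffer. It can be raised via the module parameter
+ * so that a complete reassembled IP message (headers included) fits into a
+ * single rtskb. */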
+int MAX_RTSKB_SIZE = ETH_FRAME_LEN;
 MODULE_PARM(rtskb_pool_default, "i");
 MODULE_PARM(rtskb_pool_min, "i");
 MODULE_PARM(rtskb_pool_max, "i");
+MODULE_PARM(MAX_RTSKB_SIZE, "i");
 MODULE_PARM_DESC(rtskb_pool_default, "number of Realtime Socket Buffers in pool");
 MODULE_PARM_DESC(rtskb_pool_min, "low water mark");
 MODULE_PARM_DESC(rtskb_pool_max, "high water mark");
+MODULE_PARM_DESC(MAX_RTSKB_SIZE, "Maximum size of an rtskb block (relevant for IP fragmentation)");
 
 /**
  * struct rtskb_pool
@@ -171,7 +174,7 @@
 static inline void rtskb_data_init(void *p, kmem_cache_t *cache, unsigned long flags)
 {
 	unsigned char *skb_data = p;
-	memset (skb_data, 0, SKB_DATA_ALIGN(ETH_FRAME_LEN));
+	memset (skb_data, 0, SKB_DATA_ALIGN(MAX_RTSKB_SIZE));
 }
 
 
@@ -184,7 +187,7 @@
 struct rtskb *new_rtskb(void)
 {
 	struct rtskb *skb;
-	unsigned int len = SKB_DATA_ALIGN(ETH_FRAME_LEN);
+	unsigned int len = SKB_DATA_ALIGN(MAX_RTSKB_SIZE);
 
 	if ( !(skb = kmem_cache_alloc(rtskb_cache, GFP_ATOMIC)) ) {
 		printk("RTnet: allocate rtskb failed.\n");
@@ -503,7 +506,7 @@
 		return -ENOMEM;
 	}
 	rtskb_data_cache = kmem_cache_create 
-		(RTSKB_DATA_CACHE, SKB_DATA_ALIGN(ETH_FRAME_LEN), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
+		(RTSKB_DATA_CACHE, SKB_DATA_ALIGN(MAX_RTSKB_SIZE), 0, SLAB_HWCACHE_ALIGN, NULL, NULL);
 	if ( !rtskb_data_cache ) {
 		rt_printk("RTnet: allocating 'rtskb_data_cache' failed.");
 		return -ENOMEM;
