VirtualBox

Ticket #3372: VBoxNetFlt-linux.c

File VBoxNetFlt-linux.c, 36.9 KB (added by Aleksey Ilyushin, 16 years ago)

Version 2.1.4, with debug messages enabled

Line 
1/* $Id: VBoxNetFlt-linux.c 42726 2009-02-12 13:19:23Z aleksey $ */
2/** @file
3 * VBoxNetFlt - Network Filter Driver (Host), Linux Specific Code.
4 */
5
6/*
7 * Copyright (C) 2006-2008 Sun Microsystems, Inc.
8 *
9 * This file is part of VirtualBox Open Source Edition (OSE), as
10 * available from http://www.virtualbox.org. This file is free software;
11 * you can redistribute it and/or modify it under the terms of the GNU
12 * General Public License (GPL) as published by the Free Software
13 * Foundation, in version 2 as it comes in the "COPYING" file of the
14 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
15 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
16 *
17 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa
18 * Clara, CA 95054 USA or visit http://www.sun.com if you need
19 * additional information or have any questions.
20 */
21
22/*******************************************************************************
23* Header Files *
24*******************************************************************************/
25#include "the-linux-kernel.h"
26#include "version-generated.h"
27#include <linux/netdevice.h>
28#include <linux/etherdevice.h>
29#include <linux/rtnetlink.h>
30
31#define LOG_GROUP LOG_GROUP_NET_FLT_DRV
32#include <VBox/log.h>
33#include <VBox/err.h>
34#include <iprt/alloca.h>
35#include <iprt/assert.h>
36#include <iprt/spinlock.h>
37#include <iprt/semaphore.h>
38#include <iprt/initterm.h>
39#include <iprt/process.h>
40#include <iprt/mem.h>
41#include <iprt/log.h>
42#include <iprt/mp.h>
43#include <iprt/mem.h>
44#include <iprt/time.h>
45
46#define VBOXNETFLT_OS_SPECFIC 1
47#include "../VBoxNetFltInternal.h"
48
49#define LOG_ENABLED
50#undef Log
51#define Log(x) printk x
52#undef Log2
53#define Log2(x) printk x
54#undef LogFlow
55#define LogFlow(x) printk x
56#undef LogRel
57#define LogRel(x) printk x
58
59#define VBOX_FLT_NB_TO_INST(pNB) ((PVBOXNETFLTINS)((uint8_t *)pNB - \
60 RT_OFFSETOF(VBOXNETFLTINS, u.s.Notifier)))
61#define VBOX_FLT_PT_TO_INST(pPT) ((PVBOXNETFLTINS)((uint8_t *)pPT - \
62 RT_OFFSETOF(VBOXNETFLTINS, u.s.PacketType)))
63#define VBOX_FLT_XT_TO_INST(pXT) ((PVBOXNETFLTINS)((uint8_t *)pXT - \
64 RT_OFFSETOF(VBOXNETFLTINS, u.s.XmitTask)))
65
66#define VBOX_GET_PCOUNT(pDev) (pDev->promiscuity)
67
68#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22)
69# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb_reset_network_header(skb)
70# define VBOX_SKB_RESET_MAC_HDR(skb) skb_reset_mac_header(skb)
71#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
72# define VBOX_SKB_RESET_NETWORK_HDR(skb) skb->nh.raw = skb->data
73# define VBOX_SKB_RESET_MAC_HDR(skb) skb->mac.raw = skb->data
74#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) */
75
76#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
77# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb)
78#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
79# define CHECKSUM_PARTIAL CHECKSUM_HW
80# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10)
81# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(skb, 0)
82# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
83# if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
84# define VBOX_SKB_CHECKSUM_HELP(skb) skb_checksum_help(&skb, 0)
85# else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
86# define VBOX_SKB_CHECKSUM_HELP(skb) (!skb_checksum_help(skb))
87# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) */
88# endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) */
89#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) */
90
91#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)
92# define VBOX_SKB_IS_GSO(skb) skb_is_gso(skb)
93 /* No features, very dumb device */
94# define VBOX_SKB_GSO_SEGMENT(skb) skb_gso_segment(skb, 0)
95#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
96# define VBOX_SKB_IS_GSO(skb) false
97# define VBOX_SKB_GSO_SEGMENT(skb) NULL
98#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) */
99
100#ifndef NET_IP_ALIGN
101# define NET_IP_ALIGN 2
102#endif
103
104#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12)
105unsigned dev_get_flags(const struct net_device *dev)
106{
107 unsigned flags;
108
109 flags = (dev->flags & ~(IFF_PROMISC |
110 IFF_ALLMULTI |
111 IFF_RUNNING)) |
112 (dev->gflags & (IFF_PROMISC |
113 IFF_ALLMULTI));
114
115 if (netif_running(dev) && netif_carrier_ok(dev))
116 flags |= IFF_RUNNING;
117
118 return flags;
119}
120#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 12) */
121
122/*******************************************************************************
123* Internal Functions *
124*******************************************************************************/
125static int VBoxNetFltLinuxInit(void);
126static void VBoxNetFltLinuxUnload(void);
127
128
129/*******************************************************************************
130* Global Variables *
131*******************************************************************************/
132/**
133 * The (common) global data.
134 */
135#ifdef RT_ARCH_AMD64
136/**
137 * Memory for the executable memory heap (in IPRT).
138 */
139extern uint8_t g_abExecMemory[4096]; /* cannot donate less than one page */
140__asm__(".section execmemory, \"awx\", @progbits\n\t"
141 ".align 32\n\t"
142 ".globl g_abExecMemory\n"
143 "g_abExecMemory:\n\t"
144 ".zero 4096\n\t"
145 ".type g_abExecMemory, @object\n\t"
146 ".size g_abExecMemory, 4096\n\t"
147 ".text\n\t");
148#endif
149
150static VBOXNETFLTGLOBALS g_VBoxNetFltGlobals;
151
152module_init(VBoxNetFltLinuxInit);
153module_exit(VBoxNetFltLinuxUnload);
154
155MODULE_AUTHOR("Sun Microsystems, Inc.");
156MODULE_DESCRIPTION("VirtualBox Network Filter Driver");
157MODULE_LICENSE("GPL");
158#ifdef MODULE_VERSION
159# define xstr(s) str(s)
160# define str(s) #s
161MODULE_VERSION(VBOX_VERSION_STRING " (" xstr(INTNETTRUNKIFPORT_VERSION) ")");
162#endif
163
/* Note: g_VBoxNetFltGlobals (the common global data) is defined once above;
   a second, duplicate tentative definition that previously lived here has
   been removed. */
168
169
170/**
171 * Initialize module.
172 *
173 * @returns appropriate status code.
174 */
175static int __init VBoxNetFltLinuxInit(void)
176{
177 int rc;
178 /*
179 * Initialize IPRT.
180 */
181 rc = RTR0Init(0);
182 if (RT_SUCCESS(rc))
183 {
184#ifdef RT_ARCH_AMD64
185 rc = RTR0MemExecDonate(&g_abExecMemory[0], sizeof(g_abExecMemory));
186 printk("VBoxNetFlt: dbg - g_abExecMemory=%p\n", (void *)&g_abExecMemory[0]);
187 if (RT_FAILURE(rc))
188 {
189 printk("VBoxNetFlt: failed to donate exec memory, no logging will be available.\n");
190 }
191#endif
192 Log(("VBoxNetFltLinuxInit\n"));
193
194 /*
195 * Initialize the globals and connect to the support driver.
196 *
197 * This will call back vboxNetFltOsOpenSupDrv (and maybe vboxNetFltOsCloseSupDrv)
198 * for establishing the connect to the support driver.
199 */
200 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
201 rc = vboxNetFltInitGlobals(&g_VBoxNetFltGlobals);
202 if (RT_SUCCESS(rc))
203 {
204 LogRel(("VBoxNetFlt: Successfully started.\n"));
205 return 0;
206 }
207
208 LogRel(("VBoxNetFlt: failed to initialize device extension (rc=%d)\n", rc));
209 RTR0Term();
210 }
211 else
212 LogRel(("VBoxNetFlt: failed to initialize IPRT (rc=%d)\n", rc));
213
214 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
215 return -RTErrConvertToErrno(rc);
216}
217
218
219/**
220 * Unload the module.
221 *
222 * @todo We have to prevent this if we're busy!
223 */
224static void __exit VBoxNetFltLinuxUnload(void)
225{
226 int rc;
227 Log(("VBoxNetFltLinuxUnload\n"));
228 Assert(vboxNetFltCanUnload(&g_VBoxNetFltGlobals));
229
230 /*
231 * Undo the work done during start (in reverse order).
232 */
233 rc = vboxNetFltTryDeleteGlobals(&g_VBoxNetFltGlobals);
234 AssertRC(rc); NOREF(rc);
235
236 RTR0Term();
237
238 memset(&g_VBoxNetFltGlobals, 0, sizeof(g_VBoxNetFltGlobals));
239
240 Log(("VBoxNetFltLinuxUnload - done\n"));
241}
242
243
244/**
245 * Reads and retains the host interface handle.
246 *
247 * @returns The handle, NULL if detached.
248 * @param pThis
249 */
250DECLINLINE(struct net_device *) vboxNetFltLinuxRetainNetDev(PVBOXNETFLTINS pThis)
251{
252#if 0
253 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
254 struct net_device *pDev = NULL;
255
256 Log(("vboxNetFltLinuxRetainNetDev\n"));
257 /*
258 * Be careful here to avoid problems racing the detached callback.
259 */
260 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
261 if (!ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost))
262 {
263 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
264 if (pDev)
265 {
266 dev_hold(pDev);
267 Log(("vboxNetFltLinuxRetainNetDev: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
268 }
269 }
270 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
271
272 Log(("vboxNetFltLinuxRetainNetDev - done\n"));
273 return pDev;
274#else
275 return (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
276#endif
277}
278
279
280/**
281 * Release the host interface handle previously retained
282 * by vboxNetFltLinuxRetainNetDev.
283 *
284 * @param pThis The instance.
285 * @param pDev The vboxNetFltLinuxRetainNetDev
286 * return value, NULL is fine.
287 */
288DECLINLINE(void) vboxNetFltLinuxReleaseNetDev(PVBOXNETFLTINS pThis, struct net_device *pDev)
289{
290#if 0
291 Log(("vboxNetFltLinuxReleaseNetDev\n"));
292 NOREF(pThis);
293 if (pDev)
294 {
295 dev_put(pDev);
296 Log(("vboxNetFltLinuxReleaseNetDev: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
297 }
298 Log(("vboxNetFltLinuxReleaseNetDev - done\n"));
299#endif
300}
301
302#define VBOXNETFLT_CB_TAG(skb) (0xA1C90000 | (skb->dev->ifindex & 0xFFFF))
303#define VBOXNETFLT_SKB_TAG(skb) (*(uint32_t*)&((skb)->cb[sizeof((skb)->cb)-sizeof(uint32_t)]))
304
305/**
306 * Checks whether this is an mbuf created by vboxNetFltLinuxMBufFromSG,
307 * i.e. a buffer which we're pushing and should be ignored by the filter callbacks.
308 *
309 * @returns true / false accordingly.
310 * @param pBuf The sk_buff.
311 */
312DECLINLINE(bool) vboxNetFltLinuxSkBufIsOur(struct sk_buff *pBuf)
313{
314 return VBOXNETFLT_SKB_TAG(pBuf) == VBOXNETFLT_CB_TAG(pBuf);
315}
316
317
318/**
319 * Internal worker that create a linux sk_buff for a
320 * (scatter/)gather list.
321 *
322 * @returns Pointer to the sk_buff.
323 * @param pThis The instance.
324 * @param pSG The (scatter/)gather list.
325 */
326static struct sk_buff *vboxNetFltLinuxSkBufFromSG(PVBOXNETFLTINS pThis, PINTNETSG pSG, bool fDstWire)
327{
328 struct sk_buff *pPkt;
329 struct net_device *pDev;
330 /*
331 * Because we're lazy, we will ASSUME that all SGs coming from INTNET
332 * will only contain one single segment.
333 */
334 if (pSG->cSegsUsed != 1 || pSG->cbTotal != pSG->aSegs[0].cb)
335 {
336 LogRel(("VBoxNetFlt: Dropped multi-segment(%d) packet coming from internal network.\n", pSG->cSegsUsed));
337 return NULL;
338 }
339 if (pSG->cbTotal == 0)
340 {
341 LogRel(("VBoxNetFlt: Dropped empty packet coming from internal network.\n"));
342 return NULL;
343 }
344
345 /*
346 * Allocate a packet and copy over the data.
347 *
348 */
349 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
350 pPkt = dev_alloc_skb(pSG->cbTotal + NET_IP_ALIGN);
351 if (pPkt)
352 {
353 pPkt->dev = pDev;
354 /* Align IP header on 16-byte boundary: 2 + 14 (ethernet hdr size). */
355 skb_reserve(pPkt, NET_IP_ALIGN);
356 skb_put(pPkt, pSG->cbTotal);
357 memcpy(pPkt->data, pSG->aSegs[0].pv, pSG->cbTotal);
358 /* Set protocol and packet_type fields. */
359 pPkt->protocol = eth_type_trans(pPkt, pDev);
360 pPkt->ip_summed = CHECKSUM_NONE;
361 if (fDstWire)
362 {
363 VBOX_SKB_RESET_NETWORK_HDR(pPkt);
364 /* Restore ethernet header back. */
365 skb_push(pPkt, ETH_HLEN);
366 VBOX_SKB_RESET_MAC_HDR(pPkt);
367 }
368 VBOXNETFLT_SKB_TAG(pPkt) = VBOXNETFLT_CB_TAG(pPkt);
369
370 return pPkt;
371 }
372 else
373 Log(("vboxNetFltLinuxSkBufFromSG: Failed to allocate sk_buff(%u).\n", pSG->cbTotal));
374 pSG->pvUserData = NULL;
375
376 return NULL;
377}
378
379
380/**
381 * Initializes a SG list from an sk_buff.
382 *
383 * @returns Number of segments.
384 * @param pThis The instance.
385 * @param pBuf The sk_buff.
386 * @param pSG The SG.
387 * @param pvFrame The frame pointer, optional.
388 * @param cSegs The number of segments allocated for the SG.
389 * This should match the number in the mbuf exactly!
390 * @param fSrc The source of the frame.
391 */
392DECLINLINE(void) vboxNetFltLinuxSkBufToSG(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, PINTNETSG pSG, unsigned cSegs, uint32_t fSrc)
393{
394 int i;
395 NOREF(pThis);
396
397 Assert(!skb_shinfo(pBuf)->frag_list);
398 pSG->pvOwnerData = NULL;
399 pSG->pvUserData = NULL;
400 pSG->pvUserData2 = NULL;
401 pSG->cUsers = 1;
402 pSG->fFlags = INTNETSG_FLAGS_TEMP;
403 pSG->cSegsAlloc = cSegs;
404
405 if (fSrc & INTNETTRUNKDIR_WIRE)
406 {
407 /*
408 * The packet came from wire, ethernet header was removed by device driver.
409 * Restore it.
410 */
411 skb_push(pBuf, ETH_HLEN);
412 }
413 pSG->cbTotal = pBuf->len;
414#ifdef VBOXNETFLT_SG_SUPPORT
415 pSG->aSegs[0].cb = skb_headlen(pBuf);
416 pSG->aSegs[0].pv = pBuf->data;
417 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
418
419 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
420 {
421 skb_frag_t *pFrag = &skb_shinfo(pBuf)->frags[i];
422 pSG->aSegs[i+1].cb = pFrag->size;
423 pSG->aSegs[i+1].pv = kmap(pFrag->page);
424 printk("%p = kmap()\n", pSG->aSegs[i+1].pv);
425 pSG->aSegs[i+1].Phys = NIL_RTHCPHYS;
426 }
427 pSG->cSegsUsed = ++i;
428#else
429 pSG->aSegs[0].cb = pBuf->len;
430 pSG->aSegs[0].pv = pBuf->data;
431 pSG->aSegs[0].Phys = NIL_RTHCPHYS;
432 pSG->cSegsUsed = i = 1;
433#endif
434
435
436#ifdef PADD_RUNT_FRAMES_FROM_HOST
437 /*
438 * Add a trailer if the frame is too small.
439 *
440 * Since we're getting to the packet before it is framed, it has not
441 * yet been padded. The current solution is to add a segment pointing
442 * to a buffer containing all zeros and pray that works for all frames...
443 */
444 if (pSG->cbTotal < 60 && (fSrc & INTNETTRUNKDIR_HOST))
445 {
446 static uint8_t const s_abZero[128] = {0};
447
448 AssertReturnVoid(i < cSegs);
449
450 pSG->aSegs[i].Phys = NIL_RTHCPHYS;
451 pSG->aSegs[i].pv = (void *)&s_abZero[0];
452 pSG->aSegs[i].cb = 60 - pSG->cbTotal;
453 pSG->cbTotal = 60;
454 pSG->cSegsUsed++;
455 }
456#endif
457 Log4(("vboxNetFltLinuxSkBufToSG: allocated=%d, segments=%d frags=%d next=%p frag_list=%p pkt_type=%x fSrc=%x\n",
458 pSG->cSegsAlloc, pSG->cSegsUsed, skb_shinfo(pBuf)->nr_frags, pBuf->next, skb_shinfo(pBuf)->frag_list, pBuf->pkt_type, fSrc));
459 for (i = 0; i < pSG->cSegsUsed; i++)
460 Log4(("vboxNetFltLinuxSkBufToSG: #%d: cb=%d pv=%p\n",
461 i, pSG->aSegs[i].cb, pSG->aSegs[i].pv));
462}
463
464/**
465 * Packet handler,
466 *
467 * @returns 0 or EJUSTRETURN.
468 * @param pThis The instance.
469 * @param pMBuf The mbuf.
470 * @param pvFrame The start of the frame, optional.
471 * @param fSrc Where the packet (allegedly) comes from, one INTNETTRUNKDIR_* value.
472 * @param eProtocol The protocol.
473 */
474#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14)
475static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
476 struct net_device *pSkbDev,
477 struct packet_type *pPacketType,
478 struct net_device *pOrigDev)
479#else
480static int vboxNetFltLinuxPacketHandler(struct sk_buff *pBuf,
481 struct net_device *pSkbDev,
482 struct packet_type *pPacketType)
483#endif
484{
485 PVBOXNETFLTINS pThis;
486 struct net_device *pDev;
487 LogFlow(("vboxNetFltLinuxPacketHandler: pBuf=%p pSkbDev=%p pPacketType=%p\n",
488 pBuf, pSkbDev, pPacketType));
489 /*
490 * Drop it immediately?
491 */
492 if (!pBuf)
493 return 0;
494
495 pThis = VBOX_FLT_PT_TO_INST(pPacketType);
496 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
497 if (pThis->u.s.pDev != pSkbDev)
498 {
499 Log(("vboxNetFltLinuxPacketHandler: Devices do not match, pThis may be wrong! pThis=%p\n", pThis));
500 return 0;
501 }
502
503 Log4(("vboxNetFltLinuxPacketHandler: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
504 if (vboxNetFltLinuxSkBufIsOur(pBuf))
505 {
506 Log2(("vboxNetFltLinuxPacketHandler: got our own sk_buff, drop it.\n"));
507 dev_kfree_skb(pBuf);
508 return 0;
509 }
510
511#ifndef VBOXNETFLT_SG_SUPPORT
512 {
513 /*
514 * Get rid of fragmented packets, they cause too much trouble.
515 */
516 struct sk_buff *pCopy = skb_copy(pBuf, GFP_ATOMIC);
517 kfree_skb(pBuf);
518 if (!pCopy)
519 {
520 LogRel(("VBoxNetFlt: Failed to allocate packet buffer, dropping the packet.\n"));
521 return 0;
522 }
523 pBuf = pCopy;
524 }
525#endif
526
527 /* Add the packet to transmit queue and schedule the bottom half. */
528 skb_queue_tail(&pThis->u.s.XmitQueue, pBuf);
529 schedule_work(&pThis->u.s.XmitTask);
530 Log4(("vboxNetFltLinuxPacketHandler: scheduled work %p for sk_buff %p\n",
531 &pThis->u.s.XmitTask, pBuf));
532 /* It does not really matter what we return, it is ignored by the kernel. */
533 return 0;
534}
535
536static unsigned vboxNetFltLinuxSGSegments(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
537{
538#ifdef VBOXNETFLT_SG_SUPPORT
539 unsigned cSegs = 1 + skb_shinfo(pBuf)->nr_frags;
540#else
541 unsigned cSegs = 1;
542#endif
543#ifdef PADD_RUNT_FRAMES_FROM_HOST
544 /*
545 * Add a trailer if the frame is too small.
546 */
547 if (pBuf->len < 60)
548 cSegs++;
549#endif
550 return cSegs;
551}
552
553/* WARNING! This function should only be called after vboxNetFltLinuxSkBufToSG()! */
554static void vboxNetFltLinuxFreeSkBuff(struct sk_buff *pBuf, PINTNETSG pSG)
555{
556#ifdef VBOXNETFLT_SG_SUPPORT
557 int i;
558
559 for (i = 0; i < skb_shinfo(pBuf)->nr_frags; i++)
560 {
561 printk("kunmap(%p)\n", pSG->aSegs[i+1].pv);
562 kunmap(pSG->aSegs[i+1].pv);
563 }
564#endif
565
566 dev_kfree_skb(pBuf);
567}
568
#ifndef LOG_ENABLED
#define vboxNetFltDumpPacket(a, b, c, d)
#else
/**
 * Debug helper: logs internal/external MAC addresses, direction, size and a
 * running packet number for a frame.
 *
 * Note: the static counter is not synchronized — numbers may interleave
 * under concurrency; this is a debug aid only.
 *
 * @param   pSG         The frame as an SG list.
 * @param   fEgress     Set when the frame leaves the internal network.
 * @param   pszWhere    Short tag naming the destination ("wire"/"host"/...).
 * @param   iIncrement  Amount to advance the packet counter by.
 */
static void vboxNetFltDumpPacket(PINTNETSG pSG, bool fEgress, const char *pszWhere, int iIncrement)
{
    static int iPacketNo = 1;
    uint8_t *pbFrame = (uint8_t *)pSG->aSegs[0].pv;
    uint8_t *pInt, *pExt;

    iPacketNo += iIncrement;
    if (fEgress)
    {
        pExt = pbFrame;
        pInt = pbFrame + 6;
    }
    else
    {
        pInt = pbFrame;
        pExt = pbFrame + 6;
    }
    Log(("VBoxNetFlt: (int)%02x:%02x:%02x:%02x:%02x:%02x"
         " %s (%s)%02x:%02x:%02x:%02x:%02x:%02x (%u bytes) packet #%u\n",
         pInt[0], pInt[1], pInt[2], pInt[3], pInt[4], pInt[5],
         fEgress ? "-->" : "<--", pszWhere,
         pExt[0], pExt[1], pExt[2], pExt[3], pExt[4], pExt[5],
         pSG->cbTotal, iPacketNo));
    Log3(("%.*Rhxd\n", pSG->aSegs[0].cb, pSG->aSegs[0].pv));
}
#endif
596
597static int vboxNetFltLinuxForwardSegment(PVBOXNETFLTINS pThis, struct sk_buff *pBuf, uint32_t fSrc)
598{
599 unsigned cSegs = vboxNetFltLinuxSGSegments(pThis, pBuf);
600 if (cSegs < MAX_SKB_FRAGS)
601 {
602 uint8_t *pTmp;
603 PINTNETSG pSG = (PINTNETSG)alloca(RT_OFFSETOF(INTNETSG, aSegs[cSegs]));
604 if (!pSG)
605 {
606 Log(("VBoxNetFlt: Failed to allocate SG buffer.\n"));
607 return VERR_NO_MEMORY;
608 }
609 vboxNetFltLinuxSkBufToSG(pThis, pBuf, pSG, cSegs, fSrc);
610
611 pTmp = pSG->aSegs[0].pv;
612 vboxNetFltDumpPacket(pSG, false, (fSrc & INTNETTRUNKDIR_HOST) ? "host" : "wire", 1);
613 pThis->pSwitchPort->pfnRecv(pThis->pSwitchPort, pSG, fSrc);
614 Log4(("VBoxNetFlt: Dropping the sk_buff.\n"));
615 vboxNetFltLinuxFreeSkBuff(pBuf, pSG);
616 }
617
618 return VINF_SUCCESS;
619}
620
621static void vboxNetFltLinuxForwardToIntNet(PVBOXNETFLTINS pThis, struct sk_buff *pBuf)
622{
623 uint32_t fSrc = pBuf->pkt_type == PACKET_OUTGOING ? INTNETTRUNKDIR_HOST : INTNETTRUNKDIR_WIRE;
624
625 if (VBOX_SKB_IS_GSO(pBuf))
626 {
627 /* Need to segment the packet */
628 struct sk_buff *pNext, *pSegment;
629 //Log2(("vboxNetFltLinuxForwardToIntNet: cb=%u gso_size=%u gso_segs=%u gso_type=%u\n",
630 // pBuf->len, skb_shinfo(pBuf)->gso_size, skb_shinfo(pBuf)->gso_segs, skb_shinfo(pBuf)->gso_type));
631
632 for (pSegment = VBOX_SKB_GSO_SEGMENT(pBuf); pSegment; pSegment = pNext)
633 {
634 pNext = pSegment->next;
635 pSegment->next = 0;
636 vboxNetFltLinuxForwardSegment(pThis, pSegment, fSrc);
637 }
638 dev_kfree_skb(pBuf);
639 }
640 else
641 {
642 if (pBuf->ip_summed == CHECKSUM_PARTIAL)
643 if (VBOX_SKB_CHECKSUM_HELP(pBuf))
644 {
645 LogRel(("VBoxNetFlt: Failed to compute checksum, dropping the packet.\n"));
646 dev_kfree_skb(pBuf);
647 return;
648 }
649 vboxNetFltLinuxForwardSegment(pThis, pBuf, fSrc);
650 }
651 /*
652 * Create a (scatter/)gather list for the sk_buff and feed it to the internal network.
653 */
654}
655
656#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
657static void vboxNetFltLinuxXmitTask(struct work_struct *pWork)
658#else
659static void vboxNetFltLinuxXmitTask(void *pWork)
660#endif
661{
662 struct sk_buff *pBuf;
663 bool fActive;
664 PVBOXNETFLTINS pThis;
665 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
666
667 Log4(("vboxNetFltLinuxXmitTask: Got work %p.\n", pWork));
668 pThis = VBOX_FLT_XT_TO_INST(pWork);
669 /*
670 * Active? Retain the instance and increment the busy counter.
671 */
672 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
673 fActive = ASMAtomicUoReadBool(&pThis->fActive);
674 if (fActive)
675 vboxNetFltRetain(pThis, true /* fBusy */);
676 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
677 if (!fActive)
678 return;
679
680 while ((pBuf = skb_dequeue(&pThis->u.s.XmitQueue)) != 0)
681 vboxNetFltLinuxForwardToIntNet(pThis, pBuf);
682
683 vboxNetFltRelease(pThis, true /* fBusy */);
684}
685
686/**
687 * Internal worker for vboxNetFltOsInitInstance and vboxNetFltOsMaybeRediscovered.
688 *
689 * @returns VBox status code.
690 * @param pThis The instance.
691 * @param fRediscovery If set we're doing a rediscovery attempt, so, don't
692 * flood the release log.
693 */
694static int vboxNetFltLinuxAttachToInterface(PVBOXNETFLTINS pThis, struct net_device *pDev)
695{
696 struct packet_type *pt;
697 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
698
699 LogFlow(("vboxNetFltLinuxAttachToInterface: pThis=%p (%s)\n", pThis, pThis->szName));
700
701 if (!pDev)
702 {
703 Log(("VBoxNetFlt: failed to find device '%s'\n", pThis->szName));
704 return VERR_INTNET_FLT_IF_NOT_FOUND;
705 }
706
707 dev_hold(pDev);
708 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
709 ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, pDev);
710 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
711
712 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) retained. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
713 Log(("vboxNetFltLinuxAttachToInterface: Got pDev=%p pThis=%p pThis->u.s.pDev=%p\n", pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
714 /*
715 * Get the mac address while we still have a valid ifnet reference.
716 */
717 memcpy(&pThis->u.s.Mac, pDev->dev_addr, sizeof(pThis->u.s.Mac));
718
719 pt = &pThis->u.s.PacketType;
720 pt->type = __constant_htons(ETH_P_ALL);
721 pt->dev = pDev;
722 pt->func = vboxNetFltLinuxPacketHandler;
723 dev_add_pack(pt);
724 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
725 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
726 if (pDev)
727 {
728 ASMAtomicUoWriteBool(&pThis->fDisconnectedFromHost, false);
729 ASMAtomicUoWriteBool(&pThis->u.s.fRegistered, true);
730 pDev = NULL; /* don't dereference it */
731 }
732 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
733 Log(("vboxNetFltLinuxAttachToInterface: this=%p: Packet handler installed.\n", pThis));
734
735 /* Release the interface on failure. */
736 if (pDev)
737 {
738 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
739 ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
740 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
741 dev_put(pDev);
742 Log(("vboxNetFltLinuxAttachToInterface: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
743 }
744
745 LogRel(("VBoxNetFlt: attached to '%s' / %.*Rhxs\n", pThis->szName, sizeof(pThis->u.s.Mac), &pThis->u.s.Mac));
746 return VINF_SUCCESS;
747}
748
749
750static int vboxNetFltLinuxUnregisterDevice(PVBOXNETFLTINS pThis, struct net_device *pDev)
751{
752 RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
753
754 Assert(!pThis->fDisconnectedFromHost);
755 RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
756 ASMAtomicWriteBool(&pThis->u.s.fRegistered, false);
757 ASMAtomicWriteBool(&pThis->fDisconnectedFromHost, true);
758 ASMAtomicUoWritePtr((void * volatile *)&pThis->u.s.pDev, NULL);
759 RTSpinlockRelease(pThis->hSpinlock, &Tmp);
760
761 dev_remove_pack(&pThis->u.s.PacketType);
762 skb_queue_purge(&pThis->u.s.XmitQueue);
763 Log(("vboxNetFltLinuxUnregisterDevice: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
764 Log(("vboxNetFltLinuxUnregisterDevice: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
765 dev_put(pDev);
766
767 return NOTIFY_OK;
768}
769
770static int vboxNetFltLinuxDeviceIsUp(PVBOXNETFLTINS pThis, struct net_device *pDev)
771{
772 /* Check if we are not suspended and promiscuous mode has not been set. */
773 if (ASMAtomicUoReadBool(&pThis->fActive) && !ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
774 {
775 /* Note that there is no need for locking as the kernel got hold of the lock already. */
776 dev_set_promiscuity(pDev, 1);
777 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, true);
778 Log(("vboxNetFltLinuxDeviceIsUp: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
779 }
780 else
781 Log(("vboxNetFltLinuxDeviceIsUp: no need to enable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
782 return NOTIFY_OK;
783}
784
785static int vboxNetFltLinuxDeviceGoingDown(PVBOXNETFLTINS pThis, struct net_device *pDev)
786{
787 /* Undo promiscuous mode if we has set it. */
788 if (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet))
789 {
790 /* Note that there is no need for locking as the kernel got hold of the lock already. */
791 dev_set_promiscuity(pDev, -1);
792 ASMAtomicWriteBool(&pThis->u.s.fPromiscuousSet, false);
793 Log(("vboxNetFltLinuxDeviceGoingDown: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
794 }
795 else
796 Log(("vboxNetFltLinuxDeviceGoingDown: no need to disable promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
797 return NOTIFY_OK;
798}
799
800static int vboxNetFltLinuxNotifierCallback(struct notifier_block *self, unsigned long ulEventType, void *ptr)
801
802{
803 int rc = NOTIFY_OK;
804#ifdef DEBUG
805 char *pszEvent = "<unknown>";
806#endif
807 struct net_device *pDev = (struct net_device *)ptr;
808 PVBOXNETFLTINS pThis = VBOX_FLT_NB_TO_INST(self);
809
810#ifdef DEBUG
811 switch (ulEventType)
812 {
813 case NETDEV_REGISTER: pszEvent = "NETDEV_REGISTER"; break;
814 case NETDEV_UNREGISTER: pszEvent = "NETDEV_UNREGISTER"; break;
815 case NETDEV_UP: pszEvent = "NETDEV_UP"; break;
816 case NETDEV_DOWN: pszEvent = "NETDEV_DOWN"; break;
817 case NETDEV_REBOOT: pszEvent = "NETDEV_REBOOT"; break;
818 case NETDEV_CHANGENAME: pszEvent = "NETDEV_CHANGENAME"; break;
819 case NETDEV_CHANGE: pszEvent = "NETDEV_CHANGE"; break;
820 case NETDEV_CHANGEMTU: pszEvent = "NETDEV_CHANGEMTU"; break;
821 case NETDEV_CHANGEADDR: pszEvent = "NETDEV_CHANGEADDR"; break;
822 case NETDEV_GOING_DOWN: pszEvent = "NETDEV_GOING_DOWN"; break;
823 }
824 Log(("VBoxNetFlt: got event %s(0x%lx) on %s, pDev=%p pThis=%p pThis->u.s.pDev=%p\n",
825 pszEvent, ulEventType, pDev->name, pDev, pThis, ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev)));
826#endif
827 if (ulEventType == NETDEV_REGISTER && !strcmp(pDev->name, pThis->szName))
828 {
829 vboxNetFltLinuxAttachToInterface(pThis, pDev);
830 }
831 else
832 {
833 pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
834 if (pDev != ptr)
835 return NOTIFY_OK;
836 rc = NOTIFY_OK;
837 switch (ulEventType)
838 {
839 case NETDEV_UNREGISTER:
840 rc = vboxNetFltLinuxUnregisterDevice(pThis, pDev);
841 break;
842 case NETDEV_UP:
843 rc = vboxNetFltLinuxDeviceIsUp(pThis, pDev);
844 break;
845 case NETDEV_GOING_DOWN:
846 rc = vboxNetFltLinuxDeviceGoingDown(pThis, pDev);
847 break;
848 case NETDEV_CHANGENAME:
849 break;
850 }
851 }
852
853 return rc;
854}
855
856bool vboxNetFltOsMaybeRediscovered(PVBOXNETFLTINS pThis)
857{
858 return !ASMAtomicUoReadBool(&pThis->fDisconnectedFromHost);
859}
860
861int vboxNetFltPortOsXmit(PVBOXNETFLTINS pThis, PINTNETSG pSG, uint32_t fDst)
862{
863 struct net_device * pDev;
864 int err;
865 int rc = VINF_SUCCESS;
866
867 LogFlow(("vboxNetFltPortOsXmit: pThis=%p (%s)\n", pThis, pThis->szName));
868
869 pDev = vboxNetFltLinuxRetainNetDev(pThis);
870 if (pDev)
871 {
872 /*
873 * Create a sk_buff for the gather list and push it onto the wire.
874 */
875 if (fDst & INTNETTRUNKDIR_WIRE)
876 {
877 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, true);
878 if (pBuf)
879 {
880 vboxNetFltDumpPacket(pSG, true, "wire", 1);
881 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
882 Log4(("vboxNetFltPortOsXmit: dev_queue_xmit(%p)\n", pBuf));
883 err = dev_queue_xmit(pBuf);
884 if (err)
885 rc = RTErrConvertFromErrno(err);
886 }
887 else
888 rc = VERR_NO_MEMORY;
889 }
890
891 /*
892 * Create a sk_buff for the gather list and push it onto the host stack.
893 */
894 if (fDst & INTNETTRUNKDIR_HOST)
895 {
896 struct sk_buff *pBuf = vboxNetFltLinuxSkBufFromSG(pThis, pSG, false);
897 if (pBuf)
898 {
899 vboxNetFltDumpPacket(pSG, true, "host", (fDst & INTNETTRUNKDIR_WIRE) ? 0 : 1);
900 Log4(("vboxNetFltPortOsXmit: pBuf->cb dump:\n%.*Rhxd\n", sizeof(pBuf->cb), pBuf->cb));
901 Log4(("vboxNetFltPortOsXmit: netif_rx_ni(%p)\n", pBuf));
902 err = netif_rx_ni(pBuf);
903 if (err)
904 rc = RTErrConvertFromErrno(err);
905 }
906 else
907 rc = VERR_NO_MEMORY;
908 }
909
910 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
911 }
912
913 return rc;
914}
915
916
917bool vboxNetFltPortOsIsPromiscuous(PVBOXNETFLTINS pThis)
918{
919 bool fRc = false;
920 struct net_device * pDev = vboxNetFltLinuxRetainNetDev(pThis);
921 if (pDev)
922 {
923 fRc = !!(pDev->promiscuity - (ASMAtomicUoReadBool(&pThis->u.s.fPromiscuousSet) & 1));
924 LogFlow(("vboxNetFltPortOsIsPromiscuous: returns %d, pDev->promiscuity=%d, fPromiscuousSet=%d\n",
925 fRc, pDev->promiscuity, pThis->u.s.fPromiscuousSet));
926 vboxNetFltLinuxReleaseNetDev(pThis, pDev);
927 }
928 return fRc;
929}
930
931
932void vboxNetFltPortOsGetMacAddress(PVBOXNETFLTINS pThis, PRTMAC pMac)
933{
934 *pMac = pThis->u.s.Mac;
935}
936
937
938bool vboxNetFltPortOsIsHostMac(PVBOXNETFLTINS pThis, PCRTMAC pMac)
939{
940 /* ASSUMES that the MAC address never changes. */
941 return pThis->u.s.Mac.au16[0] == pMac->au16[0]
942 && pThis->u.s.Mac.au16[1] == pMac->au16[1]
943 && pThis->u.s.Mac.au16[2] == pMac->au16[2];
944}
945
946
/**
 * Activates or deactivates the filter on the host interface by raising or
 * dropping the device's promiscuity reference count (under the RTNL lock).
 *
 * @param   pThis   The filter instance.
 * @param   fActive true to enable promiscuous mode, false to restore it.
 *
 * NOTE(review): cPromiscBefore is only declared when LOG_ENABLED is defined,
 * yet the Log() in the deactivate path references it unconditionally.  This
 * builds here because this debug version force-defines LOG_ENABLED and maps
 * Log() to printk (see file head); it would break if LOG_ENABLED were off
 * while Log() still expanded its argument — TODO confirm against release
 * build configuration.
 */
void vboxNetFltPortOsSetActive(PVBOXNETFLTINS pThis, bool fActive)
{
    struct net_device * pDev;

    LogFlow(("vboxNetFltPortOsSetActive: pThis=%p (%s), fActive=%s\n",
             pThis, pThis->szName, fActive?"true":"false"));

    pDev = vboxNetFltLinuxRetainNetDev(pThis);
    if (pDev)
    {
        /*
         * This API is a bit weird, the best reference is the code.
         *
         * Also, we have a bit of a race condition wrt the maintenance of the
         * host interface's promiscuity for vboxNetFltPortOsIsPromiscuous:
         * fPromiscuousSet is updated here without synchronisation against
         * readers of that flag.
         */
        u_int16_t fIf;
#ifdef LOG_ENABLED
        /* Promiscuity count before we touch it; for before/after logging only. */
        unsigned const cPromiscBefore = VBOX_GET_PCOUNT(pDev);
#endif
        if (fActive)
        {
            /* Activating twice would leak a promiscuity reference. */
            Assert(!pThis->u.s.fPromiscuousSet);

#if 0
            /* Disabled: attempt to bring the interface up before going
               promiscuous (ported from another host backend; references an
               'err' variable that no longer exists in the enabled code). */
            /*
             * Try bring the interface up and running if it's down.
             */
            fIf = dev_get_flags(pDev);
            if ((fIf & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING))
            {
                rtnl_lock();
                int err = dev_change_flags(pDev, fIf | IFF_UP);
                rtnl_unlock();
                fIf = dev_get_flags(pDev);
            }

            /*
             * Is it already up? If it isn't, leave it to the link event or
             * we'll upset if_pcount (as stated above, ifnet_set_promiscuous is weird).
             */
            if ((fIf & (IFF_UP | IFF_RUNNING)) == (IFF_UP | IFF_RUNNING)
                && !ASMAtomicReadBool(&pThis->u.s.fPromiscuousSet))
            {
#endif
            /* dev_set_promiscuity must be called with the RTNL lock held. */
            rtnl_lock();
            dev_set_promiscuity(pDev, 1);
            rtnl_unlock();
            /* Remember our +1 so deactivation (and IsPromiscuous) can
               account for it. */
            pThis->u.s.fPromiscuousSet = true;
            Log(("vboxNetFltPortOsSetActive: enabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
#if 0
            /* check if it actually worked, this stuff is not always behaving well. */
            if (!(dev_get_flags(pDev) & IFF_PROMISC))
            {
                err = dev_change_flags(pDev, fIf | IFF_PROMISC);
                if (!err)
                    Log(("vboxNetFlt: fixed IFF_PROMISC on %s (%d->%d)\n", pThis->szName, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
                else
                    Log(("VBoxNetFlt: failed to fix IFF_PROMISC on %s, err=%d (%d->%d)\n",
                         pThis->szName, err, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
            }
#endif
#if 0
            }
            else if (!err)
                Log(("VBoxNetFlt: Waiting for the link to come up... (%d->%d)\n", cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
            if (err)
                LogRel(("VBoxNetFlt: Failed to put '%s' into promiscuous mode, err=%d (%d->%d)\n", pThis->szName, err, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
#endif
        }
        else
        {
            /* Only drop the promiscuity count if we actually raised it. */
            if (pThis->u.s.fPromiscuousSet)
            {
                rtnl_lock();
                dev_set_promiscuity(pDev, -1);
                rtnl_unlock();
                Log(("vboxNetFltPortOsSetActive: disabled promiscuous mode on %s (%d)\n", pThis->szName, VBOX_GET_PCOUNT(pDev)));
            }
            pThis->u.s.fPromiscuousSet = false;

            fIf = dev_get_flags(pDev);
            Log(("VBoxNetFlt: fIf=%#x; %d->%d\n", fIf, cPromiscBefore, VBOX_GET_PCOUNT(pDev)));
        }

        vboxNetFltLinuxReleaseNetDev(pThis, pDev);
    }
}
1035
1036
1037int vboxNetFltOsDisconnectIt(PVBOXNETFLTINS pThis)
1038{
1039 /* Nothing to do here. */
1040 return VINF_SUCCESS;
1041}
1042
1043
1044int vboxNetFltOsConnectIt(PVBOXNETFLTINS pThis)
1045{
1046 /* Nothing to do here. */
1047 return VINF_SUCCESS;
1048}
1049
1050
/**
 * Tears down the Linux-specific parts of a filter instance.
 *
 * Removes the packet handler and purges the transmit queue if we had
 * attached to a device, drops the device reference, and finally removes
 * the netdevice notifier installed by vboxNetFltOsInitInstance.
 *
 * @param   pThis   The filter instance being destroyed.
 */
void vboxNetFltOsDeleteInstance(PVBOXNETFLTINS pThis)
{
    struct net_device *pDev;
    bool fRegistered;
    RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;

    /* Take a consistent snapshot of the device pointer and the registration
       state under the spinlock; both are modified asynchronously (by the
       notifier callback) elsewhere. */
    RTSpinlockAcquire(pThis->hSpinlock, &Tmp);
    pDev = (struct net_device *)ASMAtomicUoReadPtr((void * volatile *)&pThis->u.s.pDev);
    fRegistered = ASMAtomicUoReadBool(&pThis->u.s.fRegistered);
    RTSpinlockRelease(pThis->hSpinlock, &Tmp);
    if (fRegistered)
    {
        /* Stop receiving packets, then discard anything still queued for
           transmission before releasing our hold on the device. */
        dev_remove_pack(&pThis->u.s.PacketType);
        skb_queue_purge(&pThis->u.s.XmitQueue);
        Log(("vboxNetFltOsDeleteInstance: this=%p: Packet handler removed, xmit queue purged.\n", pThis));
        Log(("vboxNetFltOsDeleteInstance: Device %p(%s) released. ref=%d\n", pDev, pDev->name, atomic_read(&pDev->refcnt)));
        dev_put(pDev);
    }
    /* NOTE(review): this log line is emitted just before the notifier is
       actually unregistered, not after. */
    Log(("vboxNetFltOsDeleteInstance: this=%p: Notifier removed.\n", pThis));
    unregister_netdevice_notifier(&pThis->u.s.Notifier);
}
1072
1073
1074int vboxNetFltOsInitInstance(PVBOXNETFLTINS pThis)
1075{
1076 int err;
1077 pThis->u.s.Notifier.notifier_call = vboxNetFltLinuxNotifierCallback;
1078 err = register_netdevice_notifier(&pThis->u.s.Notifier);
1079 if (err)
1080 return VERR_INTNET_FLT_IF_FAILED;
1081 if (!pThis->u.s.fRegistered)
1082 {
1083 unregister_netdevice_notifier(&pThis->u.s.Notifier);
1084 LogRel(("VBoxNetFlt: failed to find %s.\n", pThis->szName));
1085 return VERR_INTNET_FLT_IF_NOT_FOUND;
1086 }
1087 Log(("vboxNetFltOsInitInstance: this=%p: Notifier installed.\n", pThis));
1088 return pThis->fDisconnectedFromHost ? VERR_INTNET_FLT_IF_FAILED : VINF_SUCCESS;
1089}
1090
/**
 * Pre-initializes the Linux-specific members of the instance data to a
 * safe default state, before vboxNetFltOsInitInstance runs.
 *
 * @returns VINF_SUCCESS (cannot fail).
 * @param   pThis   The filter instance.
 */
int vboxNetFltOsPreInitInstance(PVBOXNETFLTINS pThis)
{
    /*
     * Init the linux specific members.
     */
    pThis->u.s.pDev = NULL;            /* no device attached yet */
    pThis->u.s.fRegistered = false;    /* set by the notifier callback on attach */
    pThis->u.s.fPromiscuousSet = false;
    memset(&pThis->u.s.PacketType, 0, sizeof(pThis->u.s.PacketType));
    skb_queue_head_init(&pThis->u.s.XmitQueue);
    /* INIT_WORK lost its 'data' argument in kernel 2.6.20; the work item
       itself is passed to the handler instead. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask);
#else
    INIT_WORK(&pThis->u.s.XmitTask, vboxNetFltLinuxXmitTask, &pThis->u.s.XmitTask);
#endif

    return VINF_SUCCESS;
}
1109

© 2024 Oracle Support Privacy / Do Not Sell My Info Terms of Use Trademark Policy