Remove 2.6.22 files, no target uses them anymore

SVN-Revision: 12638
Florian Fainelli 2008-09-21 11:32:06 +00:00
parent 78fd83bd12
commit d1f4f4646d
50 changed files with 0 additions and 44864 deletions


@@ -16,9 +16,6 @@ endif
 ifeq ($(LINUX_VERSION),2.6.21.7)
 LINUX_KERNEL_MD5SUM:=bc15fad1487336d5dcb0945cd039d8ed
 endif
-ifeq ($(LINUX_VERSION),2.6.22.19)
-LINUX_KERNEL_MD5SUM:=4db27facb78aeb79d06e6ae6bf0ac0b6
-endif
 ifeq ($(LINUX_VERSION),2.6.23.17)
 LINUX_KERNEL_MD5SUM:=a0300a393ac91ce9c64bf31522b45e2e
 endif

File diff suppressed because it is too large.


@@ -1,788 +0,0 @@
--- /dev/null
+++ b/include/linux/LzmaDecode.h
@@ -0,0 +1,100 @@
+/*
+ LzmaDecode.h
+ LZMA Decoder interface
+
+ LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
+ http://www.7-zip.org/
+
+ LZMA SDK is licensed under two licenses:
+ 1) GNU Lesser General Public License (GNU LGPL)
+ 2) Common Public License (CPL)
+ It means that you can select one of these two licenses and
+ follow rules of that license.
+
+ SPECIAL EXCEPTION:
+ Igor Pavlov, as the author of this code, expressly permits you to
+ statically or dynamically link your code (or bind by name) to the
+ interfaces of this file without subjecting your linked code to the
+ terms of the CPL or GNU LGPL. Any modifications or additions
+ to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#ifndef __LZMADECODE_H
+#define __LZMADECODE_H
+
+/* #define _LZMA_IN_CB */
+/* Use callback for input data */
+
+/* #define _LZMA_OUT_READ */
+/* Use read function for output data */
+
+/* #define _LZMA_PROB32 */
+/* It can increase speed on some 32-bit CPUs,
+ but memory usage will be doubled in that case */
+
+/* #define _LZMA_LOC_OPT */
+/* Enable local speed optimizations inside code */
+
+#ifndef UInt32
+#ifdef _LZMA_UINT32_IS_ULONG
+#define UInt32 unsigned long
+#else
+#define UInt32 unsigned int
+#endif
+#endif
+
+#ifdef _LZMA_PROB32
+#define CProb UInt32
+#else
+#define CProb unsigned short
+#endif
+
+#define LZMA_RESULT_OK 0
+#define LZMA_RESULT_DATA_ERROR 1
+#define LZMA_RESULT_NOT_ENOUGH_MEM 2
+
+#ifdef _LZMA_IN_CB
+typedef struct _ILzmaInCallback
+{
+ int (*Read)(void *object, unsigned char **buffer, UInt32 *bufferSize);
+} ILzmaInCallback;
+#endif
+
+#define LZMA_BASE_SIZE 1846
+#define LZMA_LIT_SIZE 768
+
+/*
+bufferSize = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)))* sizeof(CProb)
+bufferSize += 100 in case of _LZMA_OUT_READ
+by default CProb is unsigned short,
+but if specify _LZMA_PROB_32, CProb will be UInt32(unsigned int)
+*/
+
+#ifdef _LZMA_OUT_READ
+int LzmaDecoderInit(
+ unsigned char *buffer, UInt32 bufferSize,
+ int lc, int lp, int pb,
+ unsigned char *dictionary, UInt32 dictionarySize,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback
+ #else
+ unsigned char *inStream, UInt32 inSize
+ #endif
+);
+#endif
+
+int LzmaDecode(
+ unsigned char *buffer,
+ #ifndef _LZMA_OUT_READ
+ UInt32 bufferSize,
+ int lc, int lp, int pb,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback,
+ #else
+ unsigned char *inStream, UInt32 inSize,
+ #endif
+ #endif
+ unsigned char *outStream, UInt32 outSize,
+ UInt32 *outSizeProcessed);
+
+#endif
--- /dev/null
+++ b/lib/LzmaDecode.c
@@ -0,0 +1,663 @@
+/*
+ LzmaDecode.c
+ LZMA Decoder
+
+ LZMA SDK 4.05 Copyright (c) 1999-2004 Igor Pavlov (2004-08-25)
+ http://www.7-zip.org/
+
+ LZMA SDK is licensed under two licenses:
+ 1) GNU Lesser General Public License (GNU LGPL)
+ 2) Common Public License (CPL)
+ It means that you can select one of these two licenses and
+ follow rules of that license.
+
+ SPECIAL EXCEPTION:
+ Igor Pavlov, as the author of this code, expressly permits you to
+ statically or dynamically link your code (or bind by name) to the
+ interfaces of this file without subjecting your linked code to the
+ terms of the CPL or GNU LGPL. Any modifications or additions
+ to this file, however, are subject to the LGPL or CPL terms.
+*/
+
+#include <linux/LzmaDecode.h>
+
+#ifndef Byte
+#define Byte unsigned char
+#endif
+
+#define kNumTopBits 24
+#define kTopValue ((UInt32)1 << kNumTopBits)
+
+#define kNumBitModelTotalBits 11
+#define kBitModelTotal (1 << kNumBitModelTotalBits)
+#define kNumMoveBits 5
+
+typedef struct _CRangeDecoder
+{
+ Byte *Buffer;
+ Byte *BufferLim;
+ UInt32 Range;
+ UInt32 Code;
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *InCallback;
+ int Result;
+ #endif
+ int ExtraBytes;
+} CRangeDecoder;
+
+Byte RangeDecoderReadByte(CRangeDecoder *rd)
+{
+ if (rd->Buffer == rd->BufferLim)
+ {
+ #ifdef _LZMA_IN_CB
+ UInt32 size;
+ rd->Result = rd->InCallback->Read(rd->InCallback, &rd->Buffer, &size);
+ rd->BufferLim = rd->Buffer + size;
+ if (size == 0)
+ #endif
+ {
+ rd->ExtraBytes = 1;
+ return 0xFF;
+ }
+ }
+ return (*rd->Buffer++);
+}
+
+/* #define ReadByte (*rd->Buffer++) */
+#define ReadByte (RangeDecoderReadByte(rd))
+
+void RangeDecoderInit(CRangeDecoder *rd,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback
+ #else
+ Byte *stream, UInt32 bufferSize
+ #endif
+ )
+{
+ int i;
+ #ifdef _LZMA_IN_CB
+ rd->InCallback = inCallback;
+ rd->Buffer = rd->BufferLim = 0;
+ #else
+ rd->Buffer = stream;
+ rd->BufferLim = stream + bufferSize;
+ #endif
+ rd->ExtraBytes = 0;
+ rd->Code = 0;
+ rd->Range = (0xFFFFFFFF);
+ for(i = 0; i < 5; i++)
+ rd->Code = (rd->Code << 8) | ReadByte;
+}
+
+#define RC_INIT_VAR UInt32 range = rd->Range; UInt32 code = rd->Code;
+#define RC_FLUSH_VAR rd->Range = range; rd->Code = code;
+#define RC_NORMALIZE if (range < kTopValue) { range <<= 8; code = (code << 8) | ReadByte; }
+
+UInt32 RangeDecoderDecodeDirectBits(CRangeDecoder *rd, int numTotalBits)
+{
+ RC_INIT_VAR
+ UInt32 result = 0;
+ int i;
+ for (i = numTotalBits; i > 0; i--)
+ {
+ /* UInt32 t; */
+ range >>= 1;
+
+ result <<= 1;
+ if (code >= range)
+ {
+ code -= range;
+ result |= 1;
+ }
+ /*
+ t = (code - range) >> 31;
+ t &= 1;
+ code -= range & (t - 1);
+ result = (result + result) | (1 - t);
+ */
+ RC_NORMALIZE
+ }
+ RC_FLUSH_VAR
+ return result;
+}
+
+int RangeDecoderBitDecode(CProb *prob, CRangeDecoder *rd)
+{
+ UInt32 bound = (rd->Range >> kNumBitModelTotalBits) * *prob;
+ if (rd->Code < bound)
+ {
+ rd->Range = bound;
+ *prob += (kBitModelTotal - *prob) >> kNumMoveBits;
+ if (rd->Range < kTopValue)
+ {
+ rd->Code = (rd->Code << 8) | ReadByte;
+ rd->Range <<= 8;
+ }
+ return 0;
+ }
+ else
+ {
+ rd->Range -= bound;
+ rd->Code -= bound;
+ *prob -= (*prob) >> kNumMoveBits;
+ if (rd->Range < kTopValue)
+ {
+ rd->Code = (rd->Code << 8) | ReadByte;
+ rd->Range <<= 8;
+ }
+ return 1;
+ }
+}
+
+#define RC_GET_BIT2(prob, mi, A0, A1) \
+ UInt32 bound = (range >> kNumBitModelTotalBits) * *prob; \
+ if (code < bound) \
+ { A0; range = bound; *prob += (kBitModelTotal - *prob) >> kNumMoveBits; mi <<= 1; } \
+ else \
+ { A1; range -= bound; code -= bound; *prob -= (*prob) >> kNumMoveBits; mi = (mi + mi) + 1; } \
+ RC_NORMALIZE
+
+#define RC_GET_BIT(prob, mi) RC_GET_BIT2(prob, mi, ; , ;)
+
+int RangeDecoderBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
+{
+ int mi = 1;
+ int i;
+ #ifdef _LZMA_LOC_OPT
+ RC_INIT_VAR
+ #endif
+ for(i = numLevels; i > 0; i--)
+ {
+ #ifdef _LZMA_LOC_OPT
+ CProb *prob = probs + mi;
+ RC_GET_BIT(prob, mi)
+ #else
+ mi = (mi + mi) + RangeDecoderBitDecode(probs + mi, rd);
+ #endif
+ }
+ #ifdef _LZMA_LOC_OPT
+ RC_FLUSH_VAR
+ #endif
+ return mi - (1 << numLevels);
+}
+
+int RangeDecoderReverseBitTreeDecode(CProb *probs, int numLevels, CRangeDecoder *rd)
+{
+ int mi = 1;
+ int i;
+ int symbol = 0;
+ #ifdef _LZMA_LOC_OPT
+ RC_INIT_VAR
+ #endif
+ for(i = 0; i < numLevels; i++)
+ {
+ #ifdef _LZMA_LOC_OPT
+ CProb *prob = probs + mi;
+ RC_GET_BIT2(prob, mi, ; , symbol |= (1 << i))
+ #else
+ int bit = RangeDecoderBitDecode(probs + mi, rd);
+ mi = mi + mi + bit;
+ symbol |= (bit << i);
+ #endif
+ }
+ #ifdef _LZMA_LOC_OPT
+ RC_FLUSH_VAR
+ #endif
+ return symbol;
+}
+
+Byte LzmaLiteralDecode(CProb *probs, CRangeDecoder *rd)
+{
+ int symbol = 1;
+ #ifdef _LZMA_LOC_OPT
+ RC_INIT_VAR
+ #endif
+ do
+ {
+ #ifdef _LZMA_LOC_OPT
+ CProb *prob = probs + symbol;
+ RC_GET_BIT(prob, symbol)
+ #else
+ symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
+ #endif
+ }
+ while (symbol < 0x100);
+ #ifdef _LZMA_LOC_OPT
+ RC_FLUSH_VAR
+ #endif
+ return symbol;
+}
+
+Byte LzmaLiteralDecodeMatch(CProb *probs, CRangeDecoder *rd, Byte matchByte)
+{
+ int symbol = 1;
+ #ifdef _LZMA_LOC_OPT
+ RC_INIT_VAR
+ #endif
+ do
+ {
+ int bit;
+ int matchBit = (matchByte >> 7) & 1;
+ matchByte <<= 1;
+ #ifdef _LZMA_LOC_OPT
+ {
+ CProb *prob = probs + ((1 + matchBit) << 8) + symbol;
+ RC_GET_BIT2(prob, symbol, bit = 0, bit = 1)
+ }
+ #else
+ bit = RangeDecoderBitDecode(probs + ((1 + matchBit) << 8) + symbol, rd);
+ symbol = (symbol << 1) | bit;
+ #endif
+ if (matchBit != bit)
+ {
+ while (symbol < 0x100)
+ {
+ #ifdef _LZMA_LOC_OPT
+ CProb *prob = probs + symbol;
+ RC_GET_BIT(prob, symbol)
+ #else
+ symbol = (symbol + symbol) | RangeDecoderBitDecode(probs + symbol, rd);
+ #endif
+ }
+ break;
+ }
+ }
+ while (symbol < 0x100);
+ #ifdef _LZMA_LOC_OPT
+ RC_FLUSH_VAR
+ #endif
+ return symbol;
+}
+
+#define kNumPosBitsMax 4
+#define kNumPosStatesMax (1 << kNumPosBitsMax)
+
+#define kLenNumLowBits 3
+#define kLenNumLowSymbols (1 << kLenNumLowBits)
+#define kLenNumMidBits 3
+#define kLenNumMidSymbols (1 << kLenNumMidBits)
+#define kLenNumHighBits 8
+#define kLenNumHighSymbols (1 << kLenNumHighBits)
+
+#define LenChoice 0
+#define LenChoice2 (LenChoice + 1)
+#define LenLow (LenChoice2 + 1)
+#define LenMid (LenLow + (kNumPosStatesMax << kLenNumLowBits))
+#define LenHigh (LenMid + (kNumPosStatesMax << kLenNumMidBits))
+#define kNumLenProbs (LenHigh + kLenNumHighSymbols)
+
+int LzmaLenDecode(CProb *p, CRangeDecoder *rd, int posState)
+{
+ if(RangeDecoderBitDecode(p + LenChoice, rd) == 0)
+ return RangeDecoderBitTreeDecode(p + LenLow +
+ (posState << kLenNumLowBits), kLenNumLowBits, rd);
+ if(RangeDecoderBitDecode(p + LenChoice2, rd) == 0)
+ return kLenNumLowSymbols + RangeDecoderBitTreeDecode(p + LenMid +
+ (posState << kLenNumMidBits), kLenNumMidBits, rd);
+ return kLenNumLowSymbols + kLenNumMidSymbols +
+ RangeDecoderBitTreeDecode(p + LenHigh, kLenNumHighBits, rd);
+}
+
+#define kNumStates 12
+
+#define kStartPosModelIndex 4
+#define kEndPosModelIndex 14
+#define kNumFullDistances (1 << (kEndPosModelIndex >> 1))
+
+#define kNumPosSlotBits 6
+#define kNumLenToPosStates 4
+
+#define kNumAlignBits 4
+#define kAlignTableSize (1 << kNumAlignBits)
+
+#define kMatchMinLen 2
+
+#define IsMatch 0
+#define IsRep (IsMatch + (kNumStates << kNumPosBitsMax))
+#define IsRepG0 (IsRep + kNumStates)
+#define IsRepG1 (IsRepG0 + kNumStates)
+#define IsRepG2 (IsRepG1 + kNumStates)
+#define IsRep0Long (IsRepG2 + kNumStates)
+#define PosSlot (IsRep0Long + (kNumStates << kNumPosBitsMax))
+#define SpecPos (PosSlot + (kNumLenToPosStates << kNumPosSlotBits))
+#define Align (SpecPos + kNumFullDistances - kEndPosModelIndex)
+#define LenCoder (Align + kAlignTableSize)
+#define RepLenCoder (LenCoder + kNumLenProbs)
+#define Literal (RepLenCoder + kNumLenProbs)
+
+#if Literal != LZMA_BASE_SIZE
+StopCompilingDueBUG
+#endif
+
+#ifdef _LZMA_OUT_READ
+
+typedef struct _LzmaVarState
+{
+ CRangeDecoder RangeDecoder;
+ Byte *Dictionary;
+ UInt32 DictionarySize;
+ UInt32 DictionaryPos;
+ UInt32 GlobalPos;
+ UInt32 Reps[4];
+ int lc;
+ int lp;
+ int pb;
+ int State;
+ int PreviousIsMatch;
+ int RemainLen;
+} LzmaVarState;
+
+int LzmaDecoderInit(
+ unsigned char *buffer, UInt32 bufferSize,
+ int lc, int lp, int pb,
+ unsigned char *dictionary, UInt32 dictionarySize,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback
+ #else
+ unsigned char *inStream, UInt32 inSize
+ #endif
+ )
+{
+ LzmaVarState *vs = (LzmaVarState *)buffer;
+ CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
+ UInt32 i;
+ if (bufferSize < numProbs * sizeof(CProb) + sizeof(LzmaVarState))
+ return LZMA_RESULT_NOT_ENOUGH_MEM;
+ vs->Dictionary = dictionary;
+ vs->DictionarySize = dictionarySize;
+ vs->DictionaryPos = 0;
+ vs->GlobalPos = 0;
+ vs->Reps[0] = vs->Reps[1] = vs->Reps[2] = vs->Reps[3] = 1;
+ vs->lc = lc;
+ vs->lp = lp;
+ vs->pb = pb;
+ vs->State = 0;
+ vs->PreviousIsMatch = 0;
+ vs->RemainLen = 0;
+ dictionary[dictionarySize - 1] = 0;
+ for (i = 0; i < numProbs; i++)
+ p[i] = kBitModelTotal >> 1;
+ RangeDecoderInit(&vs->RangeDecoder,
+ #ifdef _LZMA_IN_CB
+ inCallback
+ #else
+ inStream, inSize
+ #endif
+ );
+ return LZMA_RESULT_OK;
+}
+
+int LzmaDecode(unsigned char *buffer,
+ unsigned char *outStream, UInt32 outSize,
+ UInt32 *outSizeProcessed)
+{
+ LzmaVarState *vs = (LzmaVarState *)buffer;
+ CProb *p = (CProb *)(buffer + sizeof(LzmaVarState));
+ CRangeDecoder rd = vs->RangeDecoder;
+ int state = vs->State;
+ int previousIsMatch = vs->PreviousIsMatch;
+ Byte previousByte;
+ UInt32 rep0 = vs->Reps[0], rep1 = vs->Reps[1], rep2 = vs->Reps[2], rep3 = vs->Reps[3];
+ UInt32 nowPos = 0;
+ UInt32 posStateMask = (1 << (vs->pb)) - 1;
+ UInt32 literalPosMask = (1 << (vs->lp)) - 1;
+ int lc = vs->lc;
+ int len = vs->RemainLen;
+ UInt32 globalPos = vs->GlobalPos;
+
+ Byte *dictionary = vs->Dictionary;
+ UInt32 dictionarySize = vs->DictionarySize;
+ UInt32 dictionaryPos = vs->DictionaryPos;
+
+ if (len == -1)
+ {
+ *outSizeProcessed = 0;
+ return LZMA_RESULT_OK;
+ }
+
+ while(len > 0 && nowPos < outSize)
+ {
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ outStream[nowPos++] = dictionary[dictionaryPos] = dictionary[pos];
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ len--;
+ }
+ if (dictionaryPos == 0)
+ previousByte = dictionary[dictionarySize - 1];
+ else
+ previousByte = dictionary[dictionaryPos - 1];
+#else
+
+int LzmaDecode(
+ Byte *buffer, UInt32 bufferSize,
+ int lc, int lp, int pb,
+ #ifdef _LZMA_IN_CB
+ ILzmaInCallback *inCallback,
+ #else
+ unsigned char *inStream, UInt32 inSize,
+ #endif
+ unsigned char *outStream, UInt32 outSize,
+ UInt32 *outSizeProcessed)
+{
+ UInt32 numProbs = Literal + ((UInt32)LZMA_LIT_SIZE << (lc + lp));
+ CProb *p = (CProb *)buffer;
+ CRangeDecoder rd;
+ UInt32 i;
+ int state = 0;
+ int previousIsMatch = 0;
+ Byte previousByte = 0;
+ UInt32 rep0 = 1, rep1 = 1, rep2 = 1, rep3 = 1;
+ UInt32 nowPos = 0;
+ UInt32 posStateMask = (1 << pb) - 1;
+ UInt32 literalPosMask = (1 << lp) - 1;
+ int len = 0;
+ if (bufferSize < numProbs * sizeof(CProb))
+ return LZMA_RESULT_NOT_ENOUGH_MEM;
+ for (i = 0; i < numProbs; i++)
+ p[i] = kBitModelTotal >> 1;
+ RangeDecoderInit(&rd,
+ #ifdef _LZMA_IN_CB
+ inCallback
+ #else
+ inStream, inSize
+ #endif
+ );
+#endif
+
+ *outSizeProcessed = 0;
+ while(nowPos < outSize)
+ {
+ int posState = (int)(
+ (nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ & posStateMask);
+ #ifdef _LZMA_IN_CB
+ if (rd.Result != LZMA_RESULT_OK)
+ return rd.Result;
+ #endif
+ if (rd.ExtraBytes != 0)
+ return LZMA_RESULT_DATA_ERROR;
+ if (RangeDecoderBitDecode(p + IsMatch + (state << kNumPosBitsMax) + posState, &rd) == 0)
+ {
+ CProb *probs = p + Literal + (LZMA_LIT_SIZE *
+ (((
+ (nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ & literalPosMask) << lc) + (previousByte >> (8 - lc))));
+
+ if (state < 4) state = 0;
+ else if (state < 10) state -= 3;
+ else state -= 6;
+ if (previousIsMatch)
+ {
+ Byte matchByte;
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ matchByte = dictionary[pos];
+ #else
+ matchByte = outStream[nowPos - rep0];
+ #endif
+ previousByte = LzmaLiteralDecodeMatch(probs, &rd, matchByte);
+ previousIsMatch = 0;
+ }
+ else
+ previousByte = LzmaLiteralDecode(probs, &rd);
+ outStream[nowPos++] = previousByte;
+ #ifdef _LZMA_OUT_READ
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #endif
+ }
+ else
+ {
+ previousIsMatch = 1;
+ if (RangeDecoderBitDecode(p + IsRep + state, &rd) == 1)
+ {
+ if (RangeDecoderBitDecode(p + IsRepG0 + state, &rd) == 0)
+ {
+ if (RangeDecoderBitDecode(p + IsRep0Long + (state << kNumPosBitsMax) + posState, &rd) == 0)
+ {
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos;
+ #endif
+ if (
+ (nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ == 0)
+ return LZMA_RESULT_DATA_ERROR;
+ state = state < 7 ? 9 : 11;
+ #ifdef _LZMA_OUT_READ
+ pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ previousByte = dictionary[pos];
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #else
+ previousByte = outStream[nowPos - rep0];
+ #endif
+ outStream[nowPos++] = previousByte;
+ continue;
+ }
+ }
+ else
+ {
+ UInt32 distance;
+ if(RangeDecoderBitDecode(p + IsRepG1 + state, &rd) == 0)
+ distance = rep1;
+ else
+ {
+ if(RangeDecoderBitDecode(p + IsRepG2 + state, &rd) == 0)
+ distance = rep2;
+ else
+ {
+ distance = rep3;
+ rep3 = rep2;
+ }
+ rep2 = rep1;
+ }
+ rep1 = rep0;
+ rep0 = distance;
+ }
+ len = LzmaLenDecode(p + RepLenCoder, &rd, posState);
+ state = state < 7 ? 8 : 11;
+ }
+ else
+ {
+ int posSlot;
+ rep3 = rep2;
+ rep2 = rep1;
+ rep1 = rep0;
+ state = state < 7 ? 7 : 10;
+ len = LzmaLenDecode(p + LenCoder, &rd, posState);
+ posSlot = RangeDecoderBitTreeDecode(p + PosSlot +
+ ((len < kNumLenToPosStates ? len : kNumLenToPosStates - 1) <<
+ kNumPosSlotBits), kNumPosSlotBits, &rd);
+ if (posSlot >= kStartPosModelIndex)
+ {
+ int numDirectBits = ((posSlot >> 1) - 1);
+ rep0 = ((2 | ((UInt32)posSlot & 1)) << numDirectBits);
+ if (posSlot < kEndPosModelIndex)
+ {
+ rep0 += RangeDecoderReverseBitTreeDecode(
+ p + SpecPos + rep0 - posSlot - 1, numDirectBits, &rd);
+ }
+ else
+ {
+ rep0 += RangeDecoderDecodeDirectBits(&rd,
+ numDirectBits - kNumAlignBits) << kNumAlignBits;
+ rep0 += RangeDecoderReverseBitTreeDecode(p + Align, kNumAlignBits, &rd);
+ }
+ }
+ else
+ rep0 = posSlot;
+ rep0++;
+ }
+ if (rep0 == (UInt32)(0))
+ {
+ /* it's for stream version */
+ len = -1;
+ break;
+ }
+ if (rep0 > nowPos
+ #ifdef _LZMA_OUT_READ
+ + globalPos
+ #endif
+ )
+ {
+ return LZMA_RESULT_DATA_ERROR;
+ }
+ len += kMatchMinLen;
+ do
+ {
+ #ifdef _LZMA_OUT_READ
+ UInt32 pos = dictionaryPos - rep0;
+ if (pos >= dictionarySize)
+ pos += dictionarySize;
+ previousByte = dictionary[pos];
+ dictionary[dictionaryPos] = previousByte;
+ if (++dictionaryPos == dictionarySize)
+ dictionaryPos = 0;
+ #else
+ previousByte = outStream[nowPos - rep0];
+ #endif
+ outStream[nowPos++] = previousByte;
+ len--;
+ }
+ while(len > 0 && nowPos < outSize);
+ }
+ }
+
+ #ifdef _LZMA_OUT_READ
+ vs->RangeDecoder = rd;
+ vs->DictionaryPos = dictionaryPos;
+ vs->GlobalPos = globalPos + nowPos;
+ vs->Reps[0] = rep0;
+ vs->Reps[1] = rep1;
+ vs->Reps[2] = rep2;
+ vs->Reps[3] = rep3;
+ vs->State = state;
+ vs->PreviousIsMatch = previousIsMatch;
+ vs->RemainLen = len;
+ #endif
+
+ *outSizeProcessed = nowPos;
+ return LZMA_RESULT_OK;
+}
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -13,7 +13,7 @@
lib-y += kobject.o kref.o kobject_uevent.o klist.o
obj-y += div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
- bust_spinlocks.o hexdump.o
+ bust_spinlocks.o hexdump.o LzmaDecode.o
ifeq ($(CONFIG_DEBUG_KOBJECT),y)
CFLAGS_kobject.o += -DDEBUG
@@ -58,6 +58,7 @@
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_SWIOTLB) += swiotlb.o
+
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
lib-$(CONFIG_GENERIC_BUG) += bug.o
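
For reference, the decoder added above uses a single-call interface in its default build (_LZMA_OUT_READ and _LZMA_IN_CB left undefined): the caller supplies the probability-model workspace, whose size follows the formula documented in LzmaDecode.h. A minimal calling sketch under those assumptions (the wrapper name and error codes are illustrative, not part of the patch); with the squashfs defaults lc=3, lp=0 the workspace works out to (1846 + 768*8) * sizeof(unsigned short) = 15980 bytes:

#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/LzmaDecode.h>

/* Decompress inbuf[0..insize) into outbuf[0..outsize).
 * Returns the number of bytes produced, or a negative errno value.
 */
static int lzma_uncompress(unsigned char *inbuf, UInt32 insize,
			   unsigned char *outbuf, UInt32 outsize,
			   int lc, int lp, int pb)
{
	/* workspace size per the comment in LzmaDecode.h */
	UInt32 wksize = (LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp))) * sizeof(CProb);
	unsigned char *workspace = vmalloc(wksize);
	UInt32 done = 0;
	int ret;

	if (!workspace)
		return -ENOMEM;

	/* default configuration: no dictionary window, no input callback */
	ret = LzmaDecode(workspace, wksize, lc, lp, pb,
			 inbuf, insize, outbuf, outsize, &done);
	vfree(workspace);

	return (ret == LZMA_RESULT_OK) ? (int)done : -EIO;
}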


@@ -1,107 +0,0 @@
--- a/fs/squashfs/inode.c
+++ b/fs/squashfs/inode.c
@@ -4,6 +4,9 @@
* Copyright (c) 2002, 2003, 2004, 2005, 2006
* Phillip Lougher <phillip@lougher.org.uk>
*
+ * LZMA decompressor support added by Oleg I. Vdovikin
+ * Copyright (c) 2005 Oleg I.Vdovikin <oleg@cs.msu.su>
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2,
@@ -21,6 +24,7 @@
* inode.c
*/
+#define SQUASHFS_LZMA
#include <linux/types.h>
#include <linux/squashfs_fs.h>
#include <linux/module.h>
@@ -44,6 +48,19 @@
#include "squashfs.h"
+#ifdef SQUASHFS_LZMA
+#include <linux/LzmaDecode.h>
+
+/* default LZMA settings, should be in sync with mksquashfs */
+#define LZMA_LC 3
+#define LZMA_LP 0
+#define LZMA_PB 2
+
+#define LZMA_WORKSPACE_SIZE ((LZMA_BASE_SIZE + \
+ (LZMA_LIT_SIZE << (LZMA_LC + LZMA_LP))) * sizeof(CProb))
+
+#endif
+
static void squashfs_put_super(struct super_block *);
static int squashfs_statfs(struct dentry *, struct kstatfs *);
static int squashfs_symlink_readpage(struct file *file, struct page *page);
@@ -64,7 +81,11 @@
const char *, void *, struct vfsmount *);
+#ifdef SQUASHFS_LZMA
+static unsigned char lzma_workspace[LZMA_WORKSPACE_SIZE];
+#else
static z_stream stream;
+#endif
static struct file_system_type squashfs_fs_type = {
.owner = THIS_MODULE,
@@ -249,6 +270,15 @@
if (compressed) {
int zlib_err;
+#ifdef SQUASHFS_LZMA
+ if ((zlib_err = LzmaDecode(lzma_workspace,
+ LZMA_WORKSPACE_SIZE, LZMA_LC, LZMA_LP, LZMA_PB,
+ c_buffer, c_byte, buffer, msblk->read_size, &bytes)) != LZMA_RESULT_OK)
+ {
+ ERROR("lzma returned unexpected result 0x%x\n", zlib_err);
+ bytes = 0;
+ }
+#else
stream.next_in = c_buffer;
stream.avail_in = c_byte;
stream.next_out = buffer;
@@ -263,7 +293,7 @@
bytes = 0;
} else
bytes = stream.total_out;
-
+#endif
up(&msblk->read_data_mutex);
}
@@ -2045,15 +2075,19 @@
printk(KERN_INFO "squashfs: version 3.0 (2006/03/15) "
"Phillip Lougher\n");
+#ifndef SQUASHFS_LZMA
if (!(stream.workspace = vmalloc(zlib_inflate_workspacesize()))) {
ERROR("Failed to allocate zlib workspace\n");
destroy_inodecache();
err = -ENOMEM;
goto out;
}
+#endif
if ((err = register_filesystem(&squashfs_fs_type))) {
+#ifndef SQUASHFS_LZMA
vfree(stream.workspace);
+#endif
destroy_inodecache();
}
@@ -2064,7 +2098,9 @@
static void __exit exit_squashfs_fs(void)
{
+#ifndef SQUASHFS_LZMA
vfree(stream.workspace);
+#endif
unregister_filesystem(&squashfs_fs_type);
destroy_inodecache();
}


@@ -1,12 +0,0 @@
--- a/Makefile
+++ b/Makefile
@@ -507,6 +507,9 @@
NOSTDINC_FLAGS += -nostdinc -isystem $(shell $(CC) -print-file-name=include)
CHECKFLAGS += $(NOSTDINC_FLAGS)
+# improve gcc optimization
+CFLAGS += $(call cc-option,-funit-at-a-time,)
+
# warn about C99 declaration after statement
CFLAGS += $(call cc-option,-Wdeclaration-after-statement,)


@@ -1,11 +0,0 @@
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -188,7 +188,7 @@
if something tries to do an invalid xchg(). */
extern void __xchg_called_with_bad_pointer(void);
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static __always_inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
{
switch (size) {
case 4:


@@ -1,36 +0,0 @@
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -51,6 +51,7 @@
#define SST49LF040B 0x0050
#define SST49LF008A 0x005a
#define AT49BV6416 0x00d6
+#define MANUFACTURER_SAMSUNG 0x00ec
static int cfi_amdstd_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
static int cfi_amdstd_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -294,12 +295,19 @@
if (extp->MajorVersion != '1' ||
(extp->MinorVersion < '0' || extp->MinorVersion > '4')) {
- printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
- "version %c.%c.\n", extp->MajorVersion,
- extp->MinorVersion);
- kfree(extp);
- kfree(mtd);
- return NULL;
+ if (cfi->mfr == MANUFACTURER_SAMSUNG &&
+ (extp->MajorVersion == '3' && extp->MinorVersion == '3')) {
+ printk(KERN_NOTICE " Newer Samsung flash detected, "
+ "should be compatibile with Amd/Fujitsu.\n");
+ }
+ else {
+ printk(KERN_ERR " Unknown Amd/Fujitsu Extended Query "
+ "version %c.%c.\n", extp->MajorVersion,
+ extp->MinorVersion);
+ kfree(extp);
+ kfree(mtd);
+ return NULL;
+ }
}
/* Install our own private info structure */


@@ -1,169 +0,0 @@
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -933,7 +933,7 @@
static int __xipram xip_wait_for_operation(
struct map_info *map, struct flchip *chip,
- unsigned long adr, unsigned int chip_op_time )
+ unsigned long adr, int *chip_op_time )
{
struct cfi_private *cfi = map->fldrv_priv;
struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
@@ -942,7 +942,7 @@
flstate_t oldstate, newstate;
start = xip_currtime();
- usec = chip_op_time * 8;
+ usec = *chip_op_time * 8;
if (usec == 0)
usec = 500000;
done = 0;
@@ -1052,8 +1052,8 @@
#define XIP_INVAL_CACHED_RANGE(map, from, size) \
INVALIDATE_CACHED_RANGE(map, from, size)
-#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, usec) \
- xip_wait_for_operation(map, chip, cmd_adr, usec)
+#define INVAL_CACHE_AND_WAIT(map, chip, cmd_adr, inval_adr, inval_len, p_usec) \
+ xip_wait_for_operation(map, chip, cmd_adr, p_usec)
#else
@@ -1065,65 +1065,65 @@
static int inval_cache_and_wait_for_operation(
struct map_info *map, struct flchip *chip,
unsigned long cmd_adr, unsigned long inval_adr, int inval_len,
- unsigned int chip_op_time)
+ int *chip_op_time )
{
struct cfi_private *cfi = map->fldrv_priv;
map_word status, status_OK = CMD(0x80);
- int chip_state = chip->state;
- unsigned int timeo, sleep_time;
+ int z, chip_state = chip->state;
+ unsigned long timeo;
spin_unlock(chip->mutex);
if (inval_len)
INVALIDATE_CACHED_RANGE(map, inval_adr, inval_len);
+ if (*chip_op_time)
+ cfi_udelay(*chip_op_time);
spin_lock(chip->mutex);
- /* set our timeout to 8 times the expected delay */
- timeo = chip_op_time * 8;
- if (!timeo)
- timeo = 500000;
- sleep_time = chip_op_time / 2;
+ timeo = *chip_op_time * 8 * HZ / 1000000;
+ if (timeo < HZ/2)
+ timeo = HZ/2;
+ timeo += jiffies;
+ z = 0;
for (;;) {
+ if (chip->state != chip_state) {
+ /* Someone's suspended the operation: sleep */
+ DECLARE_WAITQUEUE(wait, current);
+
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ add_wait_queue(&chip->wq, &wait);
+ spin_unlock(chip->mutex);
+ schedule();
+ remove_wait_queue(&chip->wq, &wait);
+ timeo = jiffies + (HZ / 2); /* FIXME */
+ spin_lock(chip->mutex);
+ continue;
+ }
+
status = map_read(map, cmd_adr);
if (map_word_andequal(map, status, status_OK, status_OK))
break;
- if (!timeo) {
+ /* OK Still waiting */
+ if (time_after(jiffies, timeo)) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
return -ETIME;
}
- /* OK Still waiting. Drop the lock, wait a while and retry. */
+ /* Latency issues. Drop the lock, wait a while and retry */
+ z++;
spin_unlock(chip->mutex);
- if (sleep_time >= 1000000/HZ) {
- /*
- * Half of the normal delay still remaining
- * can be performed with a sleeping delay instead
- * of busy waiting.
- */
- msleep(sleep_time/1000);
- timeo -= sleep_time;
- sleep_time = 1000000/HZ;
- } else {
- udelay(1);
- cond_resched();
- timeo--;
- }
+ cfi_udelay(1);
spin_lock(chip->mutex);
-
- while (chip->state != chip_state) {
- /* Someone's suspended the operation: sleep */
- DECLARE_WAITQUEUE(wait, current);
- set_current_state(TASK_UNINTERRUPTIBLE);
- add_wait_queue(&chip->wq, &wait);
- spin_unlock(chip->mutex);
- schedule();
- remove_wait_queue(&chip->wq, &wait);
- spin_lock(chip->mutex);
- }
}
+ if (!z) {
+ if (!--(*chip_op_time))
+ *chip_op_time = 1;
+ } else if (z > 1)
+ ++(*chip_op_time);
+
/* Done and happy. */
chip->state = FL_STATUS;
return 0;
@@ -1132,7 +1132,8 @@
#endif
#define WAIT_TIMEOUT(map, chip, adr, udelay) \
- INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, udelay);
+ ({ int __udelay = (udelay); \
+ INVAL_CACHE_AND_WAIT(map, chip, adr, 0, 0, &__udelay); })
static int do_point_onechip (struct map_info *map, struct flchip *chip, loff_t adr, size_t len)
@@ -1356,7 +1357,7 @@
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, map_bankwidth(map),
- chip->word_write_time);
+ &chip->word_write_time);
if (ret) {
xip_enable(map, chip, adr);
printk(KERN_ERR "%s: word write error (status timeout)\n", map->name);
@@ -1593,7 +1594,7 @@
ret = INVAL_CACHE_AND_WAIT(map, chip, cmd_adr,
adr, len,
- chip->buffer_write_time);
+ &chip->buffer_write_time);
if (ret) {
map_write(map, CMD(0x70), cmd_adr);
chip->state = FL_STATUS;
@@ -1728,7 +1729,7 @@
ret = INVAL_CACHE_AND_WAIT(map, chip, adr,
adr, len,
- chip->erase_time);
+ &chip->erase_time);
if (ret) {
map_write(map, CMD(0x70), adr);
chip->state = FL_STATUS;
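
In short, the rework above turns the fixed word-write/buffer-write/erase delays into a self-tuning estimate: the expected delay is now passed by reference, the routine sleeps through it once, then polls the status register in 1 us steps, and finally nudges the estimate down when no extra poll was needed or up when several were. A simplified stand-alone sketch of that feedback loop (poll_done() is a hypothetical stand-in for the status-register check, and the jiffies-based timeout is reduced to a microsecond budget):

#include <linux/delay.h>
#include <linux/errno.h>

/* Wait for a flash operation and tune the caller's delay estimate in place. */
static int wait_and_tune(int *expected_us, int (*poll_done)(void *), void *chip)
{
	int extra_polls = 0;
	int budget_us = *expected_us * 8;	/* time out at 8x the expected delay */

	if (*expected_us)
		udelay(*expected_us);		/* sleep through the expected time first */

	while (!poll_done(chip)) {
		if (budget_us-- <= 0)
			return -ETIME;
		udelay(1);
		extra_polls++;
	}

	if (extra_polls == 0 && *expected_us > 1)
		--(*expected_us);		/* finished within the estimate: shrink it */
	else if (extra_polls > 1)
		++(*expected_us);		/* needed several extra polls: grow it */

	return 0;
}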


@@ -1,19 +0,0 @@
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -4,4 +4,3 @@
obj-$(CONFIG_SQUASHFS) += squashfs.o
squashfs-y += inode.o
-squashfs-y += squashfs2_0.o
--- a/fs/squashfs/squashfs.h
+++ b/fs/squashfs/squashfs.h
@@ -24,6 +24,9 @@
#ifdef CONFIG_SQUASHFS_1_0_COMPATIBILITY
#undef CONFIG_SQUASHFS_1_0_COMPATIBILITY
#endif
+#ifdef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+#undef CONFIG_SQUASHFS_2_0_COMPATIBILITY
+#endif
#ifdef SQUASHFS_TRACE
#define TRACE(s, args...) printk(KERN_NOTICE "SQUASHFS: "s, ## args)


@@ -1,19 +0,0 @@
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -129,11 +129,15 @@
#endif
.endm
+
+ j kernel_entry
+ nop
+
/*
* Reserved space for exception handlers.
* Necessary for machines which link their kernels at KSEG0.
*/
- .fill 0x400
+ .align 10
EXPORT(stext) # used for profiling
EXPORT(_stext)


@@ -1,18 +0,0 @@
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -887,7 +887,6 @@
case CPU_R10000:
case CPU_R12000:
case CPU_R14000:
- case CPU_4KC:
case CPU_SB1:
case CPU_SB1A:
case CPU_4KSC:
@@ -915,6 +914,7 @@
tlbw(p);
break;
+ case CPU_4KC:
case CPU_4KEC:
case CPU_24K:
case CPU_34K:


@@ -1,32 +0,0 @@
--- a/arch/mips/defconfig
+++ b/arch/mips/defconfig
@@ -69,6 +69,7 @@
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_TIME=y
+CONFIG_GENERIC_GPIO=n
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
CONFIG_ARC=y
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -704,6 +704,10 @@
bool
default y
+config GENERIC_GPIO
+ bool
+ default n
+
config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
default y
--- /dev/null
+++ b/include/asm-mips/gpio.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_MIPS_GPIO_H
+#define _ASM_MIPS_GPIO_H
+
+#include <gpio.h>
+
+#endif /* _ASM_MIPS_GPIO_H */

File diff suppressed because it is too large.


@@ -1,19 +0,0 @@
From: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
arch/i386/kernel/cpu/Makefile | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
--- a/arch/i386/kernel/cpu/Makefile
+++ b/arch/i386/kernel/cpu/Makefile
@@ -8,7 +8,7 @@
obj-y += cyrix.o
obj-y += centaur.o
obj-y += transmeta.o
-obj-y += intel.o intel_cacheinfo.o
+obj-y += intel.o intel_cacheinfo.o addon_cpuid_features.o
obj-y += rise.o
obj-y += nexgen.o
obj-y += umc.o


@@ -1,112 +0,0 @@
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
#include <linux/mount.h>
@@ -237,10 +238,11 @@
/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size)
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
{
struct block_device *bdev;
struct block2mtd_dev *dev;
+ struct mtd_partition *part;
if (!devname)
return NULL;
@@ -279,14 +281,18 @@
/* Setup the MTD structure */
/* make the name contain the block device in */
- dev->mtd.name = kmalloc(sizeof("block2mtd: ") + strlen(devname),
- GFP_KERNEL);
+
+ if (!mtdname)
+ mtdname = devname;
+
+ dev->mtd.name = kmalloc(strlen(mtdname) + 1, GFP_KERNEL);
+
if (!dev->mtd.name)
goto devinit_err;
+
+ strcpy(dev->mtd.name, mtdname);
- sprintf(dev->mtd.name, "block2mtd: %s", devname);
-
- dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK;
+ dev->mtd.size = dev->blkdev->bd_inode->i_size & PAGE_MASK & ~(erase_size - 1);
dev->mtd.erasesize = erase_size;
dev->mtd.writesize = 1;
dev->mtd.type = MTD_RAM;
@@ -298,15 +304,18 @@
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
-
- if (add_mtd_device(&dev->mtd)) {
+
+ part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
+ part->name = dev->mtd.name;
+ part->offset = 0;
+ part->size = dev->mtd.size;
+ if (add_mtd_partitions(&dev->mtd, part, 1)) {
/* Device didnt get added, so free the entry */
goto devinit_err;
}
list_add(&dev->list, &blkmtd_device_list);
INFO("mtd%d: [%s] erase_size = %dKiB [%d]", dev->mtd.index,
- dev->mtd.name + strlen("blkmtd: "),
- dev->mtd.erasesize >> 10, dev->mtd.erasesize);
+ mtdname, dev->mtd.erasesize >> 10, dev->mtd.erasesize);
return dev;
devinit_err:
@@ -379,9 +388,9 @@
static int block2mtd_setup2(const char *val)
{
- char buf[80 + 12]; /* 80 for device, 12 for erase size */
+ char buf[80 + 12 + 80]; /* 80 for device, 12 for erase size, 80 for name */
char *str = buf;
- char *token[2];
+ char *token[3];
char *name;
size_t erase_size = PAGE_SIZE;
int i, ret;
@@ -392,7 +401,7 @@
strcpy(str, val);
kill_final_newline(str);
- for (i = 0; i < 2; i++)
+ for (i = 0; i < 3; i++)
token[i] = strsep(&str, ",");
if (str)
@@ -412,8 +421,10 @@
parse_err("illegal erase size");
}
}
+ if (token[2] && (strlen(token[2]) + 1 > 80))
+ parse_err("mtd device name too long");
- add_device(name, erase_size);
+ add_device(name, erase_size, token[2]);
return 0;
}
@@ -447,7 +458,7 @@
module_param_call(block2mtd, block2mtd_setup, NULL, NULL, 0200);
-MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>]\"");
+MODULE_PARM_DESC(block2mtd, "Device to use. \"block2mtd=<dev>[,<erasesize>[,<name>]]\"");
static int __init block2mtd_init(void)
{
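
With this change the module parameter gains an optional third field for the MTD name, i.e. block2mtd=<dev>[,<erasesize>[,<name>]]. A small user-space sketch of the same strsep()-based split that block2mtd_setup2() performs (the example parameter string is made up):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[80 + 12 + 80] = "/dev/mtdblock2,64KiB,rootfs_data";
	char *str = buf;
	char *token[3];
	int i;

	/* split into device, erase size and name, as block2mtd_setup2() does */
	for (i = 0; i < 3; i++)
		token[i] = strsep(&str, ",");

	printf("device=%s erasesize=%s name=%s\n",
	       token[0],
	       token[1] ? token[1] : "(default PAGE_SIZE)",
	       token[2] ? token[2] : "(falls back to the device name)");
	return 0;
}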


@@ -1,944 +0,0 @@
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -47,6 +47,16 @@
devices. Partitioning on NFTL 'devices' is a different - that's the
'normal' form of partitioning used on a block device.
+config MTD_ROOTFS_ROOT_DEV
+ bool "Automatically set 'rootfs' partition to be root filesystem"
+ depends on MTD_PARTITIONS
+ default y
+
+config MTD_ROOTFS_SPLIT
+ bool "Automatically split 'rootfs' partition for squashfs"
+ depends on MTD_PARTITIONS
+ default y
+
config MTD_REDBOOT_PARTS
tristate "RedBoot partition table parsing"
depends on MTD_PARTITIONS
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -20,6 +20,8 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/compatmac.h>
+#include <linux/squashfs_fs.h>
+#include <linux/root_dev.h>
/* Our partition linked list */
static LIST_HEAD(mtd_partitions);
@@ -39,7 +41,7 @@
* the pointer to that structure with this macro.
*/
#define PART(x) ((struct mtd_part *)(x))
-
+#define IS_PART(mtd) (mtd->read == part_read)
/*
* MTD methods which simply translate the effective address and pass through
@@ -308,6 +310,312 @@
return 0;
}
+static u_int32_t cur_offset = 0;
+static int add_one_partition(struct mtd_info *master, const struct mtd_partition *part,
+ int i, struct mtd_part **slp)
+{
+ struct mtd_part *slave;
+
+ /* allocate the partition structure */
+ slave = kzalloc (sizeof(*slave), GFP_KERNEL);
+ if (!slave) {
+ printk ("memory allocation error while creating partitions for \"%s\"\n",
+ master->name);
+ del_mtd_partitions(master);
+ return -ENOMEM;
+ }
+ list_add(&slave->list, &mtd_partitions);
+
+ /* set up the MTD object for this partition */
+ slave->mtd.type = master->type;
+ slave->mtd.flags = master->flags & ~part->mask_flags;
+ slave->mtd.size = part->size;
+ slave->mtd.writesize = master->writesize;
+ slave->mtd.oobsize = master->oobsize;
+ slave->mtd.oobavail = master->oobavail;
+ slave->mtd.subpage_sft = master->subpage_sft;
+
+ slave->mtd.name = part->name;
+ slave->mtd.owner = master->owner;
+
+ slave->mtd.read = part_read;
+ slave->mtd.write = part_write;
+ slave->mtd.refresh_device = part->refresh_partition;
+
+ if(master->point && master->unpoint){
+ slave->mtd.point = part_point;
+ slave->mtd.unpoint = part_unpoint;
+ }
+
+ if (master->read_oob)
+ slave->mtd.read_oob = part_read_oob;
+ if (master->write_oob)
+ slave->mtd.write_oob = part_write_oob;
+ if(master->read_user_prot_reg)
+ slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
+ if(master->read_fact_prot_reg)
+ slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
+ if(master->write_user_prot_reg)
+ slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
+ if(master->lock_user_prot_reg)
+ slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
+ if(master->get_user_prot_info)
+ slave->mtd.get_user_prot_info = part_get_user_prot_info;
+ if(master->get_fact_prot_info)
+ slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
+ if (master->sync)
+ slave->mtd.sync = part_sync;
+ if (!i && master->suspend && master->resume) {
+ slave->mtd.suspend = part_suspend;
+ slave->mtd.resume = part_resume;
+ }
+ if (master->writev)
+ slave->mtd.writev = part_writev;
+ if (master->lock)
+ slave->mtd.lock = part_lock;
+ if (master->unlock)
+ slave->mtd.unlock = part_unlock;
+ if (master->block_isbad)
+ slave->mtd.block_isbad = part_block_isbad;
+ if (master->block_markbad)
+ slave->mtd.block_markbad = part_block_markbad;
+ slave->mtd.erase = part_erase;
+ slave->master = master;
+ slave->offset = part->offset;
+ slave->index = i;
+
+ if (slave->offset == MTDPART_OFS_APPEND)
+ slave->offset = cur_offset;
+ if (slave->offset == MTDPART_OFS_NXTBLK) {
+ slave->offset = cur_offset;
+ if ((cur_offset % master->erasesize) != 0) {
+ /* Round up to next erasesize */
+ slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
+ printk(KERN_NOTICE "Moving partition %d: "
+ "0x%08x -> 0x%08x\n", i,
+ cur_offset, slave->offset);
+ }
+ }
+ if (slave->mtd.size == MTDPART_SIZ_FULL)
+ slave->mtd.size = master->size - slave->offset;
+ cur_offset = slave->offset + slave->mtd.size;
+
+ printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
+ slave->offset + slave->mtd.size, slave->mtd.name);
+
+ /* let's do some sanity checks */
+ if (slave->offset >= master->size) {
+ /* let's register it anyway to preserve ordering */
+ slave->offset = 0;
+ slave->mtd.size = 0;
+ printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
+ part->name);
+ }
+ if (slave->offset + slave->mtd.size > master->size) {
+ slave->mtd.size = master->size - slave->offset;
+ printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
+ part->name, master->name, slave->mtd.size);
+ }
+ if (master->numeraseregions>1) {
+ /* Deal with variable erase size stuff */
+ int i;
+ struct mtd_erase_region_info *regions = master->eraseregions;
+
+ /* Find the first erase regions which is part of this partition. */
+ for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
+ ;
+
+ for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
+ if (slave->mtd.erasesize < regions[i].erasesize) {
+ slave->mtd.erasesize = regions[i].erasesize;
+ }
+ }
+ } else {
+ /* Single erase size */
+ slave->mtd.erasesize = master->erasesize;
+ }
+
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ (slave->offset % slave->mtd.erasesize)) {
+ /* Doesn't start on a boundary of major erase size */
+ /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
+ part->name);
+ }
+ if ((slave->mtd.flags & MTD_WRITEABLE) &&
+ (slave->mtd.size % slave->mtd.erasesize)) {
+ slave->mtd.flags &= ~MTD_WRITEABLE;
+ printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
+ part->name);
+ }
+
+ slave->mtd.ecclayout = master->ecclayout;
+ if (master->block_isbad) {
+ uint32_t offs = 0;
+
+ while(offs < slave->mtd.size) {
+ if (master->block_isbad(master,
+ offs + slave->offset))
+ slave->mtd.ecc_stats.badblocks++;
+ offs += slave->mtd.erasesize;
+ }
+ }
+
+ if(part->mtdp)
+ { /* store the object pointer (caller may or may not register it */
+ *part->mtdp = &slave->mtd;
+ slave->registered = 0;
+ }
+ else
+ {
+ /* register our partition */
+ add_mtd_device(&slave->mtd);
+ slave->registered = 1;
+ }
+
+ if (slp)
+ *slp = slave;
+
+ return 0;
+}
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+#define ROOTFS_SPLIT_NAME "rootfs_data"
+#define ROOTFS_REMOVED_NAME "<removed>"
+static int split_squashfs(struct mtd_info *master, int offset, int *split_offset)
+{
+ char buf[512];
+ struct squashfs_super_block *sb = (struct squashfs_super_block *) buf;
+ int len, ret;
+
+ ret = master->read(master, offset, sizeof(*sb), &len, buf);
+ if (ret || (len != sizeof(*sb))) {
+ printk(KERN_ALERT "split_squashfs: error occured while reading "
+ "from \"%s\"\n", master->name);
+ return -EINVAL;
+ }
+
+ if (*((u32 *) buf) != SQUASHFS_MAGIC) {
+ printk(KERN_ALERT "split_squasfs: no squashfs found in \"%s\"\n",
+ master->name);
+ *split_offset = 0;
+ return 0;
+ }
+
+ if (sb->bytes_used <= 0) {
+ printk(KERN_ALERT "split_squashfs: squashfs is empty in \"%s\"\n",
+ master->name);
+ *split_offset = 0;
+ return 0;
+ }
+
+ len = (u32) sb->bytes_used;
+ len += (offset & 0x000fffff);
+ len += (master->erasesize - 1);
+ len &= ~(master->erasesize - 1);
+ len -= (offset & 0x000fffff);
+ *split_offset = offset + len;
+
+ return 0;
+}
+
+static int split_rootfs_data(struct mtd_info *master, struct mtd_info *rpart, struct mtd_partition *part,
+ int index)
+{
+ struct mtd_partition *dpart;
+ struct mtd_part *slave = NULL;
+ int split_offset = 0;
+ int ret;
+
+ ret = split_squashfs(master, part->offset, &split_offset);
+ if (ret)
+ return ret;
+
+ if (split_offset <= 0)
+ return 0;
+
+ dpart = kmalloc(sizeof(*part)+sizeof(ROOTFS_SPLIT_NAME)+1, GFP_KERNEL);
+ if (dpart == NULL) {
+ printk(KERN_INFO "split_squashfs: no memory for partition \"%s\"\n",
+ ROOTFS_SPLIT_NAME);
+ return -ENOMEM;
+ }
+
+ memcpy(dpart, part, sizeof(*part));
+ dpart->name = (unsigned char *)&dpart[1];
+ strcpy(dpart->name, ROOTFS_SPLIT_NAME);
+
+ dpart->size -= split_offset - dpart->offset;
+ dpart->offset = split_offset;
+
+ if (dpart == NULL)
+ return 1;
+
+ printk(KERN_INFO "mtd: partition \"%s\" created automatically, ofs=%X, len=%X \n",
+ ROOTFS_SPLIT_NAME, dpart->offset, dpart->size);
+
+ ret = add_one_partition(master, dpart, index, &slave);
+ if (ret)
+ kfree(dpart);
+ else if (slave)
+ rpart->split = &slave->mtd;
+
+ return ret;
+}
+
+static int refresh_rootfs_split(struct mtd_info *mtd)
+{
+ struct mtd_partition tpart;
+ struct mtd_part *part;
+ int index = 0;
+ int offset, size;
+ int ret;
+
+ part = PART(mtd);
+
+ /* check for the new squashfs offset first */
+ ret = split_squashfs(part->master, part->offset, &offset);
+ if (ret)
+ return ret;
+
+ if ((offset > 0) && !mtd->split) {
+ printk(KERN_INFO "%s: creating new split partition for \"%s\"\n", __func__, mtd->name);
+ /* if we don't have a rootfs split partition, create a new one */
+ tpart.name = mtd->name;
+ tpart.size = mtd->size;
+ tpart.offset = part->offset;
+
+ /* find the index of the last partition */
+ if (!list_empty(&mtd_partitions))
+ index = list_first_entry(&mtd_partitions, struct mtd_part, list)->index + 1;
+
+ return split_rootfs_data(part->master, &part->mtd, &tpart, index);
+ } else if ((offset > 0) && mtd->split) {
+ /* update the offsets of the existing partition */
+ size = mtd->size + part->offset - offset;
+
+ part = PART(mtd->split);
+ part->offset = offset;
+ part->mtd.size = size;
+ printk(KERN_INFO "%s: %s partition \"" ROOTFS_SPLIT_NAME "\", offset: 0x%06x (0x%06x)\n",
+ __func__, (!strcmp(part->mtd.name, ROOTFS_SPLIT_NAME) ? "updating" : "creating"),
+ part->offset, part->mtd.size);
+ strcpy(part->mtd.name, ROOTFS_SPLIT_NAME);
+ } else if ((offset <= 0) && mtd->split) {
+ printk(KERN_INFO "%s: removing partition \"%s\"\n", __func__, mtd->split->name);
+
+ /* mark existing partition as removed */
+ part = PART(mtd->split);
+ strcpy(part->mtd.name, ROOTFS_REMOVED_NAME);
+ part->offset = 0;
+ part->mtd.size = 0;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_MTD_ROOTFS_SPLIT */
+
/*
* This function, given a master MTD object and a partition table, creates
* and registers slave MTD objects which are bound to the master according to
@@ -320,168 +628,31 @@
int nbparts)
{
struct mtd_part *slave;
- u_int32_t cur_offset = 0;
- int i;
+ struct mtd_partition *part;
+ int i, j, ret = 0;
printk (KERN_NOTICE "Creating %d MTD partitions on \"%s\":\n", nbparts, master->name);
- for (i = 0; i < nbparts; i++) {
-
- /* allocate the partition structure */
- slave = kzalloc (sizeof(*slave), GFP_KERNEL);
- if (!slave) {
- printk ("memory allocation error while creating partitions for \"%s\"\n",
- master->name);
- del_mtd_partitions(master);
- return -ENOMEM;
- }
- list_add(&slave->list, &mtd_partitions);
-
- /* set up the MTD object for this partition */
- slave->mtd.type = master->type;
- slave->mtd.flags = master->flags & ~parts[i].mask_flags;
- slave->mtd.size = parts[i].size;
- slave->mtd.writesize = master->writesize;
- slave->mtd.oobsize = master->oobsize;
- slave->mtd.oobavail = master->oobavail;
- slave->mtd.subpage_sft = master->subpage_sft;
-
- slave->mtd.name = parts[i].name;
- slave->mtd.owner = master->owner;
-
- slave->mtd.read = part_read;
- slave->mtd.write = part_write;
-
- if(master->point && master->unpoint){
- slave->mtd.point = part_point;
- slave->mtd.unpoint = part_unpoint;
- }
-
- if (master->read_oob)
- slave->mtd.read_oob = part_read_oob;
- if (master->write_oob)
- slave->mtd.write_oob = part_write_oob;
- if(master->read_user_prot_reg)
- slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
- if(master->read_fact_prot_reg)
- slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
- if(master->write_user_prot_reg)
- slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
- if(master->lock_user_prot_reg)
- slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
- if(master->get_user_prot_info)
- slave->mtd.get_user_prot_info = part_get_user_prot_info;
- if(master->get_fact_prot_info)
- slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
- if (master->sync)
- slave->mtd.sync = part_sync;
- if (!i && master->suspend && master->resume) {
- slave->mtd.suspend = part_suspend;
- slave->mtd.resume = part_resume;
- }
- if (master->writev)
- slave->mtd.writev = part_writev;
- if (master->lock)
- slave->mtd.lock = part_lock;
- if (master->unlock)
- slave->mtd.unlock = part_unlock;
- if (master->block_isbad)
- slave->mtd.block_isbad = part_block_isbad;
- if (master->block_markbad)
- slave->mtd.block_markbad = part_block_markbad;
- slave->mtd.erase = part_erase;
- slave->master = master;
- slave->offset = parts[i].offset;
- slave->index = i;
-
- if (slave->offset == MTDPART_OFS_APPEND)
- slave->offset = cur_offset;
- if (slave->offset == MTDPART_OFS_NXTBLK) {
- slave->offset = cur_offset;
- if ((cur_offset % master->erasesize) != 0) {
- /* Round up to next erasesize */
- slave->offset = ((cur_offset / master->erasesize) + 1) * master->erasesize;
- printk(KERN_NOTICE "Moving partition %d: "
- "0x%08x -> 0x%08x\n", i,
- cur_offset, slave->offset);
- }
- }
- if (slave->mtd.size == MTDPART_SIZ_FULL)
- slave->mtd.size = master->size - slave->offset;
- cur_offset = slave->offset + slave->mtd.size;
-
- printk (KERN_NOTICE "0x%08x-0x%08x : \"%s\"\n", slave->offset,
- slave->offset + slave->mtd.size, slave->mtd.name);
-
- /* let's do some sanity checks */
- if (slave->offset >= master->size) {
- /* let's register it anyway to preserve ordering */
- slave->offset = 0;
- slave->mtd.size = 0;
- printk ("mtd: partition \"%s\" is out of reach -- disabled\n",
- parts[i].name);
- }
- if (slave->offset + slave->mtd.size > master->size) {
- slave->mtd.size = master->size - slave->offset;
- printk ("mtd: partition \"%s\" extends beyond the end of device \"%s\" -- size truncated to %#x\n",
- parts[i].name, master->name, slave->mtd.size);
- }
- if (master->numeraseregions>1) {
- /* Deal with variable erase size stuff */
- int i;
- struct mtd_erase_region_info *regions = master->eraseregions;
-
- /* Find the first erase regions which is part of this partition. */
- for (i=0; i < master->numeraseregions && slave->offset >= regions[i].offset; i++)
- ;
-
- for (i--; i < master->numeraseregions && slave->offset + slave->mtd.size > regions[i].offset; i++) {
- if (slave->mtd.erasesize < regions[i].erasesize) {
- slave->mtd.erasesize = regions[i].erasesize;
- }
+ for (i = 0, j = 0; i < nbparts; i++) {
+ part = (struct mtd_partition *) &parts[i];
+ ret = add_one_partition(master, part, j, &slave);
+ if (ret)
+ return ret;
+ j++;
+
+ if (strcmp(part->name, "rootfs") == 0 && slave->registered) {
+#ifdef CONFIG_MTD_ROOTFS_ROOT_DEV
+ if (ROOT_DEV == 0) {
+ printk(KERN_NOTICE "mtd: partition \"rootfs\" "
+ "set to be root filesystem\n");
+ ROOT_DEV = MKDEV(MTD_BLOCK_MAJOR, slave->mtd.index);
}
- } else {
- /* Single erase size */
- slave->mtd.erasesize = master->erasesize;
- }
-
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->offset % slave->mtd.erasesize)) {
- /* Doesn't start on a boundary of major erase size */
- /* FIXME: Let it be writable if it is on a boundary of _minor_ erase size though */
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk ("mtd: partition \"%s\" doesn't start on an erase block boundary -- force read-only\n",
- parts[i].name);
- }
- if ((slave->mtd.flags & MTD_WRITEABLE) &&
- (slave->mtd.size % slave->mtd.erasesize)) {
- slave->mtd.flags &= ~MTD_WRITEABLE;
- printk ("mtd: partition \"%s\" doesn't end on an erase block -- force read-only\n",
- parts[i].name);
- }
-
- slave->mtd.ecclayout = master->ecclayout;
- if (master->block_isbad) {
- uint32_t offs = 0;
-
- while(offs < slave->mtd.size) {
- if (master->block_isbad(master,
- offs + slave->offset))
- slave->mtd.ecc_stats.badblocks++;
- offs += slave->mtd.erasesize;
- }
- }
-
- if(parts[i].mtdp)
- { /* store the object pointer (caller may or may not register it */
- *parts[i].mtdp = &slave->mtd;
- slave->registered = 0;
- }
- else
- {
- /* register our partition */
- add_mtd_device(&slave->mtd);
- slave->registered = 1;
+#endif
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ ret = split_rootfs_data(master, &slave->mtd, part, j);
+ if (ret == 0)
+ j++;
+#endif
}
}
@@ -557,6 +728,32 @@
return ret;
}
+int refresh_mtd_partitions(struct mtd_info *mtd)
+{
+ int ret = 0;
+
+ if (IS_PART(mtd)) {
+ struct mtd_part *part;
+ struct mtd_info *master;
+
+ part = PART(mtd);
+ master = part->master;
+ if (master->refresh_device)
+ ret = master->refresh_device(master);
+ }
+
+ if (!ret && mtd->refresh_device)
+ ret = mtd->refresh_device(mtd);
+
+#ifdef CONFIG_MTD_ROOTFS_SPLIT
+ if (!ret && IS_PART(mtd) && !strcmp(mtd->name, "rootfs"))
+ refresh_rootfs_split(mtd);
+#endif
+
+ return 0;
+}
+
EXPORT_SYMBOL_GPL(parse_mtd_partitions);
+EXPORT_SYMBOL_GPL(refresh_mtd_partitions);
EXPORT_SYMBOL_GPL(register_mtd_parser);
EXPORT_SYMBOL_GPL(deregister_mtd_parser);
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -34,6 +34,8 @@
struct block_device *blkdev;
struct mtd_info mtd;
struct mutex write_mutex;
+ rwlock_t bdev_mutex;
+ char devname[0];
};
@@ -86,6 +88,12 @@
size_t len = instr->len;
int err;
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev) {
+ err = -EINVAL;
+ goto done;
+ }
+
instr->state = MTD_ERASING;
mutex_lock(&dev->write_mutex);
err = _block2mtd_erase(dev, from, len);
@@ -98,6 +106,10 @@
instr->state = MTD_ERASE_DONE;
mtd_erase_callback(instr);
+
+done:
+ read_unlock(&dev->bdev_mutex);
+
return err;
}
@@ -109,10 +121,14 @@
struct page *page;
int index = from >> PAGE_SHIFT;
int offset = from & (PAGE_SIZE-1);
- int cpylen;
+ int cpylen, err = 0;
+
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev || (from > mtd->size)) {
+ err = -EINVAL;
+ goto done;
+ }
- if (from > mtd->size)
- return -EINVAL;
if (from + len > mtd->size)
len = mtd->size - from;
@@ -127,10 +143,14 @@
len = len - cpylen;
page = page_read(dev->blkdev->bd_inode->i_mapping, index);
- if (!page)
- return -ENOMEM;
- if (IS_ERR(page))
- return PTR_ERR(page);
+ if (!page) {
+ err = -ENOMEM;
+ goto done;
+ }
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto done;
+ }
memcpy(buf, page_address(page) + offset, cpylen);
page_cache_release(page);
@@ -141,7 +161,10 @@
offset = 0;
index++;
}
- return 0;
+
+done:
+ read_unlock(&dev->bdev_mutex);
+ return err;
}
@@ -193,12 +216,22 @@
size_t *retlen, const u_char *buf)
{
struct block2mtd_dev *dev = mtd->priv;
- int err;
+ int err = 0;
+
+ read_lock(&dev->bdev_mutex);
+ if (!dev->blkdev) {
+ err = -EINVAL;
+ goto done;
+ }
if (!len)
- return 0;
- if (to >= mtd->size)
- return -ENOSPC;
+ goto done;
+
+ if (to >= mtd->size) {
+ err = -ENOSPC;
+ goto done;
+ }
+
if (to + len > mtd->size)
len = mtd->size - to;
@@ -207,6 +240,9 @@
mutex_unlock(&dev->write_mutex);
if (err > 0)
err = 0;
+
+done:
+ read_unlock(&dev->bdev_mutex);
return err;
}
@@ -215,51 +251,29 @@
static void block2mtd_sync(struct mtd_info *mtd)
{
struct block2mtd_dev *dev = mtd->priv;
- sync_blockdev(dev->blkdev);
- return;
-}
-
-
-static void block2mtd_free_device(struct block2mtd_dev *dev)
-{
- if (!dev)
- return;
-
- kfree(dev->mtd.name);
- if (dev->blkdev) {
- invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping,
- 0, -1);
- close_bdev_excl(dev->blkdev);
- }
+ read_lock(&dev->bdev_mutex);
+ if (dev->blkdev)
+ sync_blockdev(dev->blkdev);
+ read_unlock(&dev->bdev_mutex);
- kfree(dev);
+ return;
}
-/* FIXME: ensure that mtd->size % erase_size == 0 */
-static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+static int _open_bdev(struct block2mtd_dev *dev)
{
struct block_device *bdev;
- struct block2mtd_dev *dev;
- struct mtd_partition *part;
-
- if (!devname)
- return NULL;
-
- dev = kzalloc(sizeof(struct block2mtd_dev), GFP_KERNEL);
- if (!dev)
- return NULL;
/* Get a handle on the device */
- bdev = open_bdev_excl(devname, O_RDWR, NULL);
+ bdev = open_bdev_excl(dev->devname, O_RDWR, NULL);
#ifndef MODULE
if (IS_ERR(bdev)) {
/* We might not have rootfs mounted at this point. Try
to resolve the device name by other means. */
- dev_t devt = name_to_dev_t(devname);
+ dev_t devt = name_to_dev_t(dev->devname);
if (devt) {
bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
}
@@ -267,17 +281,96 @@
#endif
if (IS_ERR(bdev)) {
- ERROR("error: cannot open device %s", devname);
- goto devinit_err;
+ ERROR("error: cannot open device %s", dev->devname);
+ return 1;
}
dev->blkdev = bdev;
if (MAJOR(bdev->bd_dev) == MTD_BLOCK_MAJOR) {
ERROR("attempting to use an MTD device as a block device");
- goto devinit_err;
+ return 1;
}
+ return 0;
+}
+
+static void _close_bdev(struct block2mtd_dev *dev)
+{
+ struct block_device *bdev;
+
+ if (!dev->blkdev)
+ return;
+
+ bdev = dev->blkdev;
+ invalidate_mapping_pages(dev->blkdev->bd_inode->i_mapping, 0, -1);
+ close_bdev_excl(dev->blkdev);
+ dev->blkdev = NULL;
+}
+
+static void block2mtd_free_device(struct block2mtd_dev *dev)
+{
+ if (!dev)
+ return;
+
+ kfree(dev->mtd.name);
+ _close_bdev(dev);
+ kfree(dev);
+}
+
+
+static int block2mtd_refresh(struct mtd_info *mtd)
+{
+ struct block2mtd_dev *dev = mtd->priv;
+ struct block_device *bdev;
+ dev_t devt;
+ int err = 0;
+
+ /* no other mtd function can run at this point */
+ write_lock(&dev->bdev_mutex);
+
+ /* get the device number for the whole disk */
+ devt = MKDEV(MAJOR(dev->blkdev->bd_dev), 0);
+
+ /* close the old block device */
+ _close_bdev(dev);
+
+ /* open the whole disk, issue a partition rescan, then */
+ bdev = open_by_devnum(devt, FMODE_WRITE | FMODE_READ);
+ if (!bdev || !bdev->bd_disk)
+ err = -EINVAL;
+ else {
+ err = rescan_partitions(bdev->bd_disk, bdev);
+ }
+ if (bdev)
+ close_bdev_excl(bdev);
+
+ /* try to open the partition block device again */
+ _open_bdev(dev);
+ write_unlock(&dev->bdev_mutex);
+
+ return err;
+}
+
+/* FIXME: ensure that mtd->size % erase_size == 0 */
+static struct block2mtd_dev *add_device(char *devname, int erase_size, char *mtdname)
+{
+ struct block2mtd_dev *dev;
+ struct mtd_partition *part;
+
+ if (!devname)
+ return NULL;
+
+ dev = kzalloc(sizeof(struct block2mtd_dev) + strlen(devname) + 1, GFP_KERNEL);
+ if (!dev)
+ return NULL;
+
+ strcpy(dev->devname, devname);
+
+ if (_open_bdev(dev))
+ goto devinit_err;
+
mutex_init(&dev->write_mutex);
+ rwlock_init(&dev->bdev_mutex);
/* Setup the MTD structure */
/* make the name contain the block device in */
@@ -304,6 +397,7 @@
dev->mtd.read = block2mtd_read;
dev->mtd.priv = dev;
dev->mtd.owner = THIS_MODULE;
+ dev->mtd.refresh_device = block2mtd_refresh;
part = kzalloc(sizeof(struct mtd_partition), GFP_KERNEL);
part->name = dev->mtd.name;
--- a/drivers/mtd/mtdchar.c
+++ b/drivers/mtd/mtdchar.c
@@ -16,6 +16,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/compatmac.h>
+#include <linux/mtd/partitions.h>
#include <asm/uaccess.h>
@@ -752,6 +753,13 @@
file->f_pos = 0;
break;
}
+#ifdef CONFIG_MTD_PARTITIONS
+ case MTDREFRESH:
+ {
+ ret = refresh_mtd_partitions(mtd);
+ break;
+ }
+#endif
default:
ret = -ENOTTY;
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -98,6 +98,7 @@
uint8_t *oobbuf;
};
+struct mtd_info;
struct mtd_info {
u_char type;
u_int32_t flags;
@@ -195,6 +196,9 @@
struct module *owner;
int usecount;
+ int (*refresh_device)(struct mtd_info *mtd);
+ struct mtd_info *split;
+
/* If the driver is something smart, like UBI, it may need to maintain
* its own reference counting. The below functions are only for driver.
* The driver may register its callbacks. These callbacks are not
--- a/include/linux/mtd/partitions.h
+++ b/include/linux/mtd/partitions.h
@@ -36,6 +36,7 @@
* erasesize aligned (e.g. use MTDPART_OFS_NEXTBLK).
*/
+struct mtd_partition;
struct mtd_partition {
char *name; /* identifier string */
u_int32_t size; /* partition size */
@@ -43,6 +44,7 @@
u_int32_t mask_flags; /* master MTD flags to mask out for this partition */
struct nand_ecclayout *ecclayout; /* out of band layout for this partition (NAND only)*/
struct mtd_info **mtdp; /* pointer to store the MTD object */
+ int (*refresh_partition)(struct mtd_info *);
};
#define MTDPART_OFS_NXTBLK (-2)
@@ -52,6 +54,7 @@
int add_mtd_partitions(struct mtd_info *, const struct mtd_partition *, int);
int del_mtd_partitions(struct mtd_info *);
+int refresh_mtd_partitions(struct mtd_info *);
/*
* Functions dealing with the various ways of partitioning the space
--- a/include/mtd/mtd-abi.h
+++ b/include/mtd/mtd-abi.h
@@ -95,6 +95,7 @@
#define ECCGETLAYOUT _IOR('M', 17, struct nand_ecclayout)
#define ECCGETSTATS _IOR('M', 18, struct mtd_ecc_stats)
#define MTDFILEMODE _IO('M', 19)
+#define MTDREFRESH _IO('M', 23)
/*
* Obsolete legacy interface. Keep it in order not to break userspace
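
Note: the MTDREFRESH ioctl added above takes no argument and simply triggers refresh_mtd_partitions() on the selected device. A minimal userspace sketch of how it could be invoked (the file name and the local fallback define are illustrative, not part of the patch):

/* mtd_refresh.c - hypothetical example, assumes a kernel with this patch applied */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#ifndef MTDREFRESH
#define MTDREFRESH _IO('M', 23)   /* mirrors the value added to mtd-abi.h above */
#endif

int main(int argc, char **argv)
{
	const char *dev = (argc > 1) ? argv[1] : "/dev/mtd0";
	int fd = open(dev, O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* ask the kernel to rescan the partition table behind this MTD device */
	if (ioctl(fd, MTDREFRESH) < 0) {
		perror("ioctl(MTDREFRESH)");
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}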

View file

@ -1,109 +0,0 @@
--- a/include/linux/netfilter/xt_layer7.h
+++ b/include/linux/netfilter/xt_layer7.h
@@ -8,6 +8,7 @@
char protocol[MAX_PROTOCOL_LEN];
char pattern[MAX_PATTERN_LEN];
u_int8_t invert;
+ u_int8_t pkt;
};
#endif /* _XT_LAYER7_H */
--- a/net/netfilter/xt_layer7.c
+++ b/net/netfilter/xt_layer7.c
@@ -297,34 +297,36 @@
}
/* add the new app data to the conntrack. Return number of bytes added. */
-static int add_data(struct nf_conn * master_conntrack,
- char * app_data, int appdatalen)
+static int add_datastr(char *target, int offset, char *app_data, int len)
{
int length = 0, i;
- int oldlength = master_conntrack->layer7.app_data_len;
-
- /* This is a fix for a race condition by Deti Fliegl. However, I'm not
- clear on whether the race condition exists or whether this really
- fixes it. I might just be being dense... Anyway, if it's not really
- a fix, all it does is waste a very small amount of time. */
- if(!master_conntrack->layer7.app_data) return 0;
+
+ if (!target) return 0;
/* Strip nulls. Make everything lower case (our regex lib doesn't
do case insensitivity). Add it to the end of the current data. */
- for(i = 0; i < maxdatalen-oldlength-1 &&
- i < appdatalen; i++) {
+ for(i = 0; i < maxdatalen-offset-1 && i < len; i++) {
if(app_data[i] != '\0') {
/* the kernel version of tolower mungs 'upper ascii' */
- master_conntrack->layer7.app_data[length+oldlength] =
+ target[length+offset] =
isascii(app_data[i])?
tolower(app_data[i]) : app_data[i];
length++;
}
}
+ target[length+offset] = '\0';
+
+ return length;
+}
- master_conntrack->layer7.app_data[length+oldlength] = '\0';
- master_conntrack->layer7.app_data_len = length + oldlength;
+/* add the new app data to the conntrack. Return number of bytes added. */
+static int add_data(struct nf_conn * master_conntrack,
+ char * app_data, int appdatalen)
+{
+ int length;
+ length = add_datastr(master_conntrack->layer7.app_data, master_conntrack->layer7.app_data_len, app_data, appdatalen);
+ master_conntrack->layer7.app_data_len += length;
return length;
}
@@ -411,7 +413,7 @@
const struct xt_layer7_info * info = matchinfo;
enum ip_conntrack_info master_ctinfo, ctinfo;
struct nf_conn *master_conntrack, *conntrack;
- unsigned char * app_data;
+ unsigned char *app_data, *tmp_data;
unsigned int pattern_result, appdatalen;
regexp * comppattern;
@@ -439,8 +441,8 @@
master_conntrack = master_ct(master_conntrack);
/* if we've classified it or seen too many packets */
- if(TOTAL_PACKETS > num_packets ||
- master_conntrack->layer7.app_proto) {
+ if(!info->pkt && (TOTAL_PACKETS > num_packets ||
+ master_conntrack->layer7.app_proto)) {
pattern_result = match_no_append(conntrack, master_conntrack,
ctinfo, master_ctinfo, info);
@@ -473,6 +475,25 @@
/* the return value gets checked later, when we're ready to use it */
comppattern = compile_and_cache(info->pattern, info->protocol);
+ if (info->pkt) {
+ tmp_data = kmalloc(maxdatalen, GFP_ATOMIC);
+ if(!tmp_data){
+ if (net_ratelimit())
+ printk(KERN_ERR "layer7: out of memory in match, bailing.\n");
+ return info->invert;
+ }
+
+ tmp_data[0] = '\0';
+ add_datastr(tmp_data, 0, app_data, appdatalen);
+ pattern_result = ((comppattern && regexec(comppattern, tmp_data)) ? 1 : 0);
+
+ kfree(tmp_data);
+ tmp_data = NULL;
+ spin_unlock_bh(&l7_lock);
+
+ return (pattern_result ^ info->invert);
+ }
+
/* On the first packet of a connection, allocate space for app data */
if(TOTAL_PACKETS == 1 && !skb->cb[0] &&
!master_conntrack->layer7.app_data){
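
For reference, the per-packet mode added above reuses the same normalisation as the per-connection match: add_datastr() drops NUL bytes and lower-cases ASCII before the regex runs. A standalone userspace restatement of that loop (function and buffer names are illustrative, not part of the kernel patch):

#include <ctype.h>
#include <stdio.h>

/* Append up to maxdatalen-offset-1 bytes of app_data into 'target',
 * skipping NUL bytes and lower-casing ASCII, the same normalisation
 * the match applies before regexec(). Returns bytes appended. */
static int append_stripped(char *target, int maxdatalen, int offset,
                           const char *app_data, int len)
{
	int length = 0, i;

	if (!target)
		return 0;
	for (i = 0; i < maxdatalen - offset - 1 && i < len; i++) {
		if (app_data[i] != '\0') {
			target[length + offset] = isascii((unsigned char)app_data[i]) ?
				tolower((unsigned char)app_data[i]) : app_data[i];
			length++;
		}
	}
	target[length + offset] = '\0';
	return length;
}

int main(void)
{
	char buf[64] = "";

	/* 16 bytes of input, including two embedded NUL bytes */
	append_stripped(buf, sizeof(buf), 0, "GET /Index\0\0HTTP", 16);
	printf("%s\n", buf);   /* prints "get /indexhttp" */
	return 0;
}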

View file

@ -1,944 +0,0 @@
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_ipp2p.h
@@ -0,0 +1,31 @@
+#ifndef __IPT_IPP2P_H
+#define __IPT_IPP2P_H
+#define IPP2P_VERSION "0.8.1_rc1"
+
+struct ipt_p2p_info {
+ int cmd;
+ int debug;
+};
+
+#endif //__IPT_IPP2P_H
+
+#define SHORT_HAND_IPP2P 1 /* --ipp2p switch*/
+//#define SHORT_HAND_DATA 4 /* --ipp2p-data switch*/
+#define SHORT_HAND_NONE 5 /* no short hand*/
+
+#define IPP2P_EDK (1 << 1)
+#define IPP2P_DATA_KAZAA (1 << 2)
+#define IPP2P_DATA_EDK (1 << 3)
+#define IPP2P_DATA_DC (1 << 4)
+#define IPP2P_DC (1 << 5)
+#define IPP2P_DATA_GNU (1 << 6)
+#define IPP2P_GNU (1 << 7)
+#define IPP2P_KAZAA (1 << 8)
+#define IPP2P_BIT (1 << 9)
+#define IPP2P_APPLE (1 << 10)
+#define IPP2P_SOUL (1 << 11)
+#define IPP2P_WINMX (1 << 12)
+#define IPP2P_ARES (1 << 13)
+#define IPP2P_MUTE (1 << 14)
+#define IPP2P_WASTE (1 << 15)
+#define IPP2P_XDCC (1 << 16)
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_ipp2p.c
@@ -0,0 +1,882 @@
+#if defined(MODVERSIONS)
+#include <linux/modversions.h>
+#endif
+#include <linux/module.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/version.h>
+#include <linux/netfilter_ipv4/ipt_ipp2p.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+
+#define get_u8(X,O) (*(__u8 *)(X + O))
+#define get_u16(X,O) (*(__u16 *)(X + O))
+#define get_u32(X,O) (*(__u32 *)(X + O))
+
+MODULE_AUTHOR("Eicke Friedrich/Klaus Degner <ipp2p@ipp2p.org>");
+MODULE_DESCRIPTION("An extension to iptables to identify P2P traffic.");
+MODULE_LICENSE("GPL");
+
+
+/*Search for UDP eDonkey/eMule/Kad commands*/
+int
+udp_search_edk (unsigned char *haystack, int packet_len)
+{
+ unsigned char *t = haystack;
+ t += 8;
+
+ switch (t[0]) {
+ case 0xe3:
+ { /*edonkey*/
+ switch (t[1])
+ {
+ /* client -> server status request */
+ case 0x96:
+ if (packet_len == 14) return ((IPP2P_EDK * 100) + 50);
+ break;
+ /* server -> client status request */
+ case 0x97: if (packet_len == 42) return ((IPP2P_EDK * 100) + 51);
+ break;
+ /* server description request */
+ /* e3 2a ff f0 .. | size == 6 */
+ case 0xa2: if ( (packet_len == 14) && ( get_u16(t,2) == __constant_htons(0xfff0) ) ) return ((IPP2P_EDK * 100) + 52);
+ break;
+ /* server description response */
+ /* e3 a3 ff f0 .. | size > 40 && size < 200 */
+ //case 0xa3: return ((IPP2P_EDK * 100) + 53);
+ // break;
+ case 0x9a: if (packet_len==26) return ((IPP2P_EDK * 100) + 54);
+ break;
+
+ case 0x92: if (packet_len==18) return ((IPP2P_EDK * 100) + 55);
+ break;
+ }
+ break;
+ }
+ case 0xe4:
+ {
+ switch (t[1])
+ {
+ /* e4 20 .. | size == 43 */
+ case 0x20: if ((packet_len == 43) && (t[2] != 0x00) && (t[34] != 0x00)) return ((IPP2P_EDK * 100) + 60);
+ break;
+ /* e4 00 .. 00 | size == 35 ? */
+ case 0x00: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 61);
+ break;
+ /* e4 10 .. 00 | size == 35 ? */
+ case 0x10: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 62);
+ break;
+ /* e4 18 .. 00 | size == 35 ? */
+ case 0x18: if ((packet_len == 35) && (t[26] == 0x00)) return ((IPP2P_EDK * 100) + 63);
+ break;
+ /* e4 52 .. | size = 44 */
+ case 0x52: if (packet_len == 44 ) return ((IPP2P_EDK * 100) + 64);
+ break;
+ /* e4 58 .. | size == 6 */
+ case 0x58: if (packet_len == 14 ) return ((IPP2P_EDK * 100) + 65);
+ break;
+ /* e4 59 .. | size == 2 */
+ case 0x59: if (packet_len == 10 )return ((IPP2P_EDK * 100) + 66);
+ break;
+ /* e4 28 .. | packet_len == 52,77,102,127... */
+ case 0x28: if (((packet_len-52) % 25) == 0) return ((IPP2P_EDK * 100) + 67);
+ break;
+ /* e4 50 xx xx | size == 4 */
+ case 0x50: if (packet_len == 12) return ((IPP2P_EDK * 100) + 68);
+ break;
+ /* e4 40 xx xx | size == 48 */
+ case 0x40: if (packet_len == 56) return ((IPP2P_EDK * 100) + 69);
+ break;
+ }
+ break;
+ }
+ } /* end of switch (t[0]) */
+ return 0;
+}/*udp_search_edk*/
+
+
+/*Search for UDP Gnutella commands*/
+int
+udp_search_gnu (unsigned char *haystack, int packet_len)
+{
+ unsigned char *t = haystack;
+ t += 8;
+
+ if (memcmp(t, "GND", 3) == 0) return ((IPP2P_GNU * 100) + 51);
+ if (memcmp(t, "GNUTELLA ", 9) == 0) return ((IPP2P_GNU * 100) + 52);
+ return 0;
+}/*udp_search_gnu*/
+
+
+/*Search for UDP KaZaA commands*/
+int
+udp_search_kazaa (unsigned char *haystack, int packet_len)
+{
+ unsigned char *t = haystack;
+
+ if (t[packet_len-1] == 0x00){
+ t += (packet_len - 6);
+ if (memcmp(t, "KaZaA", 5) == 0) return (IPP2P_KAZAA * 100 +50);
+ }
+
+ return 0;
+}/*udp_search_kazaa*/
+
+/*Search for UDP DirectConnect commands*/
+int
+udp_search_directconnect (unsigned char *haystack, int packet_len)
+{
+ unsigned char *t = haystack;
+ if ((*(t + 8) == 0x24) && (*(t + packet_len - 1) == 0x7c)) {
+ t+=8;
+ if (memcmp(t, "SR ", 3) == 0) return ((IPP2P_DC * 100) + 60);
+ if (memcmp(t, "Ping ", 5) == 0) return ((IPP2P_DC * 100) + 61);
+ }
+ return 0;
+}/*udp_search_directconnect*/
+
+
+
+/*Search for UDP BitTorrent commands*/
+int
+udp_search_bit (unsigned char *haystack, int packet_len)
+{
+ switch(packet_len)
+ {
+ case 24:
+ /* ^ 00 00 04 17 27 10 19 80 */
+ if ((ntohl(get_u32(haystack, 8)) == 0x00000417) && (ntohl(get_u32(haystack, 12)) == 0x27101980))
+ return (IPP2P_BIT * 100 + 50);
+ break;
+ case 44:
+ if (get_u32(haystack, 16) == __constant_htonl(0x00000400) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
+ return (IPP2P_BIT * 100 + 51);
+ if (get_u32(haystack, 16) == __constant_htonl(0x00000400))
+ return (IPP2P_BIT * 100 + 61);
+ break;
+ case 65:
+ if (get_u32(haystack, 16) == __constant_htonl(0x00000404) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
+ return (IPP2P_BIT * 100 + 52);
+ if (get_u32(haystack, 16) == __constant_htonl(0x00000404))
+ return (IPP2P_BIT * 100 + 62);
+ break;
+ case 67:
+ if (get_u32(haystack, 16) == __constant_htonl(0x00000406) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
+ return (IPP2P_BIT * 100 + 53);
+ if (get_u32(haystack, 16) == __constant_htonl(0x00000406))
+ return (IPP2P_BIT * 100 + 63);
+ break;
+ case 211:
+ if (get_u32(haystack, 8) == __constant_htonl(0x00000405))
+ return (IPP2P_BIT * 100 + 54);
+ break;
+ case 29:
+ if ((get_u32(haystack, 8) == __constant_htonl(0x00000401)))
+ return (IPP2P_BIT * 100 + 55);
+ break;
+ case 52:
+ if (get_u32(haystack,8) == __constant_htonl(0x00000827) &&
+ get_u32(haystack,12) == __constant_htonl(0x37502950))
+ return (IPP2P_BIT * 100 + 80);
+ break;
+ default:
+ /* this packet does not have a constant size */
+ if (packet_len >= 40 && get_u32(haystack, 16) == __constant_htonl(0x00000402) && get_u32(haystack, 36) == __constant_htonl(0x00000104))
+ return (IPP2P_BIT * 100 + 56);
+ break;
+ }
+
+ /* some extra-bitcomet rules:
+ * "d1:" [a|r] "d2:id20:"
+ */
+ if (packet_len > 30 && get_u8(haystack, 8) == 'd' && get_u8(haystack, 9) == '1' && get_u8(haystack, 10) == ':' )
+ {
+ if (get_u8(haystack, 11) == 'a' || get_u8(haystack, 11) == 'r')
+ {
+ if (memcmp(haystack+12,"d2:id20:",8)==0)
+ return (IPP2P_BIT * 100 + 57);
+ }
+ }
+
+#if 0
+ /* bitlord rules */
+ /* packetlen must be bigger than 40 */
+ /* first 4 bytes are zero */
+ if (packet_len > 40 && get_u32(haystack, 8) == 0x00000000)
+ {
+ /* first rule: 00 00 00 00 01 00 00 xx xx xx xx 00 00 00 00*/
+ if (get_u32(haystack, 12) == 0x00000000 &&
+ get_u32(haystack, 16) == 0x00010000 &&
+ get_u32(haystack, 24) == 0x00000000 )
+ return (IPP2P_BIT * 100 + 71);
+
+ /* 00 01 00 00 0d 00 00 xx xx xx xx 00 00 00 00*/
+ if (get_u32(haystack, 12) == 0x00000001 &&
+ get_u32(haystack, 16) == 0x000d0000 &&
+ get_u32(haystack, 24) == 0x00000000 )
+ return (IPP2P_BIT * 100 + 71);
+
+
+ }
+#endif
+
+ return 0;
+}/*udp_search_bit*/
+
+
+
+/*Search for Ares commands*/
+//#define IPP2P_DEBUG_ARES
+int
+search_ares (const unsigned char *payload, const u16 plen)
+//int search_ares (unsigned char *haystack, int packet_len, int head_len)
+{
+// const unsigned char *t = haystack + head_len;
+
+ /* all ares packets start with */
+ if (payload[1] == 0 && (plen - payload[0]) == 3)
+ {
+ switch (payload[2])
+ {
+ case 0x5a:
+ /* ares connect */
+ if ( plen == 6 && payload[5] == 0x05 ) return ((IPP2P_ARES * 100) + 1);
+ break;
+ case 0x09:
+ /* ares search, min 3 chars --> 14 bytes
+ * let's say a search can be up to 30 chars --> max 34 bytes
+ */
+ if ( plen >= 14 && plen <= 34 ) return ((IPP2P_ARES * 100) + 1);
+ break;
+#ifdef IPP2P_DEBUG_ARES
+ default:
+ printk(KERN_DEBUG "Unknown Ares command %x recognized, len: %u \n", (unsigned int) payload[2],plen);
+#endif /* IPP2P_DEBUG_ARES */
+ }
+ }
+
+#if 0
+ /* found connect packet: 03 00 5a 04 03 05 */
+ /* new version ares 1.8: 03 00 5a xx xx 05 */
+ if ((plen) == 6){ /* possible connect command*/
+ if ((payload[0] == 0x03) && (payload[1] == 0x00) && (payload[2] == 0x5a) && (payload[5] == 0x05))
+ return ((IPP2P_ARES * 100) + 1);
+ }
+ if ((plen) == 60){ /* possible download command*/
+ if ((payload[59] == 0x0a) && (payload[58] == 0x0a)){
+ if (memcmp(t, "PUSH SHA1:", 10) == 0) /* found download command */
+ return ((IPP2P_ARES * 100) + 2);
+ }
+ }
+#endif
+
+ return 0;
+} /*search_ares*/
+
+/*Search for SoulSeek commands*/
+int
+search_soul (const unsigned char *payload, const u16 plen)
+{
+//#define IPP2P_DEBUG_SOUL
+ /* match: xx xx xx xx | xx = sizeof(payload) - 4 */
+ if (get_u32(payload, 0) == (plen - 4)){
+ const __u32 m=get_u32(payload, 4);
+ /* match 00 yy yy 00, yy can be everything */
+ if ( get_u8(payload, 4) == 0x00 && get_u8(payload, 7) == 0x00 )
+ {
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "0: Soulseek command 0x%x recognized\n",get_u32(payload, 4));
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 1);
+ }
+
+ /* next match: 01 yy 00 00 | yy can be everything */
+ if ( get_u8(payload, 4) == 0x01 && get_u16(payload, 6) == 0x0000 )
+ {
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "1: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 2);
+ }
+
+ /* other soulseek commands are: 1-5,7,9,13-18,22,23,26,28,35-37,40-46,50,51,60,62-69,91,92,1001 */
+ /* try to do this in an intelligent way */
+ /* get all small commands */
+ switch(m)
+ {
+ case 7:
+ case 9:
+ case 22:
+ case 23:
+ case 26:
+ case 28:
+ case 50:
+ case 51:
+ case 60:
+ case 91:
+ case 92:
+ case 1001:
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "2: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 3);
+ }
+
+ if (m > 0 && m < 6 )
+ {
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "3: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 4);
+ }
+ if (m > 12 && m < 19 )
+ {
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "4: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 5);
+ }
+
+ if (m > 34 && m < 38 )
+ {
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "5: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 6);
+ }
+
+ if (m > 39 && m < 47 )
+ {
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "6: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 7);
+ }
+
+ if (m > 61 && m < 70 )
+ {
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "7: Soulseek command 0x%x recognized\n",get_u16(payload, 4));
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 8);
+ }
+
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "unknown SOULSEEK command: 0x%x, first 16 bit: 0x%x, first 8 bit: 0x%x ,soulseek ???\n",get_u32(payload, 4),get_u16(payload, 4) >> 16,get_u8(payload, 4) >> 24);
+#endif /* IPP2P_DEBUG_SOUL */
+ }
+
+ /* match 14 00 00 00 01 yy 00 00 00 STRING(YY) 01 00 00 00 00 46|50 00 00 00 00 */
+ /* without size at the beginning !!! */
+ if ( get_u32(payload, 0) == 0x14 && get_u8(payload, 4) == 0x01 )
+ {
+ __u32 y=get_u32(payload, 5);
+ /* we need 19 chars + string */
+ if ( (y + 19) <= (plen) )
+ {
+ const unsigned char *w=payload+9+y;
+ if (get_u32(w, 0) == 0x01 && ( get_u16(w, 4) == 0x4600 || get_u16(w, 4) == 0x5000) && get_u32(w, 6) == 0x00)
+ {
+#ifdef IPP2P_DEBUG_SOUL
+ printk(KERN_DEBUG "Soulseek special client command recognized\n");
+#endif /* IPP2P_DEBUG_SOUL */
+ return ((IPP2P_SOUL * 100) + 9);
+ }
+ }
+ }
+ return 0;
+}
+
+
+/*Search for WinMX commands*/
+int
+search_winmx (const unsigned char *payload, const u16 plen)
+{
+//#define IPP2P_DEBUG_WINMX
+ if (((plen) == 4) && (memcmp(payload, "SEND", 4) == 0)) return ((IPP2P_WINMX * 100) + 1);
+ if (((plen) == 3) && (memcmp(payload, "GET", 3) == 0)) return ((IPP2P_WINMX * 100) + 2);
+ //if (packet_len < (head_len + 10)) return 0;
+ if (plen < 10) return 0;
+
+ if ((memcmp(payload, "SEND", 4) == 0) || (memcmp(payload, "GET", 3) == 0)){
+ u16 c=4;
+ const u16 end=plen-2;
+ u8 count=0;
+ while (c < end)
+ {
+ if (payload[c]== 0x20 && payload[c+1] == 0x22)
+ {
+ c++;
+ count++;
+ if (count>=2) return ((IPP2P_WINMX * 100) + 3);
+ }
+ c++;
+ }
+ }
+
+ if ( plen == 149 && payload[0] == '8' )
+ {
+#ifdef IPP2P_DEBUG_WINMX
+ printk(KERN_INFO "maybe WinMX\n");
+#endif
+ if (get_u32(payload,17) == 0 && get_u32(payload,21) == 0 && get_u32(payload,25) == 0 &&
+// get_u32(payload,33) == __constant_htonl(0x71182b1a) && get_u32(payload,37) == __constant_htonl(0x05050000) &&
+// get_u32(payload,133) == __constant_htonl(0x31097edf) && get_u32(payload,145) == __constant_htonl(0xdcb8f792))
+ get_u16(payload,39) == 0 && get_u16(payload,135) == __constant_htons(0x7edf) && get_u16(payload,147) == __constant_htons(0xf792))
+
+ {
+#ifdef IPP2P_DEBUG_WINMX
+ printk(KERN_INFO "got WinMX\n");
+#endif
+ return ((IPP2P_WINMX * 100) + 4);
+ }
+ }
+ return 0;
+} /*search_winmx*/
+
+
+/*Search for appleJuice commands*/
+int
+search_apple (const unsigned char *payload, const u16 plen)
+{
+ if ( (plen > 7) && (payload[6] == 0x0d) && (payload[7] == 0x0a) && (memcmp(payload, "ajprot", 6) == 0)) return (IPP2P_APPLE * 100);
+
+ return 0;
+}
+
+
+/*Search for BitTorrent commands*/
+int
+search_bittorrent (const unsigned char *payload, const u16 plen)
+{
+ if (plen > 20)
+ {
+ /* test for match 0x13+"BitTorrent protocol" */
+ if (payload[0] == 0x13)
+ {
+ if (memcmp(payload+1, "BitTorrent protocol", 19) == 0) return (IPP2P_BIT * 100);
+ }
+
+ /* get tracker commands, they all start with GET /
+ * which can be followed by: scrape | announce
+ * and then ?info_hash=
+ */
+ if (memcmp(payload,"GET /",5) == 0)
+ {
+ /* message scrape */
+ if ( memcmp(payload+5,"scrape?info_hash=",17)==0 ) return (IPP2P_BIT * 100 + 1);
+ /* message announce */
+ if ( memcmp(payload+5,"announce?info_hash=",19)==0 ) return (IPP2P_BIT * 100 + 2);
+ }
+ }
+ else
+ {
+ /* bitcomet encrypts the first packet, so we have to detect another
+ * one later in the flow */
+ /* first try failed, too many misdetections */
+ //if ( size == 5 && get_u32(t,0) == __constant_htonl(1) && t[4] < 3) return (IPP2P_BIT * 100 + 3);
+
+ /* second try: block request packets */
+ if ( plen == 17 && get_u32(payload,0) == __constant_htonl(0x0d) && payload[4] == 0x06 && get_u32(payload,13) == __constant_htonl(0x4000) ) return (IPP2P_BIT * 100 + 3);
+ }
+
+ return 0;
+}
+
+
+
+/*check for Kazaa get command*/
+int
+search_kazaa (const unsigned char *payload, const u16 plen)
+
+{
+ if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a) && memcmp(payload, "GET /.hash=", 11) == 0)
+ return (IPP2P_DATA_KAZAA * 100);
+
+ return 0;
+}
+
+
+/*check for gnutella get command*/
+int
+search_gnu (const unsigned char *payload, const u16 plen)
+{
+ if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
+ {
+ if (memcmp(payload, "GET /get/", 9) == 0) return ((IPP2P_DATA_GNU * 100) + 1);
+ if (memcmp(payload, "GET /uri-res/", 13) == 0) return ((IPP2P_DATA_GNU * 100) + 2);
+ }
+ return 0;
+}
+
+
+/*check for gnutella get commands and other typical data*/
+int
+search_all_gnu (const unsigned char *payload, const u16 plen)
+{
+
+ if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
+ {
+
+ if (memcmp(payload, "GNUTELLA CONNECT/", 17) == 0) return ((IPP2P_GNU * 100) + 1);
+ if (memcmp(payload, "GNUTELLA/", 9) == 0) return ((IPP2P_GNU * 100) + 2);
+
+
+ if ((memcmp(payload, "GET /get/", 9) == 0) || (memcmp(payload, "GET /uri-res/", 13) == 0))
+ {
+ u16 c=8;
+ const u16 end=plen-22;
+ while (c < end) {
+ if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Gnutella-", 11) == 0) || (memcmp(&payload[c+2], "X-Queue:", 8) == 0)))
+ return ((IPP2P_GNU * 100) + 3);
+ c++;
+ }
+ }
+ }
+ return 0;
+}
+
+
+/*check for KaZaA download commands and other typical data*/
+int
+search_all_kazaa (const unsigned char *payload, const u16 plen)
+{
+ if ((payload[plen-2] == 0x0d) && (payload[plen-1] == 0x0a))
+ {
+
+ if (memcmp(payload, "GIVE ", 5) == 0) return ((IPP2P_KAZAA * 100) + 1);
+
+ if (memcmp(payload, "GET /", 5) == 0) {
+ u16 c = 8;
+ const u16 end=plen-22;
+ while (c < end) {
+ if ( payload[c] == 0x0a && payload[c+1] == 0x0d && ((memcmp(&payload[c+2], "X-Kazaa-Username: ", 18) == 0) || (memcmp(&payload[c+2], "User-Agent: PeerEnabler/", 24) == 0)))
+ return ((IPP2P_KAZAA * 100) + 2);
+ c++;
+ }
+ }
+ }
+ return 0;
+}
+
+/*fast check for edonkey file segment transfer command*/
+int
+search_edk (const unsigned char *payload, const u16 plen)
+{
+ if (payload[0] != 0xe3)
+ return 0;
+ else {
+ if (payload[5] == 0x47)
+ return (IPP2P_DATA_EDK * 100);
+ else
+ return 0;
+ }
+}
+
+
+
+/*intensive but slower search for some edonkey packets including size-check*/
+int
+search_all_edk (const unsigned char *payload, const u16 plen)
+{
+ if (payload[0] != 0xe3)
+ return 0;
+ else {
+ //t += head_len;
+ const u16 cmd = get_u16(payload, 1);
+ if (cmd == (plen - 5)) {
+ switch (payload[5]) {
+ case 0x01: return ((IPP2P_EDK * 100) + 1); /*Client: hello or Server:hello*/
+ case 0x4c: return ((IPP2P_EDK * 100) + 9); /*Client: Hello-Answer*/
+ }
+ }
+ return 0;
+ }
+}
+
+
+/*fast check for Direct Connect send command*/
+int
+search_dc (const unsigned char *payload, const u16 plen)
+{
+
+ if (payload[0] != 0x24 )
+ return 0;
+ else {
+ if (memcmp(&payload[1], "Send|", 5) == 0)
+ return (IPP2P_DATA_DC * 100);
+ else
+ return 0;
+ }
+
+}
+
+
+/*intensive but slower check for all direct connect packets*/
+int
+search_all_dc (const unsigned char *payload, const u16 plen)
+{
+// unsigned char *t = haystack;
+
+ if (payload[0] == 0x24 && payload[plen-1] == 0x7c)
+ {
+ const unsigned char *t=&payload[1];
+ /* Client-Hub-Protocol */
+ if (memcmp(t, "Lock ", 5) == 0) return ((IPP2P_DC * 100) + 1);
+ /* Client-Client-Protocol, some are already recognized by client-hub (like lock) */
+ if (memcmp(t, "MyNick ", 7) == 0) return ((IPP2P_DC * 100) + 38);
+ }
+ return 0;
+}
+
+/*check for mute*/
+int
+search_mute (const unsigned char *payload, const u16 plen)
+{
+ if ( plen == 209 || plen == 345 || plen == 473 || plen == 609 || plen == 1121 )
+ {
+ //printk(KERN_DEBUG "size hit: %u",size);
+ if (memcmp(payload,"PublicKey: ",11) == 0 )
+ {
+ return ((IPP2P_MUTE * 100) + 0);
+
+/* if (memcmp(t+size-14,"\x0aEndPublicKey\x0a",14) == 0)
+ {
+ printk(KERN_DEBUG "end pubic key hit: %u",size);
+
+ }*/
+ }
+ }
+ return 0;
+}
+
+
+/* check for xdcc */
+int
+search_xdcc (const unsigned char *payload, const u16 plen)
+{
+ /* search in small packets only */
+ if (plen > 20 && plen < 200 && payload[plen-1] == 0x0a && payload[plen-2] == 0x0d && memcmp(payload,"PRIVMSG ",8) == 0)
+ {
+
+ u16 x=10;
+ const u16 end=plen - 13;
+
+ /* it seems to be an IRC private message, check for xdcc command */
+ while (x < end)
+ {
+ if (payload[x] == ':')
+ {
+ if ( memcmp(&payload[x+1],"xdcc send #",11) == 0 )
+ return ((IPP2P_XDCC * 100) + 0);
+ }
+ x++;
+ }
+ }
+ return 0;
+}
+
+/* search for waste */
+int search_waste(const unsigned char *payload, const u16 plen)
+{
+ if ( plen >= 9 && memcmp(payload,"GET.sha1:",9) == 0)
+ return ((IPP2P_WASTE * 100) + 0);
+
+ return 0;
+}
+
+
+static struct {
+ int command;
+ __u8 short_hand; /*for functions included in short hands*/
+ int packet_len;
+ int (*function_name) (const unsigned char *, const u16);
+} matchlist[] = {
+ {IPP2P_EDK,SHORT_HAND_IPP2P,20, &search_all_edk},
+// {IPP2P_DATA_KAZAA,SHORT_HAND_DATA,200, &search_kazaa},
+// {IPP2P_DATA_EDK,SHORT_HAND_DATA,60, &search_edk},
+// {IPP2P_DATA_DC,SHORT_HAND_DATA,26, &search_dc},
+ {IPP2P_DC,SHORT_HAND_IPP2P,5, search_all_dc},
+// {IPP2P_DATA_GNU,SHORT_HAND_DATA,40, &search_gnu},
+ {IPP2P_GNU,SHORT_HAND_IPP2P,5, &search_all_gnu},
+ {IPP2P_KAZAA,SHORT_HAND_IPP2P,5, &search_all_kazaa},
+ {IPP2P_BIT,SHORT_HAND_IPP2P,20, &search_bittorrent},
+ {IPP2P_APPLE,SHORT_HAND_IPP2P,5, &search_apple},
+ {IPP2P_SOUL,SHORT_HAND_IPP2P,5, &search_soul},
+ {IPP2P_WINMX,SHORT_HAND_IPP2P,2, &search_winmx},
+ {IPP2P_ARES,SHORT_HAND_IPP2P,5, &search_ares},
+ {IPP2P_MUTE,SHORT_HAND_NONE,200, &search_mute},
+ {IPP2P_WASTE,SHORT_HAND_NONE,5, &search_waste},
+ {IPP2P_XDCC,SHORT_HAND_NONE,5, &search_xdcc},
+ {0,0,0,NULL}
+};
+
+
+static struct {
+ int command;
+ __u8 short_hand; /*for functions included in short hands*/
+ int packet_len;
+ int (*function_name) (unsigned char *, int);
+} udp_list[] = {
+ {IPP2P_KAZAA,SHORT_HAND_IPP2P,14, &udp_search_kazaa},
+ {IPP2P_BIT,SHORT_HAND_IPP2P,23, &udp_search_bit},
+ {IPP2P_GNU,SHORT_HAND_IPP2P,11, &udp_search_gnu},
+ {IPP2P_EDK,SHORT_HAND_IPP2P,9, &udp_search_edk},
+ {IPP2P_DC,SHORT_HAND_IPP2P,12, &udp_search_directconnect},
+ {0,0,0,NULL}
+};
+
+
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
+ const struct xt_match *match,
+#endif
+ const void *matchinfo,
+ int offset,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
+ unsigned int protoff,
+#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ const void *hdr,
+ u_int16_t datalen,
+#endif
+ int *hotdrop)
+{
+ const struct ipt_p2p_info *info = matchinfo;
+ unsigned char *haystack;
+ struct iphdr *ip = ip_hdr(skb);
+ int p2p_result = 0, i = 0;
+// int head_len;
+ int hlen = ntohs(ip->tot_len)-(ip->ihl*4); /*hlen = packet-data length*/
+
+ /*must not be a fragment*/
+ if (offset) {
+ if (info->debug) printk("IPP2P.match: offset found %i \n",offset);
+ return 0;
+ }
+
+ /*make sure that skb is linear*/
+ if(skb_is_nonlinear(skb)){
+ if (info->debug) printk("IPP2P.match: nonlinear skb found\n");
+ return 0;
+ }
+
+
+ haystack=(char *)ip+(ip->ihl*4); /*haystack = packet data*/
+
+ switch (ip->protocol){
+ case IPPROTO_TCP: /*what to do with a TCP packet*/
+ {
+ struct tcphdr *tcph = (void *) ip + ip->ihl * 4;
+
+ if (tcph->fin) return 0; /*if FIN bit is set bail out*/
+ if (tcph->syn) return 0; /*if SYN bit is set bail out*/
+ if (tcph->rst) return 0; /*if RST bit is set bail out*/
+
+ haystack += tcph->doff * 4; /*get TCP-Header-Size*/
+ hlen -= tcph->doff * 4;
+ while (matchlist[i].command) {
+ if ((((info->cmd & matchlist[i].command) == matchlist[i].command) ||
+ ((info->cmd & matchlist[i].short_hand) == matchlist[i].short_hand)) &&
+ (hlen > matchlist[i].packet_len)) {
+ p2p_result = matchlist[i].function_name(haystack, hlen);
+ if (p2p_result)
+ {
+ if (info->debug) printk("IPP2P.debug:TCP-match: %i from: %u.%u.%u.%u:%i to: %u.%u.%u.%u:%i Length: %i\n",
+ p2p_result, NIPQUAD(ip->saddr),ntohs(tcph->source), NIPQUAD(ip->daddr),ntohs(tcph->dest),hlen);
+ return p2p_result;
+ }
+ }
+ i++;
+ }
+ return p2p_result;
+ }
+
+ case IPPROTO_UDP: /*what to do with an UDP packet*/
+ {
+ struct udphdr *udph = (void *) ip + ip->ihl * 4;
+
+ while (udp_list[i].command){
+ if ((((info->cmd & udp_list[i].command) == udp_list[i].command) ||
+ ((info->cmd & udp_list[i].short_hand) == udp_list[i].short_hand)) &&
+ (hlen > udp_list[i].packet_len)) {
+ p2p_result = udp_list[i].function_name(haystack, hlen);
+ if (p2p_result){
+ if (info->debug) printk("IPP2P.debug:UDP-match: %i from: %u.%u.%u.%u:%i to: %u.%u.%u.%u:%i Length: %i\n",
+ p2p_result, NIPQUAD(ip->saddr),ntohs(udph->source), NIPQUAD(ip->daddr),ntohs(udph->dest),hlen);
+ return p2p_result;
+ }
+ }
+ i++;
+ }
+ return p2p_result;
+ }
+
+ default: return 0;
+ }
+}
+
+
+
+static int
+checkentry(const char *tablename,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
+ const void *ip,
+ const struct xt_match *match,
+#else
+ const struct ipt_ip *ip,
+#endif
+ void *matchinfo,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int matchsize,
+#endif
+ unsigned int hook_mask)
+{
+ /* Must specify -p tcp */
+/* if (ip->proto != IPPROTO_TCP || (ip->invflags & IPT_INV_PROTO)) {
+ * printk("ipp2p: Only works on TCP packets, use -p tcp\n");
+ * return 0;
+ * }*/
+ return 1;
+}
+
+
+
+
+static struct ipt_match ipp2p_match = {
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0)
+ { NULL, NULL },
+ "ipp2p",
+ &match,
+ &checkentry,
+ NULL,
+ THIS_MODULE
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0)
+ .name = "ipp2p",
+ .match = &match,
+ .family = AF_INET,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
+ .matchsize = sizeof(struct ipt_p2p_info),
+#endif
+ .checkentry = &checkentry,
+ .me = THIS_MODULE,
+#endif
+};
+
+
+static int __init init(void)
+{
+ printk(KERN_INFO "IPP2P v%s loading\n", IPP2P_VERSION);
+ return xt_register_match(&ipp2p_match);
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_match(&ipp2p_match);
+ printk(KERN_INFO "IPP2P v%s unloaded\n", IPP2P_VERSION);
+}
+
+module_init(init);
+module_exit(fini);
+
+
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -63,6 +63,12 @@
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_MATCH_IPP2P
+ tristate "IPP2P"
+ depends on IP_NF_IPTABLES
+ help
+ Module for matching traffic of various Peer-to-Peer applications
+
config IP_NF_MATCH_TOS
tristate "TOS match support"
depends on IP_NF_IPTABLES
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -49,6 +49,7 @@
obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
obj-$(CONFIG_IP_NF_MATCH_TTL) += ipt_ttl.o
obj-$(CONFIG_IP_NF_MATCH_ADDRTYPE) += ipt_addrtype.o
+obj-$(CONFIG_IP_NF_MATCH_IPP2P) += ipt_ipp2p.o
# targets
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
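
The TCP detectors above are all simple prefix and length heuristics driven by the matchlist[] table. As a self-contained illustration, here is the BitTorrent handshake test from search_bittorrent() restated in userspace (names and the sample buffer are illustrative only):

#include <stdio.h>
#include <string.h>

/* Returns non-zero if the payload starts with the 0x13 + "BitTorrent protocol"
 * handshake prefix, the same test search_bittorrent() applies for plen > 20. */
static int is_bt_handshake(const unsigned char *payload, unsigned short plen)
{
	return plen > 20 && payload[0] == 0x13 &&
	       memcmp(payload + 1, "BitTorrent protocol", 19) == 0;
}

int main(void)
{
	unsigned char pkt[68] = { 0x13 };   /* rest of the buffer is zeroed */

	memcpy(pkt + 1, "BitTorrent protocol", 19);
	printf("match: %d\n", is_bt_handshake(pkt, sizeof(pkt)));   /* match: 1 */
	return 0;
}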

View file

@ -1,166 +0,0 @@
--- /dev/null
+++ b/include/net/xfrmudp.h
@@ -0,0 +1,10 @@
+/*
+ * pointer to function for type that xfrm4_input wants, to permit
+ * decoupling of XFRM from udp.c
+ */
+#define HAVE_XFRM4_UDP_REGISTER
+
+typedef int (*xfrm4_rcv_encap_t)(struct sk_buff *skb, __u16 encap_type);
+extern int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
+ , xfrm4_rcv_encap_t *oldfunc);
+extern int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func);
--- a/net/ipv4/Kconfig
+++ b/net/ipv4/Kconfig
@@ -266,6 +266,12 @@
Network), but can be distributed all over the Internet. If you want
to do that, say Y here and to "IP multicast routing" below.
+config IPSEC_NAT_TRAVERSAL
+ bool "IPSEC NAT-Traversal (KLIPS compatible)"
+ depends on INET
+ ---help---
+ Includes support for RFC3947/RFC3948 NAT-Traversal of ESP over UDP.
+
config IP_MROUTE
bool "IP: multicast routing"
depends on IP_MULTICAST
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -101,12 +101,15 @@
#include <net/route.h>
#include <net/checksum.h>
#include <net/xfrm.h>
+#include <net/xfrmudp.h>
#include "udp_impl.h"
/*
* Snmp MIB for the UDP layer
*/
+static xfrm4_rcv_encap_t xfrm4_rcv_encap_func;
+
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
struct hlist_head udp_hash[UDP_HTABLE_SIZE];
@@ -919,6 +922,42 @@
return 0;
}
+#if defined(CONFIG_XFRM) || defined(CONFIG_IPSEC_NAT_TRAVERSAL)
+
+/* if XFRM isn't a module, then register it directly. */
+#if 0 && !defined(CONFIG_XFRM_MODULE) && !defined(CONFIG_IPSEC_NAT_TRAVERSAL)
+static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = xfrm4_rcv_encap;
+#else
+static xfrm4_rcv_encap_t xfrm4_rcv_encap_func = NULL;
+#endif
+
+int udp4_register_esp_rcvencap(xfrm4_rcv_encap_t func
+ , xfrm4_rcv_encap_t *oldfunc)
+{
+ if(oldfunc != NULL) {
+ *oldfunc = xfrm4_rcv_encap_func;
+ }
+
+#if 0
+ if(xfrm4_rcv_encap_func != NULL)
+ return -1;
+#endif
+
+ xfrm4_rcv_encap_func = func;
+ return 0;
+}
+
+int udp4_unregister_esp_rcvencap(xfrm4_rcv_encap_t func)
+{
+ if(xfrm4_rcv_encap_func != func)
+ return -1;
+
+ xfrm4_rcv_encap_func = NULL;
+ return 0;
+}
+#endif /* CONFIG_XFRM_MODULE || CONFIG_IPSEC_NAT_TRAVERSAL */
+
+
/* return:
* 1 if the UDP system should process it
* 0 if we should drop this packet
@@ -926,7 +965,7 @@
*/
static int udp_encap_rcv(struct sock * sk, struct sk_buff *skb)
{
-#ifndef CONFIG_XFRM
+#if !defined(CONFIG_XFRM) && !defined(CONFIG_IPSEC_NAT_TRAVERSAL)
return 1;
#else
struct udp_sock *up = udp_sk(sk);
@@ -941,11 +980,11 @@
/* if we're overly short, let UDP handle it */
len = skb->len - sizeof(struct udphdr);
if (len <= 0)
- return 1;
+ return 2;
/* if this is not encapsulated socket, then just return now */
if (!encap_type)
- return 1;
+ return 3;
/* If this is a paged skb, make sure we pull up
* whatever data we need to look at. */
@@ -968,7 +1007,7 @@
len = sizeof(struct udphdr);
} else
/* Must be an IKE packet.. pass it through */
- return 1;
+ return 4;
break;
case UDP_ENCAP_ESPINUDP_NON_IKE:
/* Check if this is a keepalive packet. If so, eat it. */
@@ -981,7 +1020,7 @@
len = sizeof(struct udphdr) + 2 * sizeof(u32);
} else
/* Must be an IKE packet.. pass it through */
- return 1;
+ return 5;
break;
}
@@ -992,6 +1031,8 @@
*/
if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
return 0;
+ if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
+ return 0;
/* Now we can update and verify the packet length... */
iph = ip_hdr(skb);
@@ -1056,9 +1097,13 @@
return 0;
}
if (ret < 0) {
- /* process the ESP packet */
- ret = xfrm4_rcv_encap(skb, up->encap_type);
- UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
+ if(xfrm4_rcv_encap_func != NULL) {
+ ret = (*xfrm4_rcv_encap_func)(skb, up->encap_type);
+ UDP_INC_STATS_BH(UDP_MIB_INDATAGRAMS, up->pcflag);
+ } else {
+ UDP_INC_STATS_BH(UDP_MIB_INERRORS, up->pcflag);
+ ret = 1;
+ }
return -ret;
}
/* FALLTHROUGH -- it's a UDP Packet */
@@ -1742,3 +1787,9 @@
EXPORT_SYMBOL(udp_proc_register);
EXPORT_SYMBOL(udp_proc_unregister);
#endif
+
+#if defined(CONFIG_IPSEC_NAT_TRAVERSAL)
+EXPORT_SYMBOL(udp4_register_esp_rcvencap);
+EXPORT_SYMBOL(udp4_unregister_esp_rcvencap);
+#endif
+
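
A consumer such as a KLIPS-style IPsec stack would hook ESP-in-UDP decapsulation through the two exported functions above. A minimal kernel-side sketch, assuming a tree with this patch applied; the handler body is only a placeholder, not part of the patch:

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/xfrmudp.h>

static xfrm4_rcv_encap_t old_handler;

/* placeholder decap handler: a real stack would strip the NAT-T
 * encapsulation here and feed the ESP packet to its own input path */
static int my_esp_rcv_encap(struct sk_buff *skb, __u16 encap_type)
{
	kfree_skb(skb);
	return 0;
}

static int __init natt_demo_init(void)
{
	/* remember the previously registered handler, if any */
	return udp4_register_esp_rcvencap(my_esp_rcv_encap, &old_handler);
}

static void __exit natt_demo_exit(void)
{
	udp4_unregister_esp_rcvencap(my_esp_rcv_encap);
}

module_init(natt_demo_init);
module_exit(natt_demo_exit);
MODULE_LICENSE("GPL");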

View file

@ -1,239 +0,0 @@
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_time.h
@@ -0,0 +1,18 @@
+#ifndef __ipt_time_h_included__
+#define __ipt_time_h_included__
+
+
+struct ipt_time_info {
+ u_int8_t days_match; /* 1 bit per day. -SMTWTFS */
+ u_int16_t time_start; /* 0 < time_start < 23*60+59 = 1439 */
+ u_int16_t time_stop; /* 0:00 < time_stop < 23:59 */
+
+ /* FIXME: Keep this one for userspace iptables binary compatibility: */
+ u_int8_t kerneltime; /* ignore skb time (and use kerneltime) or not. */
+
+ time_t date_start;
+ time_t date_stop;
+};
+
+
+#endif /* __ipt_time_h_included__ */
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_time.c
@@ -0,0 +1,180 @@
+/*
+ This is a module which is used for time matching
+ It is using some modified code from dietlibc (localtime() function)
+ that you can find at http://www.fefe.de/dietlibc/
+ This file is distributed under the terms of the GNU General Public
+ License (GPL). Copies of the GPL can be obtained from: ftp://prep.ai.mit.edu/pub/gnu/GPL
+ 2001-05-04 Fabrice MARIE <fabrice@netfilter.org> : initial development.
+ 2001-21-05 Fabrice MARIE <fabrice@netfilter.org> : bug fix in the match code,
+ thanks to "Zeng Yu" <zengy@capitel.com.cn> for bug report.
+ 2001-26-09 Fabrice MARIE <fabrice@netfilter.org> : force the match to be in LOCAL_IN or PRE_ROUTING only.
+ 2001-30-11 Fabrice : added the possibility to use the match in FORWARD/OUTPUT with a little hack,
+ added Nguyen Dang Phuoc Dong <dongnd@tlnet.com.vn> patch to support timezones.
+ 2004-05-02 Fabrice : added support for date matching, from an idea of Fabien COELHO.
+*/
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_time.h>
+#include <linux/time.h>
+
+MODULE_AUTHOR("Fabrice MARIE <fabrice@netfilter.org>");
+MODULE_DESCRIPTION("Match arrival timestamp/date");
+MODULE_LICENSE("GPL");
+
+struct tm
+{
+ int tm_sec; /* Seconds. [0-60] (1 leap second) */
+ int tm_min; /* Minutes. [0-59] */
+ int tm_hour; /* Hours. [0-23] */
+ int tm_mday; /* Day. [1-31] */
+ int tm_mon; /* Month. [0-11] */
+ int tm_year; /* Year - 1900. */
+ int tm_wday; /* Day of week. [0-6] */
+ int tm_yday; /* Days in year.[0-365] */
+ int tm_isdst; /* DST. [-1/0/1]*/
+
+ long int tm_gmtoff; /* we don't care, we count from GMT */
+ const char *tm_zone; /* we don't care, we count from GMT */
+};
+
+void
+localtime(const u32 time, struct tm *r);
+
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const struct xt_match *match,
+ const void *matchinfo,
+ int offset,
+ unsigned int protoff,
+ int *hotdrop)
+{
+ const struct ipt_time_info *info = matchinfo; /* match info for rule */
+ struct timeval tv;
+ struct tm currenttime; /* time human readable */
+ u_int8_t days_of_week[7] = {64, 32, 16, 8, 4, 2, 1};
+ u_int16_t packet_time;
+
+ /* We might not have a timestamp, get one */
+ if (skb->tstamp.tv64 == 0)
+ __net_timestamp((struct sk_buff *)skb);
+
+ skb_get_timestamp(skb, &tv);
+ /* First we make sure we are in the date start-stop boundaries */
+ if ((tv.tv_sec < info->date_start) || (tv.tv_sec > info->date_stop))
+ return 0; /* We are outside the date boundaries */
+
+ /* Transform the timestamp of the packet, in a human readable form */
+ localtime(tv.tv_sec, &currenttime);
+
+ /* check if we match this timestamp, we start by the days... */
+ if ((days_of_week[currenttime.tm_wday] & info->days_match) != days_of_week[currenttime.tm_wday])
+ return 0; /* the day doesn't match */
+
+ /* ... check the time now */
+ packet_time = (currenttime.tm_hour * 60) + currenttime.tm_min;
+ if ((packet_time < info->time_start) || (packet_time > info->time_stop))
+ return 0;
+
+ /* here we match ! */
+ return 1;
+}
+
+static int
+checkentry(const char *tablename,
+ const void *ip,
+ const struct xt_match *match,
+ void *matchinfo,
+ unsigned int hook_mask)
+{
+ struct ipt_time_info *info = matchinfo; /* match info for rule */
+
+ /* First, check that we are in the correct hooks */
+ if (hook_mask
+ & ~((1 << NF_IP_PRE_ROUTING) | (1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD) | (1 << NF_IP_LOCAL_OUT)))
+ {
+ printk("ipt_time: error, only valid for PRE_ROUTING, LOCAL_IN, FORWARD and OUTPUT)\n");
+ return 0;
+ }
+
+ /* Now check the coherence of the data ... */
+ if ((info->time_start > 1439) || /* 23*60+59 = 1439*/
+ (info->time_stop > 1439))
+ {
+ printk(KERN_WARNING "ipt_time: invalid argument\n");
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct ipt_match time_match = {
+ .name = "time",
+ .match = &match,
+ .matchsize = sizeof(struct ipt_time_info),
+ .checkentry = &checkentry,
+ .me = THIS_MODULE
+};
+
+static int __init init(void)
+{
+ printk("ipt_time loading\n");
+ return xt_register_match(&time_match);
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_match(&time_match);
+ printk("ipt_time unloaded\n");
+}
+
+module_init(init);
+module_exit(fini);
+
+
+/* The part below is borrowed and modified from dietlibc */
+
+/* seconds per day */
+#define SPD 24*60*60
+
+void
+localtime(const u32 time, struct tm *r) {
+ u32 i, timep;
+ extern struct timezone sys_tz;
+ const unsigned int __spm[12] =
+ { 0,
+ (31),
+ (31+28),
+ (31+28+31),
+ (31+28+31+30),
+ (31+28+31+30+31),
+ (31+28+31+30+31+30),
+ (31+28+31+30+31+30+31),
+ (31+28+31+30+31+30+31+31),
+ (31+28+31+30+31+30+31+31+30),
+ (31+28+31+30+31+30+31+31+30+31),
+ (31+28+31+30+31+30+31+31+30+31+30),
+ };
+ register u32 work;
+
+ timep = time - (sys_tz.tz_minuteswest * 60);
+ work=timep%(SPD);
+ r->tm_sec=work%60; work/=60;
+ r->tm_min=work%60; r->tm_hour=work/60;
+ work=timep/(SPD);
+ r->tm_wday=(4+work)%7;
+ for (i=1970; ; ++i) {
+ register time_t k= (!(i%4) && ((i%100) || !(i%400)))?366:365;
+ if (work>k)
+ work-=k;
+ else
+ break;
+ }
+ r->tm_year=i-1900;
+ for (i=11; i && __spm[i]>work; --i) ;
+ r->tm_mon=i;
+ r->tm_mday=work-__spm[i]+1;
+}
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -78,6 +78,22 @@
To compile it as a module, choose M here. If unsure, say N.
+
+config IP_NF_MATCH_TIME
+ tristate 'TIME match support'
+ depends on IP_NF_IPTABLES
+ help
+ This option adds a `time' match, which allows you
+ to match based on the packet arrival time/date
+ (arrival time/date at the machine which netfilter is running on) or
+ departure time/date (for locally generated packets).
+
+ If you say Y here, try iptables -m time --help for more information.
+ If you want to compile it as a module, say M here and read
+
+ Documentation/modules.txt. If unsure, say `N'.
+
+
config IP_NF_MATCH_RECENT
tristate "recent match support"
depends on IP_NF_IPTABLES
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -44,6 +44,7 @@
obj-$(CONFIG_IP_NF_MATCH_IPRANGE) += ipt_iprange.o
obj-$(CONFIG_IP_NF_MATCH_OWNER) += ipt_owner.o
obj-$(CONFIG_IP_NF_MATCH_TOS) += ipt_tos.o
+obj-$(CONFIG_IP_NF_MATCH_TIME) += ipt_time.o
obj-$(CONFIG_IP_NF_MATCH_RECENT) += ipt_recent.o
obj-$(CONFIG_IP_NF_MATCH_ECN) += ipt_ecn.o
obj-$(CONFIG_IP_NF_MATCH_AH) += ipt_ah.o
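
The day check in match() above reduces to a single bitmask test: days_match holds one bit per weekday ("-SMTWTFS", Sunday = 0x40 down to Saturday = 0x01) and tm_wday indexes the days_of_week[] table. A userspace restatement of that test (illustrative only; the kernel code additionally offsets by sys_tz.tz_minuteswest, here UTC is used for simplicity):

#include <stdio.h>
#include <time.h>

static const unsigned char days_of_week[7] = { 64, 32, 16, 8, 4, 2, 1 };

/* Returns 1 if the weekday of 't' is enabled in 'days_match',
 * using the same encoding as struct ipt_time_info.days_match. */
static int day_matches(time_t t, unsigned char days_match)
{
	struct tm tm;

	gmtime_r(&t, &tm);
	return (days_of_week[tm.tm_wday] & days_match) == days_of_week[tm.tm_wday];
}

int main(void)
{
	time_t now = time(NULL);

	/* 0x3e = Monday..Friday (bits 32|16|8|4|2) */
	printf("weekday rule matches now: %d\n", day_matches(now, 0x3e));
	return 0;
}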

View file

@ -1,869 +0,0 @@
--- /dev/null
+++ b/drivers/net/imq.c
@@ -0,0 +1,402 @@
+/*
+ * Pseudo-driver for the intermediate queue device.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Patrick McHardy, <kaber@trash.net>
+ *
+ * The first version was written by Martin Devera, <devik@cdi.cz>
+ *
+ * Credits: Jan Rafaj <imq2t@cedric.vabo.cz>
+ * - Update patch to 2.4.21
+ * Sebastian Strollo <sstrollo@nortelnetworks.com>
+ * - Fix "Dead-loop on netdevice imq"-issue
+ * Marcel Sebek <sebek64@post.cz>
+ * - Update to 2.6.2-rc1
+ *
+ * After some time of inactivity there is a group taking care
+ * of IMQ again: http://www.linuximq.net
+ *
+ *
+ * 2004/06/30 - New version of IMQ patch to kernels <=2.6.7 including
+ * the following changes:
+ *
+ * - Correction of ipv6 support "+"s issue (Hasso Tepper)
+ * - Correction of imq_init_devs() issue that resulted in
+ * kernel OOPS unloading IMQ as module (Norbert Buchmuller)
+ * - Addition of functionality to choose number of IMQ devices
+ * during kernel config (Andre Correa)
+ * - Addition of functionality to choose how IMQ hooks on
+ * PRE and POSTROUTING (after or before NAT) (Andre Correa)
+ * - Cosmetic corrections (Norbert Buchmuller) (Andre Correa)
+ *
+ *
+ * 2005/12/16 - IMQ versions between 2.6.7 and 2.6.13 were
+ * released with almost no problems. 2.6.14-x was released
+ * with some important changes: nfcache was removed; After
+ * some weeks of trouble we figured out that some IMQ fields
+ * in skb were missing in skbuff.c - skb_clone and copy_skb_header.
+ * These functions are correctly patched by this new patch version.
+ *
+ * Thanks for all who helped to figure out all the problems with
+ * 2.6.14.x: Patrick McHardy, Rune Kock, VeNoMouS, Max CtRiX,
+ * Kevin Shanahan, Richard Lucassen, Valery Dachev (hopefully
+ * I didn't forget anybody). I apologize again for my lack of time.
+ *
+ * More info at: http://www.linuximq.net/ (Andre Correa)
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include <linux/if_arp.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv4.h>
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ #include <linux/netfilter_ipv6.h>
+#endif
+#include <linux/imq.h>
+#include <net/pkt_sched.h>
+
+extern int qdisc_restart1(struct net_device *dev);
+
+static nf_hookfn imq_nf_hook;
+
+static struct nf_hook_ops imq_ingress_ipv4 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ .priority = NF_IP_PRI_MANGLE + 1
+#else
+ .priority = NF_IP_PRI_NAT_DST + 1
+#endif
+};
+
+static struct nf_hook_ops imq_egress_ipv4 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+ .priority = NF_IP_PRI_LAST
+#else
+ .priority = NF_IP_PRI_NAT_SRC - 1
+#endif
+};
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+static struct nf_hook_ops imq_ingress_ipv6 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_PRE_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ .priority = NF_IP6_PRI_MANGLE + 1
+#else
+ .priority = NF_IP6_PRI_NAT_DST + 1
+#endif
+};
+
+static struct nf_hook_ops imq_egress_ipv6 = {
+ .hook = imq_nf_hook,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_POST_ROUTING,
+#if defined(CONFIG_IMQ_BEHAVIOR_AA) || defined(CONFIG_IMQ_BEHAVIOR_BA)
+ .priority = NF_IP6_PRI_LAST
+#else
+ .priority = NF_IP6_PRI_NAT_SRC - 1
+#endif
+};
+#endif
+
+#if defined(CONFIG_IMQ_NUM_DEVS)
+static unsigned int numdevs = CONFIG_IMQ_NUM_DEVS;
+#else
+static unsigned int numdevs = 2;
+#endif
+
+static struct net_device *imq_devs;
+
+static struct net_device_stats *imq_get_stats(struct net_device *dev)
+{
+ return (struct net_device_stats *)dev->priv;
+}
+
+/* called for packets kfree'd in qdiscs at places other than enqueue */
+static void imq_skb_destructor(struct sk_buff *skb)
+{
+ struct nf_info *info = skb->nf_info;
+
+ if (info) {
+ if (info->indev)
+ dev_put(info->indev);
+ if (info->outdev)
+ dev_put(info->outdev);
+ kfree(info);
+ }
+}
+
+static int imq_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct net_device_stats *stats = (struct net_device_stats*) dev->priv;
+
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+
+ skb->imq_flags = 0;
+ skb->destructor = NULL;
+
+ dev->trans_start = jiffies;
+ nf_reinject(skb, skb->nf_info, NF_ACCEPT);
+ return 0;
+}
+
+static int imq_nf_queue(struct sk_buff *skb, struct nf_info *info, unsigned queue_num, void *data)
+{
+ struct net_device *dev;
+ struct net_device_stats *stats;
+ struct sk_buff *skb2 = NULL;
+ struct Qdisc *q;
+ unsigned int index = skb->imq_flags&IMQ_F_IFMASK;
+ int ret = -1;
+
+ if (index > numdevs)
+ return -1;
+
+ dev = imq_devs + index;
+ if (!(dev->flags & IFF_UP)) {
+ skb->imq_flags = 0;
+ nf_reinject(skb, info, NF_ACCEPT);
+ return 0;
+ }
+ dev->last_rx = jiffies;
+
+ if (skb->destructor) {
+ skb2 = skb;
+ skb = skb_clone(skb, GFP_ATOMIC);
+ if (!skb)
+ return -1;
+ }
+ skb->nf_info = info;
+
+ stats = (struct net_device_stats *)dev->priv;
+ stats->rx_bytes+= skb->len;
+ stats->rx_packets++;
+
+ spin_lock_bh(&dev->queue_lock);
+ q = dev->qdisc;
+ if (q->enqueue) {
+ q->enqueue(skb_get(skb), q);
+ if (skb_shared(skb)) {
+ skb->destructor = imq_skb_destructor;
+ kfree_skb(skb);
+ ret = 0;
+ }
+ }
+ if (spin_is_locked(&dev->_xmit_lock))
+ netif_schedule(dev);
+ else
+ while (!netif_queue_stopped(dev) && qdisc_restart1(dev) < 0)
+ /* NOTHING */;
+
+ spin_unlock_bh(&dev->queue_lock);
+
+ if (skb2)
+ kfree_skb(ret ? skb : skb2);
+
+ return ret;
+}
+
+static struct nf_queue_handler nfqh = {
+ .name = "imq",
+ .outfn = imq_nf_queue,
+};
+
+static unsigned int imq_nf_hook(unsigned int hook, struct sk_buff **pskb,
+ const struct net_device *indev,
+ const struct net_device *outdev,
+ int (*okfn)(struct sk_buff *))
+{
+ if ((*pskb)->imq_flags & IMQ_F_ENQUEUE)
+ return NF_QUEUE;
+
+ return NF_ACCEPT;
+}
+
+
+static int __init imq_init_hooks(void)
+{
+ int err;
+
+ err = nf_register_queue_handler(PF_INET, &nfqh);
+ if (err > 0)
+ goto err1;
+ if ((err = nf_register_hook(&imq_ingress_ipv4)))
+ goto err2;
+ if ((err = nf_register_hook(&imq_egress_ipv4)))
+ goto err3;
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ if ((err = nf_register_queue_handler(PF_INET6, &nfqh)))
+ goto err4;
+ if ((err = nf_register_hook(&imq_ingress_ipv6)))
+ goto err5;
+ if ((err = nf_register_hook(&imq_egress_ipv6)))
+ goto err6;
+#endif
+
+ return 0;
+
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+err6:
+ nf_unregister_hook(&imq_ingress_ipv6);
+err5:
+ nf_unregister_queue_handler(PF_INET6);
+err4:
+ nf_unregister_hook(&imq_egress_ipv4);
+#endif
+err3:
+ nf_unregister_hook(&imq_ingress_ipv4);
+err2:
+ nf_unregister_queue_handler(PF_INET);
+err1:
+ return err;
+}
+
+static void __exit imq_unhook(void)
+{
+ nf_unregister_hook(&imq_ingress_ipv4);
+ nf_unregister_hook(&imq_egress_ipv4);
+ nf_unregister_queue_handler(PF_INET);
+#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
+ nf_unregister_hook(&imq_ingress_ipv6);
+ nf_unregister_hook(&imq_egress_ipv6);
+ nf_unregister_queue_handler(PF_INET6);
+#endif
+}
+
+static int __init imq_dev_init(struct net_device *dev)
+{
+ dev->hard_start_xmit = imq_dev_xmit;
+ dev->type = ARPHRD_VOID;
+ dev->mtu = 1500;
+ dev->tx_queue_len = 30;
+ dev->flags = IFF_NOARP;
+ dev->priv = kmalloc(sizeof(struct net_device_stats), GFP_KERNEL);
+ if (dev->priv == NULL)
+ return -ENOMEM;
+ memset(dev->priv, 0, sizeof(struct net_device_stats));
+ dev->get_stats = imq_get_stats;
+
+ return 0;
+}
+
+static void imq_dev_uninit(struct net_device *dev)
+{
+ kfree(dev->priv);
+}
+
+static int __init imq_init_devs(void)
+{
+ struct net_device *dev;
+ int i,j;
+ j = numdevs;
+
+ if (!numdevs || numdevs > IMQ_MAX_DEVS) {
+ printk(KERN_ERR "IMQ: numdevs has to be betweed 1 and %u\n",
+ IMQ_MAX_DEVS);
+ return -EINVAL;
+ }
+
+ imq_devs = kmalloc(sizeof(struct net_device) * numdevs, GFP_KERNEL);
+ if (!imq_devs)
+ return -ENOMEM;
+ memset(imq_devs, 0, sizeof(struct net_device) * numdevs);
+
+ /* we start counting at zero */
+ numdevs--;
+
+ for (i = 0, dev = imq_devs; i <= numdevs; i++, dev++) {
+ SET_MODULE_OWNER(dev);
+ strcpy(dev->name, "imq%d");
+ dev->init = imq_dev_init;
+ dev->uninit = imq_dev_uninit;
+
+ if (register_netdev(dev) < 0)
+ goto err_register;
+ }
+ printk(KERN_INFO "IMQ starting with %u devices...\n", j);
+ return 0;
+
+err_register:
+ for (; i; i--)
+ unregister_netdev(--dev);
+ kfree(imq_devs);
+ return -EIO;
+}
+
+static void imq_cleanup_devs(void)
+{
+ int i;
+ struct net_device *dev = imq_devs;
+
+ for (i = 0; i <= numdevs; i++)
+ unregister_netdev(dev++);
+
+ kfree(imq_devs);
+}
+
+static int __init imq_init_module(void)
+{
+ int err;
+
+ if ((err = imq_init_devs())) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_devs()\n");
+ return err;
+ }
+ if ((err = imq_init_hooks())) {
+ printk(KERN_ERR "IMQ: Error trying imq_init_hooks()\n");
+ imq_cleanup_devs();
+ return err;
+ }
+
+ printk(KERN_INFO "IMQ driver loaded successfully.\n");
+
+#if defined(CONFIG_IMQ_BEHAVIOR_BA) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ printk(KERN_INFO "\tHooking IMQ before NAT on PREROUTING.\n");
+#else
+ printk(KERN_INFO "\tHooking IMQ after NAT on PREROUTING.\n");
+#endif
+#if defined(CONFIG_IMQ_BEHAVIOR_AB) || defined(CONFIG_IMQ_BEHAVIOR_BB)
+ printk(KERN_INFO "\tHooking IMQ before NAT on POSTROUTING.\n");
+#else
+ printk(KERN_INFO "\tHooking IMQ after NAT on POSTROUTING.\n");
+#endif
+
+ return 0;
+}
+
+static void __exit imq_cleanup_module(void)
+{
+ imq_unhook();
+ imq_cleanup_devs();
+ printk(KERN_INFO "IMQ driver unloaded successfully.\n");
+}
+
+
+module_init(imq_init_module);
+module_exit(imq_cleanup_module);
+
+module_param(numdevs, int, 0);
+MODULE_PARM_DESC(numdevs, "number of IMQ devices (how many imq* devices will be created)");
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
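
The whole handoff between the IMQ target and the pseudo-device above fits in one byte of skb state: the low bits select the imq device, the high bit requests queueing. A small userspace illustration of that encoding (values taken from include/linux/imq.h below; the rule comment is only an example):

#include <stdio.h>

#define IMQ_F_IFMASK  0x7f
#define IMQ_F_ENQUEUE 0x80

int main(void)
{
	unsigned int todev = 1;                            /* device chosen by the IMQ target */
	unsigned char imq_flags = todev | IMQ_F_ENQUEUE;   /* what ipt_IMQ.c stores in the skb */

	/* imq_nf_hook() returns NF_QUEUE because the enqueue bit is set,
	 * and imq_nf_queue() then picks imq1 from the low bits */
	printf("enqueue=%d device=imq%u\n",
	       !!(imq_flags & IMQ_F_ENQUEUE), imq_flags & IMQ_F_IFMASK);
	return 0;
}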
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -96,6 +96,129 @@
To compile this driver as a module, choose M here: the module
will be called eql. If unsure, say N.
+config IMQ
+ tristate "IMQ (intermediate queueing device) support"
+ depends on NETDEVICES && NETFILTER
+ ---help---
+ The IMQ device(s) are used as placeholders for QoS queueing
+ disciplines. Every packet entering/leaving the IP stack can be
+ directed through the IMQ device where it's enqueued/dequeued to the
+ attached qdisc. This allows you to treat network devices as classes
+ and distribute bandwidth among them. Iptables is used to specify
+ through which IMQ device, if any, packets travel.
+
+ More information at: http://www.linuximq.net/
+
+ To compile this driver as a module, choose M here: the module
+ will be called imq. If unsure, say N.
+
+choice
+ prompt "IMQ behavior (PRE/POSTROUTING)"
+ depends on IMQ
+ default IMQ_BEHAVIOR_BA
+ help
+
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ IMQ can work in any of the following ways:
+
+ PREROUTING | POSTROUTING
+ -----------------|-------------------
+ #1 After NAT | After NAT
+ #2 After NAT | Before NAT
+ #3 Before NAT | After NAT
+ #4 Before NAT | Before NAT
+
+ The default behavior is to hook before NAT on PREROUTING
+ and after NAT on POSTROUTING (#3).
+
+ This setting is especially useful when trying to use IMQ
+ to shape NATed clients.
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_AA
+ bool "IMQ AA"
+ help
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: After NAT
+ POSTROUTING: After NAT
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_AB
+ bool "IMQ AB"
+ help
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: After NAT
+ POSTROUTING: Before NAT
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_BA
+ bool "IMQ BA"
+ help
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: Before NAT
+ POSTROUTING: After NAT
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+config IMQ_BEHAVIOR_BB
+ bool "IMQ BB"
+ help
+ This setting defines how IMQ behaves with respect to its
+ hooking in PREROUTING and POSTROUTING.
+
+ Choosing this option will make IMQ hook like this:
+
+ PREROUTING: Before NAT
+ POSTROUTING: Before NAT
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
+endchoice
+
+config IMQ_NUM_DEVS
+
+ int "Number of IMQ devices"
+ range 2 8
+ depends on IMQ
+ default "2"
+ help
+
+ This setting defines how many IMQ devices will be
+ created.
+
+ The default value is 2.
+
+ More information can be found at: www.linuximq.net
+
+ If not sure leave the default settings alone.
+
config TUN
tristate "Universal TUN/TAP device driver support"
select CRC32
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -124,6 +124,7 @@
obj-$(CONFIG_SLHC) += slhc.o
obj-$(CONFIG_DUMMY) += dummy.o
+obj-$(CONFIG_IMQ) += imq.o
obj-$(CONFIG_IFB) += ifb.o
obj-$(CONFIG_DE600) += de600.o
obj-$(CONFIG_DE620) += de620.o
--- /dev/null
+++ b/include/linux/imq.h
@@ -0,0 +1,9 @@
+#ifndef _IMQ_H
+#define _IMQ_H
+
+#define IMQ_MAX_DEVS 16
+
+#define IMQ_F_IFMASK 0x7f
+#define IMQ_F_ENQUEUE 0x80
+
+#endif /* _IMQ_H */
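
The header above packs everything IMQ needs into the single imq_flags byte that the skbuff.h hunk further below adds: the low bits (IMQ_F_IFMASK) select the imq device, the top bit (IMQ_F_ENQUEUE) requests diversion. A minimal sketch of that convention, assuming the skbuff change is applied; the helper names are invented:

/* --- illustrative sketch, not part of the patch --- */
#include <linux/imq.h>
#include <linux/skbuff.h>

/* Hypothetical helpers mirroring what ipt_IMQ.c does with
 * "mr->todev | IMQ_F_ENQUEUE": low 7 bits = device index, top bit =
 * "divert this skb to that imq device". */
static inline void imq_sketch_mark(struct sk_buff *skb, unsigned int todev)
{
	skb->imq_flags = (todev & IMQ_F_IFMASK) | IMQ_F_ENQUEUE;
}

static inline unsigned int imq_sketch_dev(const struct sk_buff *skb)
{
	return skb->imq_flags & IMQ_F_IFMASK;	/* 0 .. IMQ_MAX_DEVS-1 */
}

static inline int imq_sketch_wants_enqueue(const struct sk_buff *skb)
{
	return skb->imq_flags & IMQ_F_ENQUEUE;
}

imq_checkentry() further below rejects todev values above IMQ_MAX_DEVS, so the 0x7f mask never truncates a legal index.
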
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_IMQ.h
@@ -0,0 +1,8 @@
+#ifndef _IPT_IMQ_H
+#define _IPT_IMQ_H
+
+struct ipt_imq_info {
+ unsigned int todev; /* target imq device */
+};
+
+#endif /* _IPT_IMQ_H */
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_IMQ.h
@@ -0,0 +1,8 @@
+#ifndef _IP6T_IMQ_H
+#define _IP6T_IMQ_H
+
+struct ip6t_imq_info {
+ unsigned int todev; /* target imq device */
+};
+
+#endif /* _IP6T_IMQ_H */
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -285,6 +285,10 @@
struct nf_conntrack *nfct;
struct sk_buff *nfct_reasm;
#endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ unsigned char imq_flags;
+ struct nf_info *nf_info;
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info *nf_bridge;
#endif
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -94,6 +94,9 @@
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/rtnetlink.h>
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+#include <linux/imq.h>
+#endif
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
@@ -1404,6 +1407,9 @@
{
if (likely(!skb->next)) {
if (!list_empty(&ptype_all))
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ if (!(skb->imq_flags & IMQ_F_ENQUEUE))
+#endif
dev_queue_xmit_nit(skb, dev);
if (netif_needs_gso(dev, skb)) {
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -419,6 +419,10 @@
C(pkt_type);
C(ip_summed);
C(priority);
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ C(imq_flags);
+ C(nf_info);
+#endif /*CONFIG_IMQ*/
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
C(ipvs_property);
#endif
@@ -485,6 +489,10 @@
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
new->ipvs_property = old->ipvs_property;
#endif
+#if defined(CONFIG_IMQ) || defined(CONFIG_IMQ_MODULE)
+ new->imq_flags = old->imq_flags;
+ new->nf_info = old->nf_info;
+#endif /*CONFIG_IMQ*/
#ifdef CONFIG_NET_SCHED
#ifdef CONFIG_NET_CLS_ACT
new->tc_verd = old->tc_verd;
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_IMQ.c
@@ -0,0 +1,69 @@
+/*
+ * This target marks packets to be enqueued to an imq device
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <linux/netfilter_ipv4/ipt_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const struct xt_target *target,
+ const void *targinfo)
+{
+ struct ipt_imq_info *mr = (struct ipt_imq_info*)targinfo;
+
+ (*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
+
+ return XT_CONTINUE;
+}
+
+static int imq_checkentry(const char *tablename,
+ const void *e,
+ const struct xt_target *target,
+ void *targinfo,
+ unsigned int hook_mask)
+{
+ struct ipt_imq_info *mr;
+
+ mr = (struct ipt_imq_info*)targinfo;
+
+ if (mr->todev > IMQ_MAX_DEVS) {
+ printk(KERN_WARNING
+ "IMQ: invalid device specified, highest is %u\n",
+ IMQ_MAX_DEVS);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct xt_target ipt_imq_reg = {
+ .name = "IMQ",
+ .family = AF_INET,
+ .target = imq_target,
+ .targetsize = sizeof(struct ipt_imq_info),
+ .checkentry = imq_checkentry,
+ .me = THIS_MODULE,
+ .table = "mangle"
+};
+
+static int __init init(void)
+{
+ return xt_register_target(&ipt_imq_reg);
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&ipt_imq_reg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -333,6 +333,17 @@
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_IMQ
+ tristate "IMQ target support"
+ depends on IP_NF_MANGLE
+ help
+ This option adds an `IMQ' target which is used to specify if and
+ to which IMQ device packets should get enqueued/dequeued.
+
+ For more information visit: http://www.linuximq.net/
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP_NF_TARGET_TOS
tristate "TOS target support"
depends on IP_NF_MANGLE
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -57,6 +57,7 @@
obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
obj-$(CONFIG_IP_NF_TARGET_TOS) += ipt_TOS.o
obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
+obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_IMQ.c
@@ -0,0 +1,69 @@
+/*
+ * This target marks packets to be enqueued to an imq device
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_ipv6/ip6t_IMQ.h>
+#include <linux/imq.h>
+
+static unsigned int imq_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const struct xt_target *target,
+ const void *targinfo)
+{
+ struct ip6t_imq_info *mr = (struct ip6t_imq_info*)targinfo;
+
+ (*pskb)->imq_flags = mr->todev | IMQ_F_ENQUEUE;
+
+ return XT_CONTINUE;
+}
+
+static int imq_checkentry(const char *tablename,
+ const void *entry,
+ const struct xt_target *target,
+ void *targinfo,
+ unsigned int hook_mask)
+{
+ struct ip6t_imq_info *mr;
+
+ mr = (struct ip6t_imq_info*)targinfo;
+
+ if (mr->todev > IMQ_MAX_DEVS) {
+ printk(KERN_WARNING
+ "IMQ: invalid device specified, highest is %u\n",
+ IMQ_MAX_DEVS);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct xt_target ip6t_imq_reg = {
+ .name = "IMQ",
+ .family = AF_INET6,
+ .target = imq_target,
+ .targetsize = sizeof(struct ip6t_imq_info),
+ .table = "mangle",
+ .checkentry = imq_checkentry,
+ .me = THIS_MODULE
+};
+
+static int __init init(void)
+{
+ return xt_register_target(&ip6t_imq_reg);
+}
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&ip6t_imq_reg);
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_AUTHOR("http://www.linuximq.net");
+MODULE_DESCRIPTION("Pseudo-driver for the intermediate queue device. See http://www.linuximq.net/ for more information.");
+MODULE_LICENSE("GPL");
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -173,6 +173,15 @@
To compile it as a module, choose M here. If unsure, say N.
+config IP6_NF_TARGET_IMQ
+ tristate "IMQ target support"
+ depends on IP6_NF_MANGLE
+ help
+ This option adds a `IMQ' target which is used to specify if and
+ to which imq device packets should get enqueued/dequeued.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config IP6_NF_TARGET_HL
tristate 'HL (hoplimit) target support'
depends on IP6_NF_MANGLE
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -13,6 +13,7 @@
obj-$(CONFIG_IP6_NF_MATCH_OWNER) += ip6t_owner.o
obj-$(CONFIG_IP6_NF_FILTER) += ip6table_filter.o
obj-$(CONFIG_IP6_NF_MANGLE) += ip6table_mangle.o
+obj-$(CONFIG_IP6_NF_TARGET_IMQ) += ip6t_IMQ.o
obj-$(CONFIG_IP6_NF_TARGET_HL) += ip6t_HL.o
obj-$(CONFIG_IP6_NF_QUEUE) += ip6_queue.o
obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -77,7 +77,6 @@
NOTE: Called under dev->queue_lock with locally disabled BH.
*/
-
static inline int qdisc_restart(struct net_device *dev)
{
struct Qdisc *q = dev->qdisc;
@@ -177,6 +176,11 @@
return q->q.qlen;
}
+int qdisc_restart1(struct net_device *dev)
+{
+ return qdisc_restart(dev);
+}
+
void __qdisc_run(struct net_device *dev)
{
do {
@@ -608,3 +612,4 @@
EXPORT_SYMBOL(qdisc_reset);
EXPORT_SYMBOL(qdisc_lock_tree);
EXPORT_SYMBOL(qdisc_unlock_tree);
+EXPORT_SYMBOL(qdisc_restart1);
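
qdisc_restart1() is exported purely so that the out-of-tree imq.c module can drive a device's qdisc by hand. A rough, hedged sketch of how such a module might use it under the 2.6.2x single-queue locking model noted in the comment above ("Called under dev->queue_lock with locally disabled BH"); this is not the actual imq.c source, and the function name is invented:

/* --- illustrative sketch, not part of the patch --- */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>

extern int qdisc_restart1(struct net_device *dev);	/* exported above */

/* Hypothetical transmit path of an IMQ-style pseudo device: hand the skb
 * to whatever qdisc the administrator attached to it, then kick the
 * qdisc so the packet is dequeued again. */
static void imq_sketch_xmit(struct net_device *dev, struct sk_buff *skb)
{
	struct Qdisc *q;

	spin_lock_bh(&dev->queue_lock);		/* qdisc_restart() expects this held */
	q = dev->qdisc;
	if (q->enqueue) {
		q->enqueue(skb, q);
		/* Drain; the real module loops much like __qdisc_run() does. */
		qdisc_restart1(dev);
	}
	spin_unlock_bh(&dev->queue_lock);
}
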

@@ -1,948 +0,0 @@
--- /dev/null
+++ b/include/linux/netfilter_ipv4/ipt_ROUTE.h
@@ -0,0 +1,23 @@
+/* Header file for iptables ipt_ROUTE target
+ *
+ * (C) 2002 by Cédric de Launois <delaunois@info.ucl.ac.be>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ */
+#ifndef _IPT_ROUTE_H_target
+#define _IPT_ROUTE_H_target
+
+#define IPT_ROUTE_IFNAMSIZ 16
+
+struct ipt_route_target_info {
+ char oif[IPT_ROUTE_IFNAMSIZ]; /* Output Interface Name */
+ char iif[IPT_ROUTE_IFNAMSIZ]; /* Input Interface Name */
+ u_int32_t gw; /* IP address of gateway */
+ u_int8_t flags;
+};
+
+/* Values for "flags" field */
+#define IPT_ROUTE_CONTINUE 0x01
+#define IPT_ROUTE_TEE 0x02
+
+#endif /*_IPT_ROUTE_H_target*/
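
For orientation, a small user-space sketch of how the iptables ROUTE extension presumably fills this structure for a rule such as `-j ROUTE --oif eth1 --gw 192.168.1.254 --tee`; only the struct layout comes from the header above, the helper name and addresses are made up:

/* --- illustrative sketch, not part of the patch --- */
#include <string.h>
#include <sys/types.h>
#include <arpa/inet.h>
#include <linux/netfilter_ipv4/ipt_ROUTE.h>

/* Hypothetical: build the target info that the kernel module reads in
 * ipt_route_target() further below. */
static void fill_route_info(struct ipt_route_target_info *info)
{
	memset(info, 0, sizeof(*info));
	strncpy(info->oif, "eth1", IPT_ROUTE_IFNAMSIZ - 1);	/* output iface */
	info->gw = inet_addr("192.168.1.254");	/* next hop, network byte order */
	info->flags = IPT_ROUTE_TEE;	/* route a copy, let the original pass */
}
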
--- /dev/null
+++ b/include/linux/netfilter_ipv6/ip6t_ROUTE.h
@@ -0,0 +1,23 @@
+/* Header file for iptables ip6t_ROUTE target
+ *
+ * (C) 2003 by Cédric de Launois <delaunois@info.ucl.ac.be>
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ */
+#ifndef _IPT_ROUTE_H_target
+#define _IPT_ROUTE_H_target
+
+#define IP6T_ROUTE_IFNAMSIZ 16
+
+struct ip6t_route_target_info {
+ char oif[IP6T_ROUTE_IFNAMSIZ]; /* Output Interface Name */
+ char iif[IP6T_ROUTE_IFNAMSIZ]; /* Input Interface Name */
+ u_int32_t gw[4]; /* IPv6 address of gateway */
+ u_int8_t flags;
+};
+
+/* Values for "flags" field */
+#define IP6T_ROUTE_CONTINUE 0x01
+#define IP6T_ROUTE_TEE 0x02
+
+#endif /*_IP6T_ROUTE_H_target*/
--- /dev/null
+++ b/net/ipv4/netfilter/ipt_ROUTE.c
@@ -0,0 +1,483 @@
+/*
+ * This implements the ROUTE target, which enables you to set up unusual
+ * routes not supported by the standard kernel routing table.
+ *
+ * Copyright (C) 2002 Cedric de Launois <delaunois@info.ucl.ac.be>
+ *
+ * v 1.11 2004/11/23
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <linux/netfilter_ipv4/ipt_ROUTE.h>
+#include <linux/netdevice.h>
+#include <linux/route.h>
+#include <linux/version.h>
+#include <linux/if_arp.h>
+#include <net/ip.h>
+#include <net/route.h>
+#include <net/icmp.h>
+#include <net/checksum.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Cedric de Launois <delaunois@info.ucl.ac.be>");
+MODULE_DESCRIPTION("iptables ROUTE target module");
+
+/* Try to route the packet according to the routing keys specified in
+ * route_info. Keys are :
+ * - ifindex :
+ * 0 if no oif preferred,
+ * otherwise set to the index of the desired oif
+ * - route_info->gw :
+ * 0 if no gateway specified,
+ * otherwise set to the next host to which the pkt must be routed
+ * If success, skb->dev is the output device to which the packet must
+ * be sent and skb->dst is not NULL
+ *
+ * RETURN: -1 if an error occurred
+ * 1 if the packet was successfully routed to the
+ * destination desired
+ * 0 if the kernel routing table could not route the packet
+ * according to the keys specified
+ */
+static int route(struct sk_buff *skb,
+ unsigned int ifindex,
+ const struct ipt_route_target_info *route_info)
+{
+ int err;
+ struct rtable *rt;
+ struct iphdr *iph = ip_hdr(skb);
+ struct flowi fl = {
+ .oif = ifindex,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .saddr = 0,
+ .tos = RT_TOS(iph->tos),
+ .scope = RT_SCOPE_UNIVERSE,
+ }
+ }
+ };
+
+ /* The destination address may be overloaded by the target */
+ if (route_info->gw)
+ fl.fl4_dst = route_info->gw;
+
+ /* Trying to route the packet using the standard routing table. */
+ if ((err = ip_route_output_key(&rt, &fl))) {
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: couldn't route pkt (err: %i)",err);
+ return -1;
+ }
+
+ /* Drop old route. */
+ dst_release(skb->dst);
+ skb->dst = NULL;
+
+ /* Success if no oif specified or if the oif corresponds to the
+ * one desired */
+ if (!ifindex || rt->u.dst.dev->ifindex == ifindex) {
+ skb->dst = &rt->u.dst;
+ skb->dev = skb->dst->dev;
+ skb->protocol = htons(ETH_P_IP);
+ return 1;
+ }
+
+ /* The interface selected by the routing table is not the one
+ * specified by the user. This may happen because the dst address
+ * is one of our own addresses.
+ */
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: failed to route as desired gw=%u.%u.%u.%u oif=%i (got oif=%i)\n",
+ NIPQUAD(route_info->gw), ifindex, rt->u.dst.dev->ifindex);
+
+ return 0;
+}
+
+
+/* Stolen from ip_finish_output2
+ * PRE : skb->dev is set to the device we are leaving by
+ * skb->dst is not NULL
+ * POST: the packet is sent with the link layer header pushed
+ * the packet is destroyed
+ */
+static void ip_direct_send(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb->dst;
+ struct hh_cache *hh = dst->hh;
+ struct net_device *dev = dst->dev;
+ int hh_len = LL_RESERVED_SPACE(dev);
+
+ /* Be paranoid, rather than too clever. */
+ if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
+ struct sk_buff *skb2;
+
+ skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
+ if (skb2 == NULL) {
+ kfree_skb(skb);
+ return;
+ }
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
+ kfree_skb(skb);
+ skb = skb2;
+ }
+
+ if (hh) {
+ int hh_alen;
+
+ read_lock_bh(&hh->hh_lock);
+ hh_alen = HH_DATA_ALIGN(hh->hh_len);
+ memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
+ read_unlock_bh(&hh->hh_lock);
+ skb_push(skb, hh->hh_len);
+ hh->hh_output(skb);
+ } else if (dst->neighbour)
+ dst->neighbour->output(skb);
+ else {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
+ kfree_skb(skb);
+ }
+}
+
+
+/* PRE : skb->dev is set to the device we are leaving by
+ * POST: - the packet is directly sent to the skb->dev device, without
+ * pushing the link layer header.
+ * - the packet is destroyed
+ */
+static inline int dev_direct_send(struct sk_buff *skb)
+{
+ return dev_queue_xmit(skb);
+}
+
+
+static unsigned int route_oif(const struct ipt_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ unsigned int ifindex = 0;
+ struct net_device *dev_out = NULL;
+
+ /* The user set the interface name to use.
+ * Getting the current interface index.
+ */
+ if ((dev_out = dev_get_by_name(route_info->oif))) {
+ ifindex = dev_out->ifindex;
+ } else {
+ /* Unknown interface name : packet dropped */
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: oif interface %s not found\n", route_info->oif);
+ return NF_DROP;
+ }
+
+ /* Trying the standard way of routing packets */
+ switch (route(skb, ifindex, route_info)) {
+ case 1:
+ dev_put(dev_out);
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
+ return IPT_CONTINUE;
+
+ ip_direct_send(skb);
+ return NF_STOLEN;
+
+ case 0:
+ /* Failed to send to oif. Trying the hard way */
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
+ return NF_DROP;
+
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: forcing the use of %i\n",
+ ifindex);
+
+ /* We have to force the use of an interface.
+ * This interface must be a tunnel interface since
+ * otherwise we can't guess the hw address for
+ * the packet. For a tunnel interface, no hw address
+ * is needed.
+ */
+ if ((dev_out->type != ARPHRD_TUNNEL)
+ && (dev_out->type != ARPHRD_IPGRE)) {
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: can't guess the hw addr !\n");
+ dev_put(dev_out);
+ return NF_DROP;
+ }
+
+ /* Send the packet. This will also free skb
+ * Do not go through the POST_ROUTING hook because
+ * skb->dst is not set and because it will probably
+ * get confused by the destination IP address.
+ */
+ skb->dev = dev_out;
+ dev_direct_send(skb);
+ dev_put(dev_out);
+ return NF_STOLEN;
+
+ default:
+ /* Unexpected error */
+ dev_put(dev_out);
+ return NF_DROP;
+ }
+}
+
+
+static unsigned int route_iif(const struct ipt_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ struct net_device *dev_in = NULL;
+
+ /* Getting the current interface index. */
+ if (!(dev_in = dev_get_by_name(route_info->iif))) {
+ if (net_ratelimit())
+ DEBUGP("ipt_ROUTE: iif interface %s not found\n", route_info->iif);
+ return NF_DROP;
+ }
+
+ skb->dev = dev_in;
+ dst_release(skb->dst);
+ skb->dst = NULL;
+
+ netif_rx(skb);
+ dev_put(dev_in);
+ return NF_STOLEN;
+}
+
+
+static unsigned int route_gw(const struct ipt_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ if (route(skb, 0, route_info)!=1)
+ return NF_DROP;
+
+ if (route_info->flags & IPT_ROUTE_CONTINUE)
+ return IPT_CONTINUE;
+
+ ip_direct_send(skb);
+ return NF_STOLEN;
+}
+
+
+/* To detect and deter routed packet loopback when using the --tee option,
+ * we take a page out of the raw.patch book: on the copied skb, we set up
+ * a fake ->nfct entry, pointing to the local &route_tee_track. We skip
+ * routing packets when we see they already have that ->nfct.
+ */
+
+static struct nf_conn route_tee_track;
+
+static unsigned int ipt_route_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
+#else
+ const void *targinfo)
+#endif
+{
+ const struct ipt_route_target_info *route_info = targinfo;
+ struct sk_buff *skb = *pskb;
+ unsigned int res;
+
+ if (skb->nfct == &route_tee_track.ct_general) {
+ /* Loopback - a packet we already routed, is to be
+ * routed another time. Avoid that, now.
+ */
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: loopback - DROP!\n");
+ return NF_DROP;
+ }
+
+ /* If we are at PREROUTING or INPUT hook
+ * the TTL isn't decreased by the IP stack
+ */
+ if (hooknum == NF_IP_PRE_ROUTING ||
+ hooknum == NF_IP_LOCAL_IN) {
+
+ struct iphdr *iph = ip_hdr(skb);
+
+ if (iph->ttl <= 1) {
+ struct rtable *rt;
+ struct flowi fl = {
+ .oif = 0,
+ .nl_u = {
+ .ip4_u = {
+ .daddr = iph->daddr,
+ .saddr = iph->saddr,
+ .tos = RT_TOS(iph->tos),
+ .scope = ((iph->tos & RTO_ONLINK) ?
+ RT_SCOPE_LINK :
+ RT_SCOPE_UNIVERSE)
+ }
+ }
+ };
+
+ if (ip_route_output_key(&rt, &fl)) {
+ return NF_DROP;
+ }
+
+ if (skb->dev == rt->u.dst.dev) {
+ /* Drop old route. */
+ dst_release(skb->dst);
+ skb->dst = &rt->u.dst;
+
+ /* this will traverse normal stack, and
+ * thus call conntrack on the icmp packet */
+ icmp_send(skb, ICMP_TIME_EXCEEDED,
+ ICMP_EXC_TTL, 0);
+ }
+
+ return NF_DROP;
+ }
+
+ /*
+ * If we are at INPUT the checksum must be recalculated since
+ * the length could change as the result of a defragmentation.
+ */
+ if(hooknum == NF_IP_LOCAL_IN) {
+ iph->ttl = iph->ttl - 1;
+ iph->check = 0;
+ iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+ } else {
+ ip_decrease_ttl(iph);
+ }
+ }
+
+ if ((route_info->flags & IPT_ROUTE_TEE)) {
+ /*
+ * Copy the *pskb, and route the copy. Will later return
+ * IPT_CONTINUE for the original skb, which should continue
+ * on its way as if nothing happened. The copy should be
+ * independently delivered to the ROUTE --gw.
+ */
+ skb = skb_copy(*pskb, GFP_ATOMIC);
+ if (!skb) {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: copy failed!\n");
+ return IPT_CONTINUE;
+ }
+ }
+
+ /* Tell conntrack to forget this packet since it may get confused
+ * when a packet is leaving with dst address == our address.
+ * Good idea ? Dunno. Need advice.
+ *
+ * NEW: mark the skb with our &route_tee_track, so we avoid looping
+ * on any already routed packet.
+ */
+ if (!(route_info->flags & IPT_ROUTE_CONTINUE)) {
+ nf_conntrack_put(skb->nfct);
+ skb->nfct = &route_tee_track.ct_general;
+ skb->nfctinfo = IP_CT_NEW;
+ nf_conntrack_get(skb->nfct);
+ }
+
+ if (route_info->oif[0] != '\0') {
+ res = route_oif(route_info, skb);
+ } else if (route_info->iif[0] != '\0') {
+ res = route_iif(route_info, skb);
+ } else if (route_info->gw) {
+ res = route_gw(route_info, skb);
+ } else {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ipt_ROUTE: no parameter !\n");
+ res = IPT_CONTINUE;
+ }
+
+ if ((route_info->flags & IPT_ROUTE_TEE))
+ res = IPT_CONTINUE;
+
+ return res;
+}
+
+
+static int ipt_route_checkentry(const char *tablename,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ const void *e,
+#else
+ const struct ipt_ip *ip,
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
+#endif
+ void *targinfo,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int targinfosize,
+#endif
+ unsigned int hook_mask)
+{
+ if (strcmp(tablename, "mangle") != 0) {
+ printk("ipt_ROUTE: bad table `%s', use the `mangle' table.\n",
+ tablename);
+ return 0;
+ }
+
+ if (hook_mask & ~( (1 << NF_IP_PRE_ROUTING)
+ | (1 << NF_IP_LOCAL_IN)
+ | (1 << NF_IP_FORWARD)
+ | (1 << NF_IP_LOCAL_OUT)
+ | (1 << NF_IP_POST_ROUTING))) {
+ printk("ipt_ROUTE: bad hook\n");
+ return 0;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ if (targinfosize != IPT_ALIGN(sizeof(struct ipt_route_target_info))) {
+ printk(KERN_WARNING "ipt_ROUTE: targinfosize %u != %Zu\n",
+ targinfosize,
+ IPT_ALIGN(sizeof(struct ipt_route_target_info)));
+ return 0;
+ }
+#endif
+
+ return 1;
+}
+
+
+static struct ipt_target ipt_route_reg = {
+ .name = "ROUTE",
+ .target = ipt_route_target,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ipt_route_target_info),
+#endif
+ .checkentry = ipt_route_checkentry,
+ .me = THIS_MODULE,
+};
+
+static int __init init(void)
+{
+ /* Set up fake conntrack (stolen from raw.patch):
+ - to never be deleted, not in any hashes */
+ atomic_set(&route_tee_track.ct_general.use, 1);
+ /* - and look it like as a confirmed connection */
+ set_bit(IPS_CONFIRMED_BIT, &route_tee_track.status);
+ /* Initialize fake conntrack so that NAT will skip it */
+ route_tee_track.status |= IPS_NAT_DONE_MASK;
+
+ return xt_register_target(&ipt_route_reg);
+}
+
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&ipt_route_reg);
+}
+
+module_init(init);
+module_exit(fini);
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -552,5 +552,22 @@
To compile it as a module, choose M here. If unsure, say N.
+config IP_NF_TARGET_ROUTE
+ tristate 'ROUTE target support'
+ depends on IP_NF_MANGLE
+ help
+ This option adds a `ROUTE' target, which enables you to set up unusual
+ routes. For example, the ROUTE target lets you route a received packet
+ through an interface or towards a host, even if the regular destination
+ of the packet is the router itself. The ROUTE target is also able to
+ change the incoming interface of a packet.
+
+ The target may or may not be a final target. It has to be used inside
+ the mangle table.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. The module will be called ipt_ROUTE.o.
+ If unsure, say `N'.
+
endmenu
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -60,6 +60,7 @@
obj-$(CONFIG_IP_NF_TARGET_IMQ) += ipt_IMQ.o
obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
obj-$(CONFIG_IP_NF_TARGET_REDIRECT) += ipt_REDIRECT.o
+obj-$(CONFIG_IP_NF_TARGET_ROUTE) += ipt_ROUTE.o
obj-$(CONFIG_IP_NF_TARGET_NETMAP) += ipt_NETMAP.o
obj-$(CONFIG_IP_NF_TARGET_SAME) += ipt_SAME.o
obj-$(CONFIG_IP_NF_TARGET_LOG) += ipt_LOG.o
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -154,6 +154,8 @@
.gc_thresh3 = 1024,
};
+EXPORT_SYMBOL(nd_tbl);
+
/* ND options */
struct ndisc_options {
struct nd_opt_hdr *nd_opt_array[__ND_OPT_ARRAY_MAX];
--- /dev/null
+++ b/net/ipv6/netfilter/ip6t_ROUTE.c
@@ -0,0 +1,330 @@
+/*
+ * This implements the ROUTE v6 target, which enables you to set up unusual
+ * routes not supported by the standard kernel routing table.
+ *
+ * Copyright (C) 2003 Cedric de Launois <delaunois@info.ucl.ac.be>
+ *
+ * v 1.1 2004/11/23
+ *
+ * This software is distributed under GNU GPL v2, 1991
+ */
+
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_ipv6/ip6t_ROUTE.h>
+#include <linux/netdevice.h>
+#include <linux/version.h>
+#include <net/ipv6.h>
+#include <net/ndisc.h>
+#include <net/ip6_route.h>
+#include <linux/icmpv6.h>
+
+#if 1
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define NIP6(addr) \
+ ntohs((addr).s6_addr16[0]), \
+ ntohs((addr).s6_addr16[1]), \
+ ntohs((addr).s6_addr16[2]), \
+ ntohs((addr).s6_addr16[3]), \
+ ntohs((addr).s6_addr16[4]), \
+ ntohs((addr).s6_addr16[5]), \
+ ntohs((addr).s6_addr16[6]), \
+ ntohs((addr).s6_addr16[7])
+
+/* Route the packet according to the routing keys specified in
+ * route_info. Keys are :
+ * - ifindex :
+ * 0 if no oif preferred,
+ * otherwise set to the index of the desired oif
+ * - route_info->gw :
+ * 0 if no gateway specified,
+ * otherwise set to the next host to which the pkt must be routed
+ * If success, skb->dev is the output device to which the packet must
+ * be sent and skb->dst is not NULL
+ *
+ * RETURN: 1 if the packet was successfully routed to the
+ * destination desired
+ * 0 if the kernel routing table could not route the packet
+ * according to the keys specified
+ */
+static int
+route6(struct sk_buff *skb,
+ unsigned int ifindex,
+ const struct ip6t_route_target_info *route_info)
+{
+ struct rt6_info *rt = NULL;
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
+
+ DEBUGP("ip6t_ROUTE: called with: ");
+ DEBUGP("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(ipv6h->daddr));
+ DEBUGP("GATEWAY=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(*gw));
+ DEBUGP("OUT=%s\n", route_info->oif);
+
+ if (ipv6_addr_any(gw))
+ rt = rt6_lookup(&ipv6h->daddr, &ipv6h->saddr, ifindex, 1);
+ else
+ rt = rt6_lookup(gw, &ipv6h->saddr, ifindex, 1);
+
+ if (!rt)
+ goto no_route;
+
+ DEBUGP("ip6t_ROUTE: routing gives: ");
+ DEBUGP("DST=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(rt->rt6i_dst.addr));
+ DEBUGP("GATEWAY=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x ", NIP6(rt->rt6i_gateway));
+ DEBUGP("OUT=%s\n", rt->rt6i_dev->name);
+
+ if (ifindex && rt->rt6i_dev->ifindex!=ifindex)
+ goto wrong_route;
+
+ if (!rt->rt6i_nexthop) {
+ DEBUGP("ip6t_ROUTE: discovering neighbour\n");
+ rt->rt6i_nexthop = ndisc_get_neigh(rt->rt6i_dev, &rt->rt6i_dst.addr);
+ }
+
+ /* Drop old route. */
+ dst_release(skb->dst);
+ skb->dst = &rt->u.dst;
+ skb->dev = rt->rt6i_dev;
+ return 1;
+
+ wrong_route:
+ dst_release(&rt->u.dst);
+ no_route:
+ if (!net_ratelimit())
+ return 0;
+
+ printk("ip6t_ROUTE: no explicit route found ");
+ if (ifindex)
+ printk("via interface %s ", route_info->oif);
+ if (!ipv6_addr_any(gw))
+ printk("via gateway %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x", NIP6(*gw));
+ printk("\n");
+ return 0;
+}
+
+
+/* Stolen from ip6_output_finish
+ * PRE : skb->dev is set to the device we are leaving by
+ * skb->dst is not NULL
+ * POST: the packet is sent with the link layer header pushed
+ * the packet is destroyed
+ */
+static void ip_direct_send(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb->dst;
+ struct hh_cache *hh = dst->hh;
+
+ if (hh) {
+ read_lock_bh(&hh->hh_lock);
+ memcpy(skb->data - 16, hh->hh_data, 16);
+ read_unlock_bh(&hh->hh_lock);
+ skb_push(skb, hh->hh_len);
+ hh->hh_output(skb);
+ } else if (dst->neighbour)
+ dst->neighbour->output(skb);
+ else {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: no hdr & no neighbour cache!\n");
+ kfree_skb(skb);
+ }
+}
+
+
+static unsigned int
+route6_oif(const struct ip6t_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ unsigned int ifindex = 0;
+ struct net_device *dev_out = NULL;
+
+ /* The user set the interface name to use.
+ * Getting the current interface index.
+ */
+ if ((dev_out = dev_get_by_name(route_info->oif))) {
+ ifindex = dev_out->ifindex;
+ } else {
+ /* Unknown interface name : packet dropped */
+ if (net_ratelimit())
+ DEBUGP("ip6t_ROUTE: oif interface %s not found\n", route_info->oif);
+
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
+ return IP6T_CONTINUE;
+ else
+ return NF_DROP;
+ }
+
+ /* Trying the standard way of routing packets */
+ if (route6(skb, ifindex, route_info)) {
+ dev_put(dev_out);
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
+ return IP6T_CONTINUE;
+
+ ip_direct_send(skb);
+ return NF_STOLEN;
+ } else
+ return NF_DROP;
+}
+
+
+static unsigned int
+route6_gw(const struct ip6t_route_target_info *route_info,
+ struct sk_buff *skb)
+{
+ if (route6(skb, 0, route_info)) {
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
+ return IP6T_CONTINUE;
+
+ ip_direct_send(skb);
+ return NF_STOLEN;
+ } else
+ return NF_DROP;
+}
+
+
+static unsigned int
+ip6t_route_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
+#endif
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ const void *targinfo,
+ void *userinfo)
+#else
+ const void *targinfo)
+#endif
+{
+ const struct ip6t_route_target_info *route_info = targinfo;
+ struct sk_buff *skb = *pskb;
+ struct in6_addr *gw = (struct in6_addr*)&route_info->gw;
+ unsigned int res;
+
+ if (route_info->flags & IP6T_ROUTE_CONTINUE)
+ goto do_it;
+
+ /* If we are at PREROUTING or INPUT hook
+ * the TTL isn't decreased by the IP stack
+ */
+ if (hooknum == NF_IP6_PRE_ROUTING ||
+ hooknum == NF_IP6_LOCAL_IN) {
+
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
+ if (ipv6h->hop_limit <= 1) {
+ /* Force OUTPUT device used as source address */
+ skb->dev = skb->dst->dev;
+
+ icmpv6_send(skb, ICMPV6_TIME_EXCEED,
+ ICMPV6_EXC_HOPLIMIT, 0, skb->dev);
+
+ return NF_DROP;
+ }
+
+ ipv6h->hop_limit--;
+ }
+
+ if ((route_info->flags & IP6T_ROUTE_TEE)) {
+ /*
+ * Copy the *pskb, and route the copy. Will later return
+ * IP6T_CONTINUE for the original skb, which should continue
+ * on its way as if nothing happened. The copy should be
+ * independently delivered to the ROUTE --gw.
+ */
+ skb = skb_copy(*pskb, GFP_ATOMIC);
+ if (!skb) {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: copy failed!\n");
+ return IP6T_CONTINUE;
+ }
+ }
+
+do_it:
+ if (route_info->oif[0]) {
+ res = route6_oif(route_info, skb);
+ } else if (!ipv6_addr_any(gw)) {
+ res = route6_gw(route_info, skb);
+ } else {
+ if (net_ratelimit())
+ DEBUGP(KERN_DEBUG "ip6t_ROUTE: no parameter !\n");
+ res = IP6T_CONTINUE;
+ }
+
+ if ((route_info->flags & IP6T_ROUTE_TEE))
+ res = IP6T_CONTINUE;
+
+ return res;
+}
+
+
+static int
+ip6t_route_checkentry(const char *tablename,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
+ const void *entry,
+#else
+ const struct ip6t_entry *entry
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ const struct xt_target *target,
+#endif
+ void *targinfo,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ unsigned int targinfosize,
+#endif
+ unsigned int hook_mask)
+{
+ if (strcmp(tablename, "mangle") != 0) {
+ printk("ip6t_ROUTE: can only be called from \"mangle\" table.\n");
+ return 0;
+ }
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
+ if (targinfosize != IP6T_ALIGN(sizeof(struct ip6t_route_target_info))) {
+ printk(KERN_WARNING "ip6t_ROUTE: targinfosize %u != %Zu\n",
+ targinfosize,
+ IP6T_ALIGN(sizeof(struct ip6t_route_target_info)));
+ return 0;
+ }
+#endif
+
+ return 1;
+}
+
+
+static struct ip6t_target ip6t_route_reg = {
+ .name = "ROUTE",
+ .target = ip6t_route_target,
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
+ .targetsize = sizeof(struct ip6t_route_target_info),
+#endif
+ .checkentry = ip6t_route_checkentry,
+ .me = THIS_MODULE
+};
+
+
+static int __init init(void)
+{
+ printk(KERN_DEBUG "registering ipv6 ROUTE target\n");
+ if (xt_register_target(&ip6t_route_reg))
+ return -EINVAL;
+
+ return 0;
+}
+
+
+static void __exit fini(void)
+{
+ xt_unregister_target(&ip6t_route_reg);
+}
+
+module_init(init);
+module_exit(fini);
+MODULE_LICENSE("GPL");
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -209,5 +209,18 @@
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.txt>. If unsure, say `N'.
+config IP6_NF_TARGET_ROUTE
+ tristate 'ROUTE target support'
+ depends on IP6_NF_MANGLE
+ help
+ This option adds a `ROUTE' target, which enables you to set up unusual
+ routes. The ROUTE target is also able to change the incoming interface
+ of a packet.
+
+ The target may or may not be a final target. It has to be used inside
+ the mangle table.
+
+ Not working as a module.
+
endmenu
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -20,6 +20,7 @@
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+obj-$(CONFIG_IP6_NF_TARGET_ROUTE) += ip6t_ROUTE.o
obj-$(CONFIG_IP6_NF_MATCH_MH) += ip6t_mh.o
# objects for l3 independent conntrack

@@ -1,844 +0,0 @@
--- /dev/null
+++ b/include/linux/netfilter/oot_conntrack.h
@@ -0,0 +1,5 @@
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+# include <linux/netfilter_ipv4/ip_conntrack.h>
+#else /* linux-2.6.20+ */
+# include <net/netfilter/nf_nat_rule.h>
+#endif
--- /dev/null
+++ b/include/linux/netfilter/oot_trans.h
@@ -0,0 +1,14 @@
+/* Out of tree workarounds */
+#include <linux/version.h>
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 18)
+# define HAVE_MATCHINFOSIZE 1
+# define HAVE_TARGUSERINFO 1
+# define HAVE_TARGINFOSIZE 1
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20)
+# define nfmark mark
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21)
+# define tcp_v4_check(tcph, tcph_sz, s, d, csp) \
+ tcp_v4_check((tcph_sz), (s), (d), (csp))
+#endif
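
The last macro above absorbs the 2.6.21 change that dropped the struct tcphdr * argument from tcp_v4_check(). A hedged sketch of a call site that keeps using the old five-argument form on both old and new kernels; the wrapper function itself is invented, while the checksum pattern matches the one used in xt_DELUDE.c further below:

/* --- illustrative sketch, not part of the patch --- */
#include <net/tcp.h>
#include <linux/netfilter/oot_trans.h>

/* Hypothetical helper: on <= 2.6.20 this is the native prototype; on
 * >= 2.6.21 the macro silently drops the first argument and calls the
 * new four-argument tcp_v4_check(). */
static void oot_fix_tcp_csum(struct tcphdr *tcph, __be32 saddr, __be32 daddr)
{
	tcph->check = 0;
	tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr), saddr, daddr,
				   csum_partial((char *)tcph,
						sizeof(struct tcphdr), 0));
}
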
--- /dev/null
+++ b/include/linux/netfilter/xt_CHAOS.h
@@ -0,0 +1,14 @@
+#ifndef _LINUX_NETFILTER_XT_CHAOS_H
+#define _LINUX_NETFILTER_XT_CHAOS_H 1
+
+enum xt_chaos_target_variant {
+ XTCHAOS_NORMAL,
+ XTCHAOS_TARPIT,
+ XTCHAOS_DELUDE,
+};
+
+struct xt_chaos_target_info {
+ uint8_t variant;
+};
+
+#endif /* _LINUX_NETFILTER_XT_CHAOS_H */
--- /dev/null
+++ b/include/linux/netfilter/xt_portscan.h
@@ -0,0 +1,8 @@
+#ifndef _LINUX_NETFILTER_XT_PORTSCAN_H
+#define _LINUX_NETFILTER_XT_PORTSCAN_H 1
+
+struct xt_portscan_match_info {
+ uint8_t match_stealth, match_syn, match_cn, match_gr;
+};
+
+#endif /* _LINUX_NETFILTER_XT_PORTSCAN_H */
--- /dev/null
+++ b/net/netfilter/find_match.c
@@ -0,0 +1,39 @@
+/*
+ xt_request_find_match
+ by Jan Engelhardt <jengelh [at] gmx de>, 2006 - 2007
+
+ Based upon linux-2.6.18.5/net/netfilter/x_tables.c:
+ Copyright (C) 2006-2006 Harald Welte <laforge@netfilter.org>
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License version 2 as
+ published by the Free Software Foundation.
+*/
+#include <linux/err.h>
+#include <linux/netfilter_arp.h>
+#include <linux/socket.h>
+#include <linux/netfilter/x_tables.h>
+
+/*
+ * Yeah this code is sub-optimal, but the function is missing in
+ * mainline so far. -jengelh
+ */
+static struct xt_match *xt_request_find_match_lo(int af, const char *name,
+ u8 revision)
+{
+ static const char *const xt_prefix[] = {
+ [AF_INET] = "ip",
+ [AF_INET6] = "ip6",
+ [NF_ARP] = "arp",
+ };
+ struct xt_match *match;
+
+ match = try_then_request_module(xt_find_match(af, name, revision),
+ "%st_%s", xt_prefix[af], name);
+ if (IS_ERR(match) || match == NULL)
+ return NULL;
+
+ return match;
+}
+
+/* In case it goes into mainline, let this out-of-tree package compile */
+#define xt_request_find_match xt_request_find_match_lo
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -255,6 +255,14 @@
# alphabetically ordered list of targets
+config NETFILTER_XT_TARGET_CHAOS
+ tristate '"CHAOS" target support'
+ depends on NETFILTER_XTABLES
+ help
+ This option adds a `CHAOS' target.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_CLASSIFY
tristate '"CLASSIFY" target support'
depends on NETFILTER_XTABLES
@@ -282,6 +290,14 @@
<file:Documentation/kbuild/modules.txt>. The module will be called
ipt_CONNMARK.ko. If unsure, say `N'.
+config NETFILTER_XT_TARGET_DELUDE
+ tristate '"DELUDE" target support'
+ depends on NETFILTER_XTABLES
+ help
+ This option adds a `DELUDE' target.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_TARGET_DSCP
tristate '"DSCP" target support'
depends on NETFILTER_XTABLES
@@ -526,6 +542,14 @@
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_MATCH_PORTSCAN
+ tristate '"portscan" match support'
+ depends on NETFILTER_XTABLES
+ help
+ This option adds 'portscan' match support.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
config NETFILTER_XT_MATCH_MULTIPORT
tristate "Multiple port match support"
depends on NETFILTER_XTABLES
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -47,6 +47,8 @@
obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_CHAOS) += xt_CHAOS.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_DELUDE) += xt_DELUDE.o
# matches
obj-$(CONFIG_NETFILTER_XT_MATCH_COMMENT) += xt_comment.o
@@ -74,3 +76,4 @@
obj-$(CONFIG_NETFILTER_XT_MATCH_TCPMSS) += xt_tcpmss.o
obj-$(CONFIG_NETFILTER_XT_MATCH_PHYSDEV) += xt_physdev.o
obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
+obj-$(CONFIG_NETFILTER_XT_MATCH_PORTSCAN) += xt_portscan.o
--- /dev/null
+++ b/net/netfilter/xt_CHAOS.c
@@ -0,0 +1,200 @@
+/*
+ * CHAOS target for netfilter
+ * Copyright © CC Computer Consultants GmbH, 2006 - 2007
+ * Contact: Jan Engelhardt <jengelh@computergmbh.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either version
+ * 2 or 3 as published by the Free Software Foundation.
+ */
+#include <linux/icmp.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/stat.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <linux/netfilter_ipv4/ipt_REJECT.h>
+#include <net/ip.h>
+#if defined(_LOCAL)
+# include "xt_CHAOS.h"
+# include "find_match.c"
+#elif defined(CONFIG_NETFILTER_XT_TARGET_CHAOS) || \
+ defined(CONFIG_NETFILTER_XT_TARGET_CHAOS_MODULE)
+# include <linux/netfilter/xt_CHAOS.h>
+# include "find_match.c"
+#else
+# include "xt_CHAOS.h"
+# include "find_match.c"
+#endif
+#define PFX KBUILD_MODNAME ": "
+
+/* Module parameters */
+static unsigned int reject_percentage = ~0U * .01;
+static unsigned int delude_percentage = ~0U * .0101;
+module_param(reject_percentage, uint, S_IRUGO | S_IWUSR);
+module_param(delude_percentage, uint, S_IRUGO | S_IWUSR);
+
+/* References to other matches/targets */
+static struct xt_match *xm_tcp;
+static struct xt_target *xt_delude, *xt_reject, *xt_tarpit;
+
+static int have_delude, have_tarpit;
+
+/* Static data for other matches/targets */
+static const struct ipt_reject_info reject_params = {
+ .with = ICMP_HOST_UNREACH,
+};
+
+static const struct xt_tcp tcp_params = {
+ .spts = {0, ~0},
+ .dpts = {0, ~0},
+};
+
+/* CHAOS functions */
+static void xt_chaos_total(const struct xt_chaos_target_info *info,
+ struct sk_buff **pskb, const struct net_device *in,
+ const struct net_device *out, unsigned int hooknum)
+{
+ const struct iphdr *iph = ip_hdr(*pskb);
+ const int protoff = 4 * iph->ihl;
+ const int offset = ntohs(iph->frag_off) & IP_OFFSET;
+ const struct xt_target *destiny;
+ int hotdrop = 0, ret;
+
+ ret = xm_tcp->match(*pskb, in, out, xm_tcp, &tcp_params,
+ offset, protoff, &hotdrop);
+ if (!ret || hotdrop || (unsigned int)net_random() > delude_percentage)
+ return;
+
+ destiny = (info->variant == XTCHAOS_TARPIT) ? xt_tarpit : xt_delude;
+ destiny->target(pskb, in, out, hooknum, destiny, NULL);
+ return;
+}
+
+static unsigned int chaos_tg(struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int hooknum, const struct xt_target *target, const void *targinfo)
+{
+ /*
+ * Equivalent to:
+ * -A chaos -m statistic --mode random --probability \
+ * $reject_percentage -j REJECT --reject-with host-unreach;
+ * -A chaos -p tcp -m statistic --mode random --probability \
+ * $delude_percentage -j DELUDE;
+ * -A chaos -j DROP;
+ */
+ const struct xt_chaos_target_info *info = targinfo;
+ const struct iphdr *iph = ip_hdr(*pskb);
+
+ if ((unsigned int)net_random() <= reject_percentage)
+ return xt_reject->target(pskb, in, out, hooknum, target,
+ &reject_params);
+
+ /* TARPIT/DELUDE may not be called from the OUTPUT chain */
+ if (iph->protocol == IPPROTO_TCP &&
+ info->variant != XTCHAOS_NORMAL && hooknum != NF_IP_LOCAL_OUT)
+ xt_chaos_total(info, pskb, in, out, hooknum);
+
+ return NF_DROP;
+}
+
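The percentages above are stored as fractions of UINT_MAX so chaos_tg() can compare them directly against net_random(), which returns a uniform 32-bit value; `~0U * .01` is therefore roughly a 1% firing threshold. A tiny sketch of that conversion, with an invented helper name:

/* --- illustrative sketch, not part of the patch --- */

/* Hypothetical helper: express "fire with probability N percent" in the
 * same format as reject_percentage/delude_percentage, i.e. a threshold
 * that net_random() falls under about N% of the time. */
static inline unsigned int chaos_sketch_percent(unsigned int percent)
{
	return (~0U / 100) * percent;	/* chaos_sketch_percent(1) ~= ~0U * .01 */
}
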
+static int chaos_tg_check(const char *tablename, const void *entry,
+ const struct xt_target *target, void *targinfo, unsigned int hook_mask)
+{
+ const struct xt_chaos_target_info *info = targinfo;
+
+ if (info->variant == XTCHAOS_DELUDE && !have_delude) {
+ printk(KERN_WARNING PFX "Error: Cannot use --delude when "
+ "DELUDE module not available\n");
+ return false;
+ }
+ if (info->variant == XTCHAOS_TARPIT && !have_tarpit) {
+ printk(KERN_WARNING PFX "Error: Cannot use --tarpit when "
+ "TARPIT module not available\n");
+ return false;
+ }
+
+ return true;
+}
+
+static struct xt_target chaos_tg_reg = {
+ .name = "CHAOS",
+ .family = AF_INET,
+ .table = "filter",
+ .hooks = (1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD) |
+ (1 << NF_IP_LOCAL_OUT),
+ .checkentry = chaos_tg_check,
+ .target = chaos_tg,
+ .targetsize = sizeof(struct xt_chaos_target_info),
+ .me = THIS_MODULE,
+};
+
+static int __init chaos_tg_init(void)
+{
+ int ret = -EINVAL;
+
+ xm_tcp = xt_request_find_match(AF_INET, "tcp", 0);
+ if (xm_tcp == NULL) {
+ printk(KERN_WARNING PFX "Error: Could not find or load "
+ "\"tcp\" match\n");
+ return -EINVAL;
+ }
+
+ xt_reject = xt_request_find_target(AF_INET, "REJECT", 0);
+ if (xt_reject == NULL) {
+ printk(KERN_WARNING PFX "Error: Could not find or load "
+ "\"REJECT\" target\n");
+ goto out2;
+ }
+
+ xt_tarpit = xt_request_find_target(AF_INET, "TARPIT", 0);
+ have_tarpit = xt_tarpit != NULL;
+ if (!have_tarpit)
+ printk(KERN_WARNING PFX "Warning: Could not find or load "
+ "\"TARPIT\" target\n");
+
+ xt_delude = xt_request_find_target(AF_INET, "DELUDE", 0);
+ have_delude = xt_delude != NULL;
+ if (!have_delude)
+ printk(KERN_WARNING PFX "Warning: Could not find or load "
+ "\"DELUDE\" target\n");
+
+ if ((ret = xt_register_target(&chaos_tg_reg)) != 0) {
+ printk(KERN_WARNING PFX "xt_register_target returned "
+ "error %d\n", ret);
+ goto out3;
+ }
+
+ return 0;
+
+ out3:
+ if (have_delude)
+ module_put(xt_delude->me);
+ if (have_tarpit)
+ module_put(xt_tarpit->me);
+ module_put(xt_reject->me);
+ out2:
+ module_put(xm_tcp->me);
+ return ret;
+}
+
+static void __exit chaos_tg_exit(void)
+{
+ xt_unregister_target(&chaos_tg_reg);
+ module_put(xm_tcp->me);
+ module_put(xt_reject->me);
+ if (have_delude)
+ module_put(xt_delude->me);
+ if (have_tarpit)
+ module_put(xt_tarpit->me);
+ return;
+}
+
+module_init(chaos_tg_init);
+module_exit(chaos_tg_exit);
+MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_DESCRIPTION("netfilter \"CHAOS\" target");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_CHAOS");
--- /dev/null
+++ b/net/netfilter/xt_DELUDE.c
@@ -0,0 +1,197 @@
+/*
+ * DELUDE target
+ * Copyright © CC Computer Consultants GmbH, 2007
+ * Contact: Jan Engelhardt <jengelh@computergmbh.de>
+ *
+ * Based upon linux-2.6.18.5/net/ipv4/netfilter/ipt_REJECT.c:
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * xt_DELUDE acts like REJECT, but answers an initial SYN with SYN-ACK.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/netfilter/x_tables.h>
+#ifdef CONFIG_BRIDGE_NETFILTER
+# include <linux/netfilter_bridge.h>
+#endif
+#include <net/tcp.h>
+#define PFX KBUILD_MODNAME ": "
+
+static void delude_send_reset(struct sk_buff *oldskb, unsigned int hook)
+{
+ struct tcphdr _otcph, *oth, *tcph;
+ unsigned int addr_type;
+ struct sk_buff *nskb;
+ u_int16_t tmp_port;
+ u_int32_t tmp_addr;
+ struct iphdr *niph;
+ bool needs_ack;
+
+ /* IP header checks: fragment. */
+ if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
+ return;
+
+ oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
+ sizeof(_otcph), &_otcph);
+ if (oth == NULL)
+ return;
+
+ /* No RST for RST. */
+ if (oth->rst)
+ return;
+
+ /* Check checksum */
+ if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
+ return;
+
+ /* We need a linear, writeable skb. We also need to expand
+ headroom in case hh_len of incoming interface < hh_len of
+ outgoing interface */
+ nskb = skb_copy_expand(oldskb, LL_MAX_HEADER, skb_tailroom(oldskb),
+ GFP_ATOMIC);
+ if (!nskb)
+ return;
+
+ /* This packet will not be the same as the other: clear nf fields */
+ nf_reset(nskb);
+ nskb->mark = 0;
+ skb_init_secmark(nskb);
+
+ skb_shinfo(nskb)->gso_size = 0;
+ skb_shinfo(nskb)->gso_segs = 0;
+ skb_shinfo(nskb)->gso_type = 0;
+
+ tcph = (struct tcphdr *)(skb_network_header(nskb) + ip_hdrlen(nskb));
+
+ /* Swap source and dest */
+ niph = ip_hdr(nskb);
+ tmp_addr = niph->saddr;
+ niph->saddr = niph->daddr;
+ niph->daddr = tmp_addr;
+ tmp_port = tcph->source;
+ tcph->source = tcph->dest;
+ tcph->dest = tmp_port;
+
+ /* Truncate to length (no data) */
+ tcph->doff = sizeof(struct tcphdr) / 4;
+ skb_trim(nskb, ip_hdrlen(nskb) + sizeof(struct tcphdr));
+ niph->tot_len = htons(nskb->len);
+
+ if (oth->syn && !oth->ack && !oth->rst && !oth->fin) {
+ /* DELUDE essential part */
+ tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
+ oldskb->len - ip_hdrlen(oldskb) -
+ (oth->doff << 2));
+ tcph->seq = false;
+ tcph->ack = true;
+ } else {
+ if (!tcph->ack) {
+ needs_ack = true;
+ tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn +
+ oth->fin + oldskb->len -
+ ip_hdrlen(oldskb) - (oth->doff<<2));
+ tcph->seq = false;
+ } else {
+ needs_ack = false;
+ tcph->seq = oth->ack_seq;
+ tcph->ack_seq = false;
+ }
+
+ /* Reset flags */
+ ((u_int8_t *)tcph)[13] = 0;
+ tcph->rst = true;
+ tcph->ack = needs_ack;
+ }
+
+ tcph->window = 0;
+ tcph->urg_ptr = 0;
+
+ /* Adjust TCP checksum */
+ tcph->check = 0;
+ tcph->check = tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
+ niph->daddr, csum_partial((char *)tcph,
+ sizeof(struct tcphdr), 0));
+
+ /* Set DF, id = 0 */
+ niph->frag_off = htons(IP_DF);
+ niph->id = 0;
+
+ addr_type = RTN_UNSPEC;
+#ifdef CONFIG_BRIDGE_NETFILTER
+ if (hook != NF_IP_FORWARD || (nskb->nf_bridge != NULL &&
+ nskb->nf_bridge->mask & BRNF_BRIDGED))
+#else
+ if (hook != NF_IP_FORWARD)
+#endif
+ addr_type = RTN_LOCAL;
+
+ if (ip_route_me_harder(&nskb, addr_type))
+ goto free_nskb;
+
+ nskb->ip_summed = CHECKSUM_NONE;
+
+ /* Adjust IP TTL */
+ niph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
+
+ /* Adjust IP checksum */
+ niph->check = 0;
+ niph->check = ip_fast_csum(skb_network_header(nskb), niph->ihl);
+
+ /* "Never happens" */
+ if (nskb->len > dst_mtu(nskb->dst))
+ goto free_nskb;
+
+ nf_ct_attach(nskb, oldskb);
+
+ NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
+ dst_output);
+ return;
+
+ free_nskb:
+ kfree_skb(nskb);
+}
+
+static unsigned int delude_tg(struct sk_buff **pskb,
+ const struct net_device *in, const struct net_device *out,
+ unsigned int hooknum, const struct xt_target *target, const void *targinfo)
+{
+ /* WARNING: This code causes reentry within iptables.
+ This means that the iptables jump stack is now crap. We
+ must return an absolute verdict. --RR */
+ delude_send_reset(*pskb, hooknum);
+ return NF_DROP;
+}
+
+static struct xt_target delude_tg_reg = {
+ .name = "DELUDE",
+ .family = AF_INET,
+ .table = "filter",
+ .hooks = (1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD),
+ .target = delude_tg,
+ .proto = IPPROTO_TCP,
+ .me = THIS_MODULE,
+};
+
+static int __init delude_tg_init(void)
+{
+ return xt_register_target(&delude_tg_reg);
+}
+
+static void __exit delude_tg_exit(void)
+{
+ xt_unregister_target(&delude_tg_reg);
+}
+
+module_init(delude_tg_init);
+module_exit(delude_tg_exit);
+MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_DESCRIPTION("netfilter \"DELUDE\" target");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_DELUDE");
--- /dev/null
+++ b/net/netfilter/xt_portscan.c
@@ -0,0 +1,269 @@
+/*
+ * portscan match for netfilter
+ * Copyright © CC Computer Consultants GmbH, 2006 - 2007
+ * Contact: Jan Engelhardt <jengelh@computergmbh.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License; either version
+ * 2 or 3 as published by the Free Software Foundation.
+ */
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/skbuff.h>
+#include <linux/stat.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/netfilter/x_tables.h>
+#include <linux/netfilter/xt_tcpudp.h>
+#include <net/netfilter/nf_nat_rule.h>
+#if defined(_LOCAL)
+# include "xt_portscan.h"
+#elif defined(CONFIG_NETFILTER_XT_MATCH_PORTSCAN) || \
+ defined(CONFIG_NETFILTER_XT_MATCH_PORTSCAN_MODULE)
+# include <linux/netfilter/xt_portscan.h>
+#else
+# include "xt_portscan.h"
+#endif
+#define PFX KBUILD_MODNAME ": "
+
+enum {
+ TCP_FLAGS_ALL3 = TCP_FLAG_FIN | TCP_FLAG_RST | TCP_FLAG_SYN,
+ TCP_FLAGS_ALL4 = TCP_FLAGS_ALL3 | TCP_FLAG_ACK,
+ TCP_FLAGS_ALL6 = TCP_FLAGS_ALL4 | TCP_FLAG_PSH | TCP_FLAG_URG,
+};
+
+/* Module parameters */
+static unsigned int
+ connmark_mask = ~0,
+ packet_mask = ~0,
+ mark_seen = 0x9,
+ mark_synrcv = 0x1,
+ mark_closed = 0x2,
+ mark_synscan = 0x3,
+ mark_estab1 = 0x4,
+ mark_estab2 = 0x5,
+ mark_cnscan = 0x6,
+ mark_grscan = 0x7,
+ mark_valid = 0x8;
+
+module_param(connmark_mask, uint, S_IRUGO | S_IWUSR);
+module_param(packet_mask, uint, S_IRUGO | S_IWUSR);
+module_param(mark_seen, uint, S_IRUGO | S_IWUSR);
+module_param(mark_synrcv, uint, S_IRUGO | S_IWUSR);
+module_param(mark_closed, uint, S_IRUGO | S_IWUSR);
+module_param(mark_synscan, uint, S_IRUGO | S_IWUSR);
+module_param(mark_estab1, uint, S_IRUGO | S_IWUSR);
+module_param(mark_estab2, uint, S_IRUGO | S_IWUSR);
+module_param(mark_cnscan, uint, S_IRUGO | S_IWUSR);
+module_param(mark_grscan, uint, S_IRUGO | S_IWUSR);
+module_param(mark_valid, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(connmark_mask, "only set specified bits in connection mark");
+MODULE_PARM_DESC(packet_mask, "only set specified bits in packet mark");
+MODULE_PARM_DESC(mark_seen, "nfmark value for packet-seen state");
+MODULE_PARM_DESC(mark_synrcv, "connmark value for SYN Received state");
+MODULE_PARM_DESC(mark_closed, "connmark value for closed state");
+MODULE_PARM_DESC(mark_synscan, "connmark value for SYN Scan state");
+MODULE_PARM_DESC(mark_estab1, "connmark value for Established-1 state");
+MODULE_PARM_DESC(mark_estab2, "connmark value for Established-2 state");
+MODULE_PARM_DESC(mark_cnscan, "connmark value for Connect Scan state");
+MODULE_PARM_DESC(mark_grscan, "connmark value for Grab Scan state");
+MODULE_PARM_DESC(mark_valid, "connmark value for Valid state");
+
+/* TCP flag functions */
+static inline bool tflg_ack4(const struct tcphdr *th)
+{
+ return (tcp_flag_word(th) & TCP_FLAGS_ALL4) == TCP_FLAG_ACK;
+}
+
+static inline bool tflg_ack6(const struct tcphdr *th)
+{
+ return (tcp_flag_word(th) & TCP_FLAGS_ALL6) == TCP_FLAG_ACK;
+}
+
+static inline bool tflg_fin(const struct tcphdr *th)
+{
+ return (tcp_flag_word(th) & TCP_FLAGS_ALL3) == TCP_FLAG_FIN;
+}
+
+static inline bool tflg_rst(const struct tcphdr *th)
+{
+ return (tcp_flag_word(th) & TCP_FLAGS_ALL3) == TCP_FLAG_RST;
+}
+
+static inline bool tflg_rstack(const struct tcphdr *th)
+{
+ return (tcp_flag_word(th) & TCP_FLAGS_ALL4) ==
+ (TCP_FLAG_ACK | TCP_FLAG_RST);
+}
+
+static inline bool tflg_syn(const struct tcphdr *th)
+{
+ return (tcp_flag_word(th) & TCP_FLAGS_ALL4) == TCP_FLAG_SYN;
+}
+
+static inline bool tflg_synack(const struct tcphdr *th)
+{
+ return (tcp_flag_word(th) & TCP_FLAGS_ALL4) ==
+ (TCP_FLAG_SYN | TCP_FLAG_ACK);
+}
+
+/* portscan functions */
+static inline bool portscan_mt_stealth(const struct tcphdr *th)
+{
+ /*
+ * "Connection refused" replies to our own probes must not be matched.
+ */
+ if (tflg_rstack(th))
+ return false;
+
+ if (tflg_rst(th) && printk_ratelimit()) {
+ printk(KERN_WARNING PFX "Warning: Pure RST received\n");
+ return false;
+ }
+
+ /*
+ * -p tcp ! --syn -m conntrack --ctstate INVALID: Looking for non-start
+ * packets that are not associated with any connection -- this will
+ * match most scan types (NULL, XMAS, FIN) and ridiculous flag
+ * combinations (SYN-RST, SYN-FIN, SYN-FIN-RST, FIN-RST, etc.).
+ */
+ return !tflg_syn(th);
+}
+
+static inline unsigned int portscan_mt_full(int mark,
+ enum ip_conntrack_info ctstate, bool loopback, const struct tcphdr *tcph,
+ unsigned int payload_len)
+{
+ if (mark == mark_estab2) {
+ /*
+ * -m connmark --mark $ESTAB2
+ */
+ if (tflg_ack4(tcph) && payload_len == 0)
+ return mark; /* keep mark */
+ else if (tflg_rst(tcph) || tflg_fin(tcph))
+ return mark_grscan;
+ else
+ return mark_valid;
+ } else if (mark == mark_estab1) {
+ /*
+ * -m connmark --mark $ESTAB1
+ */
+ if (tflg_rst(tcph) || tflg_fin(tcph))
+ return mark_cnscan;
+ else if (!loopback && tflg_ack4(tcph) && payload_len == 0)
+ return mark_estab2;
+ else
+ return mark_valid;
+ } else if (mark == mark_synrcv) {
+ /*
+ * -m connmark --mark $SYN
+ */
+ if (loopback && tflg_synack(tcph))
+ return mark; /* keep mark */
+ else if (loopback && tflg_rstack(tcph))
+ return mark_closed;
+ else if (tflg_ack6(tcph))
+ return mark_estab1;
+ else
+ return mark_synscan;
+ } else if (ctstate == IP_CT_NEW && tflg_syn(tcph)) {
+ /*
+ * -p tcp --syn --ctstate NEW
+ */
+ return mark_synrcv;
+ }
+ return mark;
+}
+
+static int portscan_mt(const struct sk_buff *skb,
+ const struct net_device *in, const struct net_device *out,
+ const struct xt_match *match, const void *matchinfo, int offset,
+ unsigned int protoff, int *hotdrop)
+{
+ const struct xt_portscan_match_info *info = matchinfo;
+ enum ip_conntrack_info ctstate;
+ const struct tcphdr *tcph;
+ struct nf_conn *ctdata;
+ struct tcphdr tcph_buf;
+
+ tcph = skb_header_pointer(skb, protoff, sizeof(tcph_buf), &tcph_buf);
+ if (tcph == NULL)
+ return false;
+
+ /* Check for invalid packets: -m conntrack --ctstate INVALID */
+ if ((ctdata = nf_ct_get(skb, &ctstate)) == NULL) {
+ if (info->match_stealth)
+ return portscan_mt_stealth(tcph);
+ /*
+ * If @ctdata is NULL, we cannot match the other scan
+ * types, return.
+ */
+ return false;
+ }
+
+ /*
+ * If -m portscan was previously applied to this packet, the rules we
+ * simulate must not be run through again. And for speedup, do not call
+ * it either when the connection is already VALID.
+ */
+ if ((ctdata->mark & connmark_mask) == mark_valid ||
+ (skb->mark & packet_mask) != mark_seen) {
+ unsigned int n;
+
+ n = portscan_mt_full(ctdata->mark & connmark_mask, ctstate,
+ in == &loopback_dev, tcph,
+ skb->len - protoff - 4 * tcph->doff);
+
+ ctdata->mark = (ctdata->mark & ~connmark_mask) | n;
+ ((struct sk_buff *)skb)->mark =
+ (skb->mark & ~packet_mask) ^ mark_seen;
+ }
+
+ return (info->match_syn && ctdata->mark == mark_synscan) ||
+ (info->match_cn && ctdata->mark == mark_cnscan) ||
+ (info->match_gr && ctdata->mark == mark_grscan);
+}
+
+static int portscan_mt_check(const char *tablename, const void *entry,
+ const struct xt_match *match, void *matchinfo, unsigned int hook_mask)
+{
+ const struct xt_portscan_match_info *info = matchinfo;
+
+ if ((info->match_stealth & ~1) || (info->match_syn & ~1) ||
+ (info->match_cn & ~1) || (info->match_gr & ~1)) {
+ printk(KERN_WARNING PFX "Invalid flags\n");
+ return false;
+ }
+ return true;
+}
+
+static struct xt_match portscan_mt_reg __read_mostly = {
+ .name = "portscan",
+ .family = AF_INET,
+ .match = portscan_mt,
+ .checkentry = portscan_mt_check,
+ .matchsize = sizeof(struct xt_portscan_match_info),
+ .proto = IPPROTO_TCP,
+ .me = THIS_MODULE,
+};
+
+static int __init portscan_mt_init(void)
+{
+ return xt_register_match(&portscan_mt_reg);
+}
+
+static void __exit portscan_mt_exit(void)
+{
+ xt_unregister_match(&portscan_mt_reg);
+ return;
+}
+
+module_init(portscan_mt_init);
+module_exit(portscan_mt_exit);
+MODULE_AUTHOR("Jan Engelhardt <jengelh@computergmbh.de>");
+MODULE_DESCRIPTION("netfilter \"portscan\" match");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_portscan");
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1564,6 +1564,8 @@
return seq;
}
+EXPORT_SYMBOL(secure_tcp_sequence_number);
+
/* Generate secure starting point for ephemeral IPV4 transport port search */
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
{

@@ -1,319 +0,0 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -379,6 +379,23 @@
To compile it as a module, choose M here. If unsure, say N.
+config NETFILTER_XT_TARGET_TARPIT
+ tristate '"TARPIT" target support'
+ depends on NETFILTER_XTABLES
+ ---help---
+ Adds a TARPIT target to iptables, which captures and holds
+ incoming TCP connections using no local per-connection resources.
+ Connections are accepted, but immediately switched to the persist
+ state (0 byte window), in which the remote side stops sending data
+ and asks to continue every 60-240 seconds. Attempts to close the
+ connection are ignored, forcing the remote side to time out the
+ connection in 12-24 minutes.
+
+ This offers similar functionality to LaBrea
+ <http://www.hackbusters.net/LaBrea/>, but does not require dedicated
+ hardware or IPs. Any TCP port that you would normally DROP or REJECT
+ can instead become a tarpit.
+
config NETFILTER_XT_TARGET_TCPMSS
tristate '"TCPMSS" target support'
depends on NETFILTER_XTABLES && (IPV6 || IPV6=n)
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -45,6 +45,7 @@
obj-$(CONFIG_NETFILTER_XT_TARGET_NFLOG) += xt_NFLOG.o
obj-$(CONFIG_NETFILTER_XT_TARGET_NOTRACK) += xt_NOTRACK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_SECMARK) += xt_SECMARK.o
+obj-$(CONFIG_NETFILTER_XT_TARGET_TARPIT) += xt_TARPIT.o
obj-$(CONFIG_NETFILTER_XT_TARGET_TCPMSS) += xt_TCPMSS.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CONNSECMARK) += xt_CONNSECMARK.o
obj-$(CONFIG_NETFILTER_XT_TARGET_CHAOS) += xt_CHAOS.o
--- /dev/null
+++ b/net/netfilter/xt_TARPIT.c
@@ -0,0 +1,280 @@
+/*
+ * Kernel module to capture and hold incoming TCP connections using
+ * no local per-connection resources.
+ *
+ * Based on ipt_REJECT.c and offering functionality similar to
+ * LaBrea <http://www.hackbusters.net/LaBrea/>.
+ *
+ * Copyright (c) 2002 Aaron Hopkins <tools@die.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * Goal:
+ * - Allow incoming TCP connections to be established.
+ * - Passing data should result in the connection being switched to the
+ * persist state (0 byte window), in which the remote side stops sending
+ * data and asks to continue every 60 seconds.
+ * - Attempts to shut down the connection should be ignored completely, so
+ * the remote side ends up having to time it out.
+ *
+ * This means:
+ * - Reply to TCP SYN,!ACK,!RST,!FIN with SYN-ACK, window 5 bytes
+ * - Reply to TCP SYN,ACK,!RST,!FIN with RST to prevent spoofing
+ * - Reply to TCP !SYN,!RST,!FIN with ACK, window 0 bytes, rate-limited
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/icmp.h>
+struct in_device;
+#include <net/route.h>
+#include <linux/random.h>
+#include <linux/netfilter_ipv4/ip_tables.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/* Stolen from ip_finish_output2 */
+static int ip_direct_send(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb->dst;
+
+ if (dst->hh != NULL)
+ return neigh_hh_output(dst->hh, skb);
+ else if (dst->neighbour != NULL)
+ return dst->neighbour->output(skb);
+
+ if (net_ratelimit())
+ printk(KERN_DEBUG "TARPIT ip_direct_send: no header cache and no neighbor!\n");
+
+ kfree_skb(skb);
+ return -EINVAL;
+}
+
+
+/* Send reply */
+static void tarpit_tcp(const struct sk_buff *oskb, struct rtable *ort,
+ unsigned int local)
+{
+ struct sk_buff *nskb;
+ struct rtable *nrt;
+ struct tcphdr *otcph, *ntcph;
+ struct flowi fl = {};
+ unsigned int otcplen;
+ u_int16_t tmp;
+
+ const struct iphdr *oiph = ip_hdr(oskb);
+ struct iphdr *niph;
+
+ /* A truncated TCP header is not going to be useful */
+ if (oskb->len < ip_hdrlen(oskb) + sizeof(struct tcphdr))
+ return;
+
+ otcph = (void *)oiph + ip_hdrlen(oskb);
+ otcplen = oskb->len - ip_hdrlen(oskb);
+
+ /* No replies for RST or FIN */
+ if (otcph->rst || otcph->fin)
+ return;
+
+ /* No reply to !SYN,!ACK. Rate-limit replies to !SYN,ACKs */
+ if (!otcph->syn && (!otcph->ack || !xrlim_allow(&ort->u.dst, 1*HZ)))
+ return;
+
+ /* Check checksum. */
+ if (tcp_v4_check(otcplen, oiph->saddr, oiph->daddr,
+ csum_partial((char *)otcph, otcplen, 0)) != 0)
+ return;
+
+ /*
+ * Copy skb (even if skb is about to be dropped, we cannot just
+ * clone it because there may be other things, such as tcpdump,
+ * interested in it)
+ */
+ nskb = skb_copy(oskb, GFP_ATOMIC);
+ if (nskb == NULL)
+ return;
+
+ niph = ip_hdr(nskb);
+
+ /* This packet will not be the same as the other: clear nf fields */
+ nf_conntrack_put(nskb->nfct);
+ nskb->nfct = NULL;
+#ifdef CONFIG_NETFILTER_DEBUG
+ nskb->nf_debug = 0;
+#endif
+
+ ntcph = (void *)niph + ip_hdrlen(nskb);
+
+ /* Truncate to length (no data) */
+ ntcph->doff = sizeof(struct tcphdr)/4;
+ skb_trim(nskb, ip_hdrlen(nskb) + sizeof(struct tcphdr));
+ niph->tot_len = htons(nskb->len);
+
+ /* Swap source and dest */
+ niph->daddr = xchg(&niph->saddr, niph->daddr);
+ tmp = ntcph->source;
+ ntcph->source = ntcph->dest;
+ ntcph->dest = tmp;
+
+ /* Use supplied sequence number or make a new one */
+ ntcph->seq = otcph->ack ? otcph->ack_seq
+ : htonl(secure_tcp_sequence_number(niph->saddr,
+ niph->daddr,
+ ntcph->source,
+ ntcph->dest));
+
+ /* Our SYN-ACKs must have a >0 window */
+ ntcph->window = (otcph->syn && !otcph->ack) ? htons(5) : 0;
+
+ ntcph->urg_ptr = 0;
+
+ /* Reset flags */
+ ((u_int8_t *)ntcph)[13] = 0;
+
+ if (otcph->syn && otcph->ack) {
+ ntcph->rst = 1;
+ ntcph->ack_seq = 0;
+ } else {
+ ntcph->syn = otcph->syn;
+ ntcph->ack = 1;
+ ntcph->ack_seq = htonl(ntohl(otcph->seq) + otcph->syn);
+ }
+
+ /* Adjust TCP checksum */
+ ntcph->check = 0;
+ ntcph->check = tcp_v4_check(sizeof(struct tcphdr),
+ niph->saddr,
+ niph->daddr,
+ csum_partial((char *)ntcph,
+ sizeof(struct tcphdr), 0));
+
+ fl.nl_u.ip4_u.daddr = niph->daddr;
+ fl.nl_u.ip4_u.saddr = local ? niph->saddr : 0;
+ fl.nl_u.ip4_u.tos = RT_TOS(niph->tos) | RTO_CONN;
+ fl.oif = 0;
+
+ if (ip_route_output_key(&nrt, &fl))
+ goto free_nskb;
+
+ dst_release(nskb->dst);
+ nskb->dst = &nrt->u.dst;
+
+ /* Adjust IP TTL */
+ niph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
+
+ /* Set DF, id = 0 */
+ niph->frag_off = htons(IP_DF);
+ niph->id = 0;
+
+ /* Adjust IP checksum */
+ niph->check = 0;
+ niph->check = ip_fast_csum((unsigned char *)niph, niph->ihl);
+
+ /* "Never happens" */
+ if (nskb->len > dst_mtu(nskb->dst))
+ goto free_nskb;
+
+ ip_direct_send(nskb);
+ return;
+
+ free_nskb:
+ kfree_skb(nskb);
+}
+
+static unsigned int xt_tarpit_target(struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ unsigned int hooknum,
+ const struct xt_target *target,
+ const void *targinfo)
+{
+ const struct sk_buff *skb = *pskb;
+ const struct iphdr *iph = ip_hdr(skb);
+ struct rtable *rt = (void *)skb->dst;
+
+ /* Do we have an input route cache entry? */
+ if (rt == NULL)
+ return NF_DROP;
+
+ /* No replies to physical multicast/broadcast */
+ if (skb->pkt_type != PACKET_HOST && skb->pkt_type != PACKET_OTHERHOST)
+ return NF_DROP;
+
+ /* Now check at the protocol level */
+ if (rt->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
+ return NF_DROP;
+
+ /*
+ * Our naive response construction does not deal with IP
+ * options, and probably should not try.
+ */
+ if (iph->ihl * 4 != sizeof(struct iphdr))
+ return NF_DROP;
+
+ /* We are not interested in fragments */
+ if (iph->frag_off & htons(IP_OFFSET))
+ return NF_DROP;
+
+ tarpit_tcp(skb, rt, hooknum == NF_IP_LOCAL_IN);
+ return NF_DROP;
+}
+
+static int xt_tarpit_check(const char *tablename, const void *entry,
+ const struct xt_target *target, void *targinfo,
+ unsigned int hook_mask)
+{
+ bool invalid;
+
+ if (strcmp(tablename, "raw") == 0 && hook_mask == NF_IP_PRE_ROUTING)
+ return true;
+ if (strcmp(tablename, "filter") != 0)
+ return false;
+ invalid = hook_mask & ~((1 << NF_IP_LOCAL_IN) | (1 << NF_IP_FORWARD));
+ return !invalid;
+}
+
+static struct xt_target xt_tarpit_reg = {
+ .name = "TARPIT",
+ .family = AF_INET,
+ .proto = IPPROTO_TCP,
+ .target = xt_tarpit_target,
+ .checkentry = xt_tarpit_check,
+ .me = THIS_MODULE,
+};
+
+static int __init xt_tarpit_init(void)
+{
+ return xt_register_target(&xt_tarpit_reg);
+}
+
+static void __exit xt_tarpit_exit(void)
+{
+ xt_unregister_target(&xt_tarpit_reg);
+}
+
+module_init(xt_tarpit_init);
+module_exit(xt_tarpit_exit);
+MODULE_DESCRIPTION("netfilter xt_TARPIT target module");
+MODULE_AUTHOR("Jan Engelhardt <jengelh@gmx.de>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("ipt_TARPIT");
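
Editorial note: the reply policy described in the header comment of the deleted xt_TARPIT.c above reduces to a pure function of the incoming TCP flags. The stand-alone C sketch below is an illustration only, not part of the removed patch; the names (tcp_flags, tarpit_policy) are hypothetical and the rate limiting of zero-window ACKs is omitted.

#include <stdbool.h>
#include <stdio.h>

struct tcp_flags { bool syn, ack, rst, fin; };

struct tarpit_reply {
    bool send;           /* send any reply at all? */
    bool syn, ack, rst;  /* flags set in the reply */
    int  window;         /* advertised window in bytes */
};

static struct tarpit_reply tarpit_policy(struct tcp_flags in)
{
    struct tarpit_reply r = {0};

    if (in.rst || in.fin)          /* never answer RST or FIN */
        return r;
    if (!in.syn && !in.ack)        /* nothing to answer */
        return r;

    r.send = true;
    if (in.syn && !in.ack) {       /* new connection: SYN-ACK, 5-byte window */
        r.syn = r.ack = true;
        r.window = 5;
    } else if (in.syn && in.ack) { /* SYN-ACK sent to us: RST to stop spoofing */
        r.rst = true;
    } else {                       /* anything else: zero-window ACK (persist) */
        r.ack = true;
        r.window = 0;
    }
    return r;
}

int main(void)
{
    struct tcp_flags syn = { .syn = true };
    struct tarpit_reply r = tarpit_policy(syn);

    printf("reply to a bare SYN: syn=%d ack=%d rst=%d window=%d\n",
           r.syn, r.ack, r.rst, r.window);
    return 0;
}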


@@ -1,20 +0,0 @@
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -133,7 +133,7 @@
config NF_CONNTRACK_H323
tristate "H.323 protocol support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && NF_CONNTRACK && (IPV6 || IPV6=n)
+ depends on EXPERIMENTAL && NF_CONNTRACK
help
H.323 is a VoIP signalling protocol from ITU-T. As one of the most
important VoIP protocols, it is widely used by voice hardware and
@@ -398,7 +398,7 @@
config NETFILTER_XT_TARGET_TCPMSS
tristate '"TCPMSS" target support'
- depends on NETFILTER_XTABLES && (IPV6 || IPV6=n)
+ depends on NETFILTER_XTABLES
---help---
This option adds a `TCPMSS' target, which allows you to alter the
MSS value of TCP SYN packets, to control the maximum size for that


@@ -1,789 +0,0 @@
--- a/include/linux/pkt_sched.h
+++ b/include/linux/pkt_sched.h
@@ -146,8 +146,40 @@
*
* The only reason for this is efficiency, it is possible
* to change these parameters in compile time.
+ *
+ * If you need to play with these values use esfq instead.
*/
+/* ESFQ section */
+
+enum
+{
+ /* traditional */
+ TCA_SFQ_HASH_CLASSIC,
+ TCA_SFQ_HASH_DST,
+ TCA_SFQ_HASH_SRC,
+ TCA_SFQ_HASH_FWMARK,
+ /* direct */
+ TCA_SFQ_HASH_DSTDIR,
+ TCA_SFQ_HASH_SRCDIR,
+ TCA_SFQ_HASH_FWMARKDIR,
+ /* conntrack */
+ TCA_SFQ_HASH_CTORIGDST,
+ TCA_SFQ_HASH_CTORIGSRC,
+ TCA_SFQ_HASH_CTREPLDST,
+ TCA_SFQ_HASH_CTREPLSRC,
+};
+
+struct tc_esfq_qopt
+{
+ unsigned quantum; /* Bytes per round allocated to flow */
+ int perturb_period; /* Period of hash perturbation */
+ __u32 limit; /* Maximal packets in queue */
+ unsigned divisor; /* Hash divisor */
+ unsigned flows; /* Maximal number of flows */
+ unsigned hash_kind; /* Hash function to use for flow identification */
+};
+
/* RED section */
enum
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -133,6 +133,26 @@
To compile this code as a module, choose M here: the
module will be called sch_sfq.
+config NET_SCH_ESFQ
+ tristate "Enhanced Stochastic Fairness Queueing (ESFQ)"
+ ---help---
+ Say Y here if you want to use the Enhanced Stochastic Fairness
+ Queueing (ESFQ) packet scheduling algorithm for some of your network
+ devices or as a leaf discipline for a classful qdisc such as HTB or
+ CBQ (see the top of <file:net/sched/sch_esfq.c> for details and
+ references to the SFQ algorithm).
+
+ This is an enhanced SFQ version which allows you to control some
+ hardcoded values in the SFQ scheduler.
+
+ ESFQ also adds control of the hash function used to identify packet
+ flows. The original SFQ discipline hashes by connection; ESFQ adds
+ several other hashing methods, such as by src IP or by dst IP, which
+ can be more fair to users in some networking situations.
+
+ To compile this code as a module, choose M here: the
+ module will be called sch_esfq.
+
config NET_SCH_TEQL
tristate "True Link Equalizer (TEQL)"
---help---
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -23,6 +23,7 @@
obj-$(CONFIG_NET_SCH_INGRESS) += sch_ingress.o
obj-$(CONFIG_NET_SCH_DSMARK) += sch_dsmark.o
obj-$(CONFIG_NET_SCH_SFQ) += sch_sfq.o
+obj-$(CONFIG_NET_SCH_ESFQ) += sch_esfq.o
obj-$(CONFIG_NET_SCH_TBF) += sch_tbf.o
obj-$(CONFIG_NET_SCH_TEQL) += sch_teql.o
obj-$(CONFIG_NET_SCH_PRIO) += sch_prio.o
--- /dev/null
+++ b/net/sched/sch_esfq.c
@@ -0,0 +1,704 @@
+/*
+ * net/sched/sch_esfq.c Extended Stochastic Fairness Queueing discipline.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
+ *
+ * Changes: Alexander Atanasov, <alex@ssi.bg>
+ * Added dynamic depth,limit,divisor,hash_kind options.
+ * Added dst and src hashes.
+ *
+ * Alexander Clouter, <alex@digriz.org.uk>
+ * Ported ESFQ to Linux 2.6.
+ *
+ * Corey Hickey, <bugfood-c@fatooh.org>
+ * Maintenance of the Linux 2.6 port.
+ * Added fwmark hash (thanks to Robert Kurjata).
+ * Added direct hashing for src, dst, and fwmark.
+ * Added usage of jhash.
+ *
+ */
+
+#include <linux/module.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/jiffies.h>
+#include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/in.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/notifier.h>
+#include <linux/init.h>
+#include <net/ip.h>
+#include <linux/ipv6.h>
+#include <net/route.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/pkt_sched.h>
+#include <linux/jhash.h>
+
+#ifdef CONFIG_NF_CONNTRACK_ENABLED
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
+/* Stochastic Fairness Queuing algorithm.
+ For more comments look at sch_sfq.c.
+ The difference is that you can change limit, depth,
+ hash table size and choose alternate hash types.
+
+ classic: same as in sch_sfq.c
+ dst: destination IP address
+ src: source IP address
+ fwmark: netfilter mark value
+ dst_direct:
+ src_direct:
+ fwmark_direct: direct hashing of the above sources
+ ctorigdst: original destination IP address
+ ctorigsrc: original source IP address
+ ctrepldst: reply destination IP address
+ ctreplsrc: reply source IP
+
+*/
+
+
+/* This type should contain at least SFQ_DEPTH*2 values */
+typedef unsigned int esfq_index;
+
+struct esfq_head
+{
+ esfq_index next;
+ esfq_index prev;
+};
+
+struct esfq_sched_data
+{
+/* Parameters */
+ int perturb_period;
+ unsigned quantum; /* Allotment per round: MUST BE >= MTU */
+ int limit;
+ unsigned depth;
+ unsigned hash_divisor;
+ unsigned hash_kind;
+/* Variables */
+ struct timer_list perturb_timer;
+ int perturbation;
+ esfq_index tail; /* Index of current slot in round */
+ esfq_index max_depth; /* Maximal depth */
+
+ esfq_index *ht; /* Hash table */
+ esfq_index *next; /* Active slots link */
+ short *allot; /* Current allotment per slot */
+ unsigned short *hash; /* Hash value indexed by slots */
+ struct sk_buff_head *qs; /* Slot queue */
+ struct esfq_head *dep; /* Linked list of slots, indexed by depth */
+ unsigned dyn_min; /* For dynamic divisor adjustment; minimum value seen */
+ unsigned dyn_max; /* maximum value seen */
+ unsigned dyn_range; /* saved range */
+};
+
+/* This contains the info we will hash. */
+struct esfq_packet_info
+{
+ u32 proto; /* protocol or port */
+ u32 src; /* source from packet header */
+ u32 dst; /* destination from packet header */
+ u32 ctorigsrc; /* original source from conntrack */
+ u32 ctorigdst; /* original destination from conntrack */
+ u32 ctreplsrc; /* reply source from conntrack */
+ u32 ctrepldst; /* reply destination from conntrack */
+ u32 mark; /* netfilter mark (fwmark) */
+};
+
+/* Hash input values directly into the "nearest" slot, taking into account the
+ * range of input values seen. This is most useful when the hash table is at
+ * least as large as the range of possible values.
+ * Note: this functionality was added before the change to using jhash, and may
+ * no longer be useful. */
+static __inline__ unsigned esfq_hash_direct(struct esfq_sched_data *q, u32 h)
+{
+ /* adjust minimum and maximum */
+ if (h < q->dyn_min || h > q->dyn_max) {
+ q->dyn_min = h < q->dyn_min ? h : q->dyn_min;
+ q->dyn_max = h > q->dyn_max ? h : q->dyn_max;
+
+ /* find new range */
+ if ((q->dyn_range = q->dyn_max - q->dyn_min) >= q->hash_divisor)
+ printk(KERN_WARNING "ESFQ: (direct hash) Input range %u is larger than hash "
+ "table. See ESFQ README for details.\n", q->dyn_range);
+ }
+
+ /* hash input values into slot numbers */
+ if (q->dyn_min == q->dyn_max)
+ return 0; /* only one value seen; avoid division by 0 */
+ else
+ return (h - q->dyn_min) * (q->hash_divisor - 1) / q->dyn_range;
+}
+
+static __inline__ unsigned esfq_jhash_1word(struct esfq_sched_data *q,u32 a)
+{
+ return jhash_1word(a, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_2words(struct esfq_sched_data *q, u32 a, u32 b)
+{
+ return jhash_2words(a, b, q->perturbation) & (q->hash_divisor-1);
+}
+
+static __inline__ unsigned esfq_jhash_3words(struct esfq_sched_data *q, u32 a, u32 b, u32 c)
+{
+ return jhash_3words(a, b, c, q->perturbation) & (q->hash_divisor-1);
+}
+
+
+static unsigned esfq_hash(struct esfq_sched_data *q, struct sk_buff *skb)
+{
+ struct esfq_packet_info info;
+#ifdef CONFIG_NF_CONNTRACK_ENABLED
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+#endif
+
+ switch (skb->protocol) {
+ case __constant_htons(ETH_P_IP):
+ {
+ struct iphdr *iph = ip_hdr(skb);
+ info.dst = iph->daddr;
+ info.src = iph->saddr;
+ if (!(iph->frag_off&htons(IP_MF|IP_OFFSET)) &&
+ (iph->protocol == IPPROTO_TCP ||
+ iph->protocol == IPPROTO_UDP ||
+ iph->protocol == IPPROTO_SCTP ||
+ iph->protocol == IPPROTO_DCCP ||
+ iph->protocol == IPPROTO_ESP))
+ info.proto = *(((u32*)iph) + iph->ihl);
+ else
+ info.proto = iph->protocol;
+ break;
+ }
+ case __constant_htons(ETH_P_IPV6):
+ {
+ struct ipv6hdr *iph = ipv6_hdr(skb);
+ /* Hash ipv6 addresses into a u32. This isn't ideal,
+ * but the code is simple. */
+ info.dst = jhash2(iph->daddr.s6_addr32, 4, q->perturbation);
+ info.src = jhash2(iph->saddr.s6_addr32, 4, q->perturbation);
+ if (iph->nexthdr == IPPROTO_TCP ||
+ iph->nexthdr == IPPROTO_UDP ||
+ iph->nexthdr == IPPROTO_SCTP ||
+ iph->nexthdr == IPPROTO_DCCP ||
+ iph->nexthdr == IPPROTO_ESP)
+ info.proto = *(u32*)&iph[1];
+ else
+ info.proto = iph->nexthdr;
+ break;
+ }
+ default:
+ info.dst = (u32)(unsigned long)skb->dst;
+ info.src = (u32)(unsigned long)skb->sk;
+ info.proto = skb->protocol;
+ }
+
+ info.mark = skb->mark;
+
+#ifdef CONFIG_NF_CONNTRACK_ENABLED
+ /* defaults if there is no conntrack info */
+ info.ctorigsrc = info.src;
+ info.ctorigdst = info.dst;
+ info.ctreplsrc = info.dst;
+ info.ctrepldst = info.src;
+ /* collect conntrack info */
+ if (ct && ct != &nf_conntrack_untracked) {
+ if (skb->protocol == __constant_htons(ETH_P_IP)) {
+ info.ctorigsrc = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip;
+ info.ctorigdst = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip;
+ info.ctreplsrc = ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip;
+ info.ctrepldst = ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip;
+ }
+ else if (skb->protocol == __constant_htons(ETH_P_IPV6)) {
+ /* Again, hash ipv6 addresses into a single u32. */
+ info.ctorigsrc = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctorigdst = jhash2(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip6, 4, q->perturbation);
+ info.ctreplsrc = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip6, 4, q->perturbation);
+ info.ctrepldst = jhash2(ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip6, 4, q->perturbation);
+ }
+
+ }
+#endif
+
+ switch(q->hash_kind)
+ {
+ case TCA_SFQ_HASH_CLASSIC:
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+ case TCA_SFQ_HASH_DST:
+ return esfq_jhash_1word(q, info.dst);
+ case TCA_SFQ_HASH_DSTDIR:
+ return esfq_hash_direct(q, ntohl(info.dst));
+ case TCA_SFQ_HASH_SRC:
+ return esfq_jhash_1word(q, info.src);
+ case TCA_SFQ_HASH_SRCDIR:
+ return esfq_hash_direct(q, ntohl(info.src));
+ case TCA_SFQ_HASH_FWMARK:
+ return esfq_jhash_1word(q, info.mark);
+ case TCA_SFQ_HASH_FWMARKDIR:
+ return esfq_hash_direct(q, info.mark);
+#ifdef CONFIG_NF_CONNTRACK_ENABLED
+ case TCA_SFQ_HASH_CTORIGDST:
+ return esfq_jhash_1word(q, info.ctorigdst);
+ case TCA_SFQ_HASH_CTORIGSRC:
+ return esfq_jhash_1word(q, info.ctorigsrc);
+ case TCA_SFQ_HASH_CTREPLDST:
+ return esfq_jhash_1word(q, info.ctrepldst);
+ case TCA_SFQ_HASH_CTREPLSRC:
+ return esfq_jhash_1word(q, info.ctreplsrc);
+#endif
+ default:
+ if (net_ratelimit())
+ printk(KERN_WARNING "ESFQ: Unknown hash method. Falling back to classic.\n");
+ }
+ return esfq_jhash_3words(q, info.dst, info.src, info.proto);
+}
+
+static inline void esfq_link(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d = q->qs[x].qlen + q->depth;
+
+ p = d;
+ n = q->dep[d].next;
+ q->dep[x].next = n;
+ q->dep[x].prev = p;
+ q->dep[p].next = q->dep[n].prev = x;
+}
+
+static inline void esfq_dec(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+
+ if (n == p && q->max_depth == q->qs[x].qlen + 1)
+ q->max_depth--;
+
+ esfq_link(q, x);
+}
+
+static inline void esfq_inc(struct esfq_sched_data *q, esfq_index x)
+{
+ esfq_index p, n;
+ int d;
+
+ n = q->dep[x].next;
+ p = q->dep[x].prev;
+ q->dep[p].next = n;
+ q->dep[n].prev = p;
+ d = q->qs[x].qlen;
+ if (q->max_depth < d)
+ q->max_depth = d;
+
+ esfq_link(q, x);
+}
+
+static unsigned int esfq_drop(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ esfq_index d = q->max_depth;
+ struct sk_buff *skb;
+ unsigned int len;
+
+ /* Queue is full! Find the longest slot and
+ drop a packet from it */
+
+ if (d > 1) {
+ esfq_index x = q->dep[d+q->depth].next;
+ skb = q->qs[x].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[x]);
+ kfree_skb(skb);
+ esfq_dec(q, x);
+ sch->q.qlen--;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ if (d == 1) {
+ /* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
+ d = q->next[q->tail];
+ q->next[q->tail] = q->next[d];
+ q->allot[q->next[d]] += q->quantum;
+ skb = q->qs[d].prev;
+ len = skb->len;
+ __skb_unlink(skb, &q->qs[d]);
+ kfree_skb(skb);
+ esfq_dec(q, d);
+ sch->q.qlen--;
+ q->ht[q->hash[d]] = q->depth;
+ sch->qstats.drops++;
+ sch->qstats.backlog -= len;
+ return len;
+ }
+
+ return 0;
+}
+
+static int
+esfq_enqueue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ unsigned hash = esfq_hash(q, skb);
+ unsigned depth = q->depth;
+ esfq_index x;
+
+ x = q->ht[hash];
+ if (x == depth) {
+ q->ht[hash] = x = q->dep[depth].next;
+ q->hash[x] = hash;
+ }
+ sch->qstats.backlog += skb->len;
+ __skb_queue_tail(&q->qs[x], skb);
+ esfq_inc(q, x);
+ if (q->qs[x].qlen == 1) { /* The flow is new */
+ if (q->tail == depth) { /* It is the first flow */
+ q->tail = x;
+ q->next[x] = x;
+ q->allot[x] = q->quantum;
+ } else {
+ q->next[x] = q->next[q->tail];
+ q->next[q->tail] = x;
+ q->tail = x;
+ }
+ }
+ if (++sch->q.qlen < q->limit-1) {
+ sch->bstats.bytes += skb->len;
+ sch->bstats.packets++;
+ return 0;
+ }
+
+ esfq_drop(sch);
+ return NET_XMIT_CN;
+}
+
+static int
+esfq_requeue(struct sk_buff *skb, struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ unsigned hash = esfq_hash(q, skb);
+ unsigned depth = q->depth;
+ esfq_index x;
+
+ x = q->ht[hash];
+ if (x == depth) {
+ q->ht[hash] = x = q->dep[depth].next;
+ q->hash[x] = hash;
+ }
+ sch->qstats.backlog += skb->len;
+ __skb_queue_head(&q->qs[x], skb);
+ esfq_inc(q, x);
+ if (q->qs[x].qlen == 1) { /* The flow is new */
+ if (q->tail == depth) { /* It is the first flow */
+ q->tail = x;
+ q->next[x] = x;
+ q->allot[x] = q->quantum;
+ } else {
+ q->next[x] = q->next[q->tail];
+ q->next[q->tail] = x;
+ q->tail = x;
+ }
+ }
+ if (++sch->q.qlen < q->limit - 1) {
+ sch->qstats.requeues++;
+ return 0;
+ }
+
+ sch->qstats.drops++;
+ esfq_drop(sch);
+ return NET_XMIT_CN;
+}
+
+
+
+
+static struct sk_buff *
+esfq_dequeue(struct Qdisc* sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct sk_buff *skb;
+ unsigned depth = q->depth;
+ esfq_index a, old_a;
+
+ /* No active slots */
+ if (q->tail == depth)
+ return NULL;
+
+ a = old_a = q->next[q->tail];
+
+ /* Grab packet */
+ skb = __skb_dequeue(&q->qs[a]);
+ esfq_dec(q, a);
+ sch->q.qlen--;
+ sch->qstats.backlog -= skb->len;
+
+ /* Is the slot empty? */
+ if (q->qs[a].qlen == 0) {
+ q->ht[q->hash[a]] = depth;
+ a = q->next[a];
+ if (a == old_a) {
+ q->tail = depth;
+ return skb;
+ }
+ q->next[q->tail] = a;
+ q->allot[a] += q->quantum;
+ } else if ((q->allot[a] -= skb->len) <= 0) {
+ q->tail = a;
+ a = q->next[a];
+ q->allot[a] += q->quantum;
+ }
+
+ return skb;
+}
+
+static void
+esfq_reset(struct Qdisc* sch)
+{
+ struct sk_buff *skb;
+
+ while ((skb = esfq_dequeue(sch)) != NULL)
+ kfree_skb(skb);
+}
+
+static void esfq_perturbation(unsigned long arg)
+{
+ struct Qdisc *sch = (struct Qdisc*)arg;
+ struct esfq_sched_data *q = qdisc_priv(sch);
+
+ q->perturbation = net_random()&0x1F;
+
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+}
+
+static int esfq_change(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct tc_esfq_qopt *ctl = RTA_DATA(opt);
+ int old_perturb = q->perturb_period;
+
+ if (opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
+ return -EINVAL;
+
+ sch_tree_lock(sch);
+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+ q->perturb_period = ctl->perturb_period*HZ;
+// q->hash_divisor = ctl->divisor;
+// q->tail = q->limit = q->depth = ctl->flows;
+
+ if (ctl->limit)
+ q->limit = min_t(u32, ctl->limit, q->depth);
+
+ if (ctl->hash_kind) {
+ q->hash_kind = ctl->hash_kind;
+ if (q->hash_kind != TCA_SFQ_HASH_CLASSIC)
+ q->perturb_period = 0;
+ }
+
+ // is sch_tree_lock enough to do this ?
+ while (sch->q.qlen >= q->limit-1)
+ esfq_drop(sch);
+
+ if (old_perturb)
+ del_timer(&q->perturb_timer);
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ } else {
+ q->perturbation = 0;
+ }
+ sch_tree_unlock(sch);
+ return 0;
+}
+
+static int esfq_init(struct Qdisc *sch, struct rtattr *opt)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ struct tc_esfq_qopt *ctl;
+ esfq_index p = ~0U/2;
+ int i;
+
+ if (opt && opt->rta_len < RTA_LENGTH(sizeof(*ctl)))
+ return -EINVAL;
+
+ init_timer(&q->perturb_timer);
+ q->perturb_timer.data = (unsigned long)sch;
+ q->perturb_timer.function = esfq_perturbation;
+ q->perturbation = 0;
+ q->hash_kind = TCA_SFQ_HASH_CLASSIC;
+ q->max_depth = 0;
+ q->dyn_min = ~0U; /* maximum value for this type */
+ q->dyn_max = 0; /* dyn_min/dyn_max will be set properly upon first packet */
+ if (opt == NULL) {
+ q->quantum = psched_mtu(sch->dev);
+ q->perturb_period = 0;
+ q->hash_divisor = 1024;
+ q->tail = q->limit = q->depth = 128;
+
+ } else {
+ ctl = RTA_DATA(opt);
+ q->quantum = ctl->quantum ? : psched_mtu(sch->dev);
+ q->perturb_period = ctl->perturb_period*HZ;
+ q->hash_divisor = ctl->divisor ? : 1024;
+ q->tail = q->limit = q->depth = ctl->flows ? : 128;
+
+ if ( q->depth > p - 1 )
+ return -EINVAL;
+
+ if (ctl->limit)
+ q->limit = min_t(u32, ctl->limit, q->depth);
+
+ if (ctl->hash_kind) {
+ q->hash_kind = ctl->hash_kind;
+ }
+
+ if (q->perturb_period) {
+ q->perturb_timer.expires = jiffies + q->perturb_period;
+ add_timer(&q->perturb_timer);
+ }
+ }
+
+ q->ht = kmalloc(q->hash_divisor*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->ht)
+ goto err_case;
+
+ q->dep = kmalloc((1+q->depth*2)*sizeof(struct esfq_head), GFP_KERNEL);
+ if (!q->dep)
+ goto err_case;
+ q->next = kmalloc(q->depth*sizeof(esfq_index), GFP_KERNEL);
+ if (!q->next)
+ goto err_case;
+
+ q->allot = kmalloc(q->depth*sizeof(short), GFP_KERNEL);
+ if (!q->allot)
+ goto err_case;
+ q->hash = kmalloc(q->depth*sizeof(unsigned short), GFP_KERNEL);
+ if (!q->hash)
+ goto err_case;
+ q->qs = kmalloc(q->depth*sizeof(struct sk_buff_head), GFP_KERNEL);
+ if (!q->qs)
+ goto err_case;
+
+ for (i=0; i< q->hash_divisor; i++)
+ q->ht[i] = q->depth;
+ for (i=0; i<q->depth; i++) {
+ skb_queue_head_init(&q->qs[i]);
+ q->dep[i+q->depth].next = i+q->depth;
+ q->dep[i+q->depth].prev = i+q->depth;
+ }
+
+ for (i=0; i<q->depth; i++)
+ esfq_link(q, i);
+ return 0;
+err_case:
+ del_timer(&q->perturb_timer);
+ if (q->ht)
+ kfree(q->ht);
+ if (q->dep)
+ kfree(q->dep);
+ if (q->next)
+ kfree(q->next);
+ if (q->allot)
+ kfree(q->allot);
+ if (q->hash)
+ kfree(q->hash);
+ if (q->qs)
+ kfree(q->qs);
+ return -ENOBUFS;
+}
+
+static void esfq_destroy(struct Qdisc *sch)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ del_timer(&q->perturb_timer);
+ if(q->ht)
+ kfree(q->ht);
+ if(q->dep)
+ kfree(q->dep);
+ if(q->next)
+ kfree(q->next);
+ if(q->allot)
+ kfree(q->allot);
+ if(q->hash)
+ kfree(q->hash);
+ if(q->qs)
+ kfree(q->qs);
+}
+
+static int esfq_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+ struct esfq_sched_data *q = qdisc_priv(sch);
+ unsigned char *b = skb->tail;
+ struct tc_esfq_qopt opt;
+
+ opt.quantum = q->quantum;
+ opt.perturb_period = q->perturb_period/HZ;
+
+ opt.limit = q->limit;
+ opt.divisor = q->hash_divisor;
+ opt.flows = q->depth;
+ opt.hash_kind = q->hash_kind;
+
+ RTA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);
+
+ return skb->len;
+
+rtattr_failure:
+ skb_trim(skb, b - skb->data);
+ return -1;
+}
+
+static struct Qdisc_ops esfq_qdisc_ops =
+{
+ .next = NULL,
+ .cl_ops = NULL,
+ .id = "esfq",
+ .priv_size = sizeof(struct esfq_sched_data),
+ .enqueue = esfq_enqueue,
+ .dequeue = esfq_dequeue,
+ .requeue = esfq_requeue,
+ .drop = esfq_drop,
+ .init = esfq_init,
+ .reset = esfq_reset,
+ .destroy = esfq_destroy,
+ .change = NULL, /* esfq_change - needs more work */
+ .dump = esfq_dump,
+ .owner = THIS_MODULE,
+};
+
+static int __init esfq_module_init(void)
+{
+ return register_qdisc(&esfq_qdisc_ops);
+}
+static void __exit esfq_module_exit(void)
+{
+ unregister_qdisc(&esfq_qdisc_ops);
+}
+module_init(esfq_module_init)
+module_exit(esfq_module_exit)
+MODULE_LICENSE("GPL");
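
Editorial note: the esfq_hash_direct() helper in the deleted sch_esfq.c above scales an input value linearly onto the hash-table slots using the minimum and maximum values seen so far. A minimal user-space sketch of that arithmetic follows; the divisor of 8 and the name direct_slot are hypothetical, and the snippet is not part of the removed patch.

#include <stdio.h>

/* slot = (h - min) * (divisor - 1) / (max - min), as in esfq_hash_direct() */
static unsigned direct_slot(unsigned h, unsigned min, unsigned max,
                            unsigned divisor)
{
    if (min == max)            /* only one value seen so far: avoid div by 0 */
        return 0;
    return (h - min) * (divisor - 1) / (max - min);
}

int main(void)
{
    unsigned min = 10, max = 17, divisor = 8;   /* e.g. fwmarks 10..17 */

    for (unsigned h = min; h <= max; h++)
        printf("value %u -> slot %u\n", h, direct_slot(h, min, max, divisor));
    return 0;
}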


@@ -1,336 +0,0 @@
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -293,6 +293,8 @@
#define RTNH_F_DEAD 1 /* Nexthop is dead (used by multipath) */
#define RTNH_F_PERVASIVE 2 /* Do recursive gateway lookup */
#define RTNH_F_ONLINK 4 /* Gateway is forced on link */
+#define RTNH_F_SUSPECT 8 /* We don't know the real state */
+#define RTNH_F_BADSTATE (RTNH_F_DEAD | RTNH_F_SUSPECT)
/* Macros to handle nexthops */
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -19,6 +19,8 @@
struct {
__be32 daddr;
__be32 saddr;
+ __u32 lsrc;
+ __u32 gw;
__u8 tos;
__u8 scope;
} ip4_u;
@@ -43,6 +45,8 @@
#define fl6_flowlabel nl_u.ip6_u.flowlabel
#define fl4_dst nl_u.ip4_u.daddr
#define fl4_src nl_u.ip4_u.saddr
+#define fl4_lsrc nl_u.ip4_u.lsrc
+#define fl4_gw nl_u.ip4_u.gw
#define fl4_tos nl_u.ip4_u.tos
#define fl4_scope nl_u.ip4_u.scope
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1208,6 +1208,7 @@
/* Gateway is different ... */
rt->rt_gateway = new_gw;
+ if (rt->fl.fl4_gw) rt->fl.fl4_gw = new_gw;
/* Redirect received -> path was valid */
dst_confirm(&rth->u.dst);
@@ -1643,6 +1644,7 @@
rth->fl.fl4_tos = tos;
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
+ rth->fl.fl4_lsrc = 0;
rth->rt_src = saddr;
#ifdef CONFIG_NET_CLS_ROUTE
rth->u.dst.tclassid = itag;
@@ -1653,6 +1655,7 @@
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
rth->fl.oif = 0;
+ rth->fl.fl4_gw = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
rth->rt_type = RTN_MULTICAST;
@@ -1716,7 +1719,7 @@
static inline int __mkroute_input(struct sk_buff *skb,
struct fib_result* res,
struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos,
+ __be32 daddr, __be32 saddr, u32 tos, u32 lsrc,
struct rtable **result)
{
@@ -1751,6 +1754,7 @@
flags |= RTCF_DIRECTSRC;
if (out_dev == in_dev && err && !(flags & (RTCF_NAT | RTCF_MASQ)) &&
+ !lsrc &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
flags |= RTCF_DOREDIRECT;
@@ -1788,6 +1792,7 @@
rth->fl.mark = skb->mark;
rth->fl.fl4_src = saddr;
rth->rt_src = saddr;
+ rth->fl.fl4_lsrc = lsrc;
rth->rt_gateway = daddr;
rth->rt_iif =
rth->fl.iif = in_dev->dev->ifindex;
@@ -1795,6 +1800,7 @@
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
rth->fl.oif = 0;
+ rth->fl.fl4_gw = 0;
rth->rt_spec_dst= spec_dst;
rth->u.dst.input = ip_forward;
@@ -1816,19 +1822,21 @@
struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos)
+ __be32 daddr, __be32 saddr, u32 tos,
+ u32 lsrc)
{
struct rtable* rth = NULL;
int err;
unsigned hash;
+ fib_select_default(fl, res);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res->fi && res->fi->fib_nhs > 1 && fl->oif == 0)
+ if (res->fi && res->fi->fib_nhs > 1)
fib_select_multipath(fl, res);
#endif
/* create a routing cache entry */
- err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, &rth);
+ err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos, lsrc, &rth);
if (err)
return err;
@@ -1841,7 +1849,8 @@
struct fib_result* res,
const struct flowi *fl,
struct in_device *in_dev,
- __be32 daddr, __be32 saddr, u32 tos)
+ __be32 daddr, __be32 saddr, u32 tos,
+ u32 lsrc)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
struct rtable* rth = NULL, *rtres;
@@ -1857,7 +1866,7 @@
/* distinguish between multipath and singlepath */
if (hopcount < 2)
return ip_mkroute_input_def(skb, res, fl, in_dev, daddr,
- saddr, tos);
+ saddr, tos, 0);
/* add all alternatives to the routing cache */
for (hop = 0; hop < hopcount; hop++) {
@@ -1869,7 +1878,7 @@
/* create a routing cache entry */
err = __mkroute_input(skb, res, in_dev, daddr, saddr, tos,
- &rth);
+ 0, &rth);
if (err)
return err;
@@ -1889,7 +1898,7 @@
skb->dst = &rtres->u.dst;
return err;
#else /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
- return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos);
+ return ip_mkroute_input_def(skb, res, fl, in_dev, daddr, saddr, tos, lsrc);
#endif /* CONFIG_IP_ROUTE_MULTIPATH_CACHED */
}
@@ -1905,18 +1914,18 @@
*/
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+ u8 tos, struct net_device *dev, u32 lsrc)
{
struct fib_result res;
struct in_device *in_dev = in_dev_get(dev);
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = daddr,
- .saddr = saddr,
+ .saddr = lsrc ? : saddr,
.tos = tos,
.scope = RT_SCOPE_UNIVERSE,
} },
.mark = skb->mark,
- .iif = dev->ifindex };
+ .iif = lsrc? loopback_dev.ifindex : dev->ifindex };
unsigned flags = 0;
u32 itag = 0;
struct rtable * rth;
@@ -1949,6 +1958,12 @@
if (BADCLASS(daddr) || ZERONET(daddr) || LOOPBACK(daddr))
goto martian_destination;
+ if (lsrc) {
+ if (MULTICAST(lsrc) || BADCLASS(lsrc) ||
+ ZERONET(lsrc) || LOOPBACK(lsrc))
+ goto e_inval;
+ }
+
/*
* Now we are ready to route packet.
*/
@@ -1958,6 +1973,10 @@
goto no_route;
}
free_res = 1;
+ if (lsrc && res.type != RTN_UNICAST && res.type != RTN_NAT)
+ goto e_inval;
+ fl.iif = dev->ifindex;
+ fl.fl4_src = saddr;
RT_CACHE_STAT_INC(in_slow_tot);
@@ -1982,7 +2001,7 @@
if (res.type != RTN_UNICAST)
goto martian_destination;
- err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos);
+ err = ip_mkroute_input(skb, &res, &fl, in_dev, daddr, saddr, tos, lsrc);
if (err == -ENOBUFS)
goto e_nobufs;
if (err == -EINVAL)
@@ -1997,6 +2016,8 @@
brd_input:
if (skb->protocol != htons(ETH_P_IP))
goto e_inval;
+ if (lsrc)
+ goto e_inval;
if (ZERONET(saddr))
spec_dst = inet_select_addr(dev, 0, RT_SCOPE_LINK);
@@ -2037,6 +2058,7 @@
rth->u.dst.dev = &loopback_dev;
dev_hold(rth->u.dst.dev);
rth->idev = in_dev_get(rth->u.dst.dev);
+ rth->fl.fl4_gw = 0;
rth->rt_gateway = daddr;
rth->rt_spec_dst= spec_dst;
rth->u.dst.input= ip_local_deliver;
@@ -2086,8 +2108,9 @@
goto e_inval;
}
-int ip_route_input(struct sk_buff *skb, __be32 daddr, __be32 saddr,
- u8 tos, struct net_device *dev)
+static inline int
+ip_route_input_cached(struct sk_buff *skb, __be32 daddr, __be32 saddr,
+ u8 tos, struct net_device *dev, u32 lsrc)
{
struct rtable * rth;
unsigned hash;
@@ -2102,6 +2125,7 @@
if (rth->fl.fl4_dst == daddr &&
rth->fl.fl4_src == saddr &&
rth->fl.iif == iif &&
+ rth->fl.fl4_lsrc == lsrc &&
rth->fl.oif == 0 &&
rth->fl.mark == skb->mark &&
rth->fl.fl4_tos == tos) {
@@ -2148,7 +2172,19 @@
rcu_read_unlock();
return -EINVAL;
}
- return ip_route_input_slow(skb, daddr, saddr, tos, dev);
+ return ip_route_input_slow(skb, daddr, saddr, tos, dev, lsrc);
+}
+
+int ip_route_input(struct sk_buff *skb, u32 daddr, u32 saddr,
+ u8 tos, struct net_device *dev)
+{
+ return ip_route_input_cached(skb, daddr, saddr, tos, dev, 0);
+}
+
+int ip_route_input_lookup(struct sk_buff *skb, u32 daddr, u32 saddr,
+ u8 tos, struct net_device *dev, u32 lsrc)
+{
+ return ip_route_input_cached(skb, daddr, saddr, tos, dev, lsrc);
}
static inline int __mkroute_output(struct rtable **result,
@@ -2227,6 +2263,7 @@
rth->fl.fl4_tos = tos;
rth->fl.fl4_src = oldflp->fl4_src;
rth->fl.oif = oldflp->oif;
+ rth->fl.fl4_gw = oldflp->fl4_gw;
rth->fl.mark = oldflp->mark;
rth->rt_dst = fl->fl4_dst;
rth->rt_src = fl->fl4_src;
@@ -2367,6 +2404,7 @@
struct flowi fl = { .nl_u = { .ip4_u =
{ .daddr = oldflp->fl4_dst,
.saddr = oldflp->fl4_src,
+ .gw = oldflp->fl4_gw,
.tos = tos & IPTOS_RT_MASK,
.scope = ((tos & RTO_ONLINK) ?
RT_SCOPE_LINK :
@@ -2470,6 +2508,7 @@
dev_out = &loopback_dev;
dev_hold(dev_out);
fl.oif = loopback_dev.ifindex;
+ fl.fl4_gw = 0;
res.type = RTN_LOCAL;
flags |= RTCF_LOCAL;
goto make_route;
@@ -2477,7 +2516,7 @@
if (fib_lookup(&fl, &res)) {
res.fi = NULL;
- if (oldflp->oif) {
+ if (oldflp->oif && dev_out->flags & IFF_UP) {
/* Apparently, routing tables are wrong. Assume,
that the destination is on link.
@@ -2517,6 +2556,7 @@
dev_out = &loopback_dev;
dev_hold(dev_out);
fl.oif = dev_out->ifindex;
+ fl.fl4_gw = 0;
if (res.fi)
fib_info_put(res.fi);
res.fi = NULL;
@@ -2524,13 +2564,12 @@
goto make_route;
}
+ if (res.type == RTN_UNICAST)
+ fib_select_default(&fl, &res);
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res.fi->fib_nhs > 1 && fl.oif == 0)
+ if (res.fi->fib_nhs > 1)
fib_select_multipath(&fl, &res);
- else
#endif
- if (!res.prefixlen && res.type == RTN_UNICAST && !fl.oif)
- fib_select_default(&fl, &res);
if (!fl.fl4_src)
fl.fl4_src = FIB_RES_PREFSRC(res);
@@ -2567,6 +2606,7 @@
rth->fl.fl4_src == flp->fl4_src &&
rth->fl.iif == 0 &&
rth->fl.oif == flp->oif &&
+ rth->fl.fl4_gw == flp->fl4_gw &&
rth->fl.mark == flp->mark &&
!((rth->fl.fl4_tos ^ flp->fl4_tos) &
(IPTOS_RT_MASK | RTO_ONLINK))) {
@@ -3267,3 +3307,4 @@
EXPORT_SYMBOL(__ip_select_ident);
EXPORT_SYMBOL(ip_route_input);
EXPORT_SYMBOL(ip_route_output_key);
+EXPORT_SYMBOL(ip_route_input_lookup);
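
Editorial note: the routing patch above widens the IPv4 flow key with fl4_lsrc and fl4_gw and makes the route-cache hit tests compare them as well (see the ip_route_input_cached and output-path hunks). The sketch below models only the extended input-path comparison in user-space C; the struct and names are simplified placeholders, not kernel code from the removed patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct flow_key {                /* simplified stand-in for struct flowi */
    uint32_t daddr, saddr;
    uint32_t lsrc;               /* preferred local source added by the patch */
    int      iif, oif;
    uint32_t mark;
    uint8_t  tos;
};

/* mirrors the extended hit test in ip_route_input_cached() above */
static bool input_cache_hit(const struct flow_key *cached,
                            const struct flow_key *want)
{
    return cached->daddr == want->daddr &&
           cached->saddr == want->saddr &&
           cached->iif   == want->iif   &&
           cached->lsrc  == want->lsrc  &&   /* new comparison */
           cached->oif   == 0            &&
           cached->mark  == want->mark   &&
           cached->tos   == want->tos;
}

int main(void)
{
    struct flow_key cached = { .daddr = 1, .saddr = 2, .iif = 3 };
    struct flow_key same = cached;
    struct flow_key other = cached;

    other.lsrc = 0x0a000001;     /* same flow, different preferred source */
    printf("hit, identical key: %d\n", input_cache_hit(&cached, &same));
    printf("miss, lsrc differs: %d\n", input_cache_hit(&cached, &other));
    return 0;
}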


@@ -1,12 +0,0 @@
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -589,6 +589,9 @@
core-$(CONFIG_TOSHIBA_RBTX4938) += arch/mips/tx4938/common/
load-$(CONFIG_TOSHIBA_RBTX4938) += 0xffffffff80100000
+# temporary until string.h is fixed
+cflags-y += -ffreestanding
+
cflags-y += -Iinclude/asm-mips/mach-generic
drivers-$(CONFIG_PCI) += arch/mips/pci/


@@ -1,56 +0,0 @@
--- a/fs/jffs2/build.c
+++ b/fs/jffs2/build.c
@@ -105,6 +105,17 @@
dbg_fsbuild("scanned flash completely\n");
jffs2_dbg_dump_block_lists_nolock(c);
+ if (c->flags & (1 << 7)) {
+ printk("%s(): unlocking the mtd device... ", __func__);
+ if (c->mtd->unlock)
+ c->mtd->unlock(c->mtd, 0, c->mtd->size);
+ printk("done.\n");
+
+ printk("%s(): erasing all blocks after the end marker... ", __func__);
+ jffs2_erase_pending_blocks(c, -1);
+ printk("done.\n");
+ }
+
dbg_fsbuild("pass 1 starting\n");
c->flags |= JFFS2_SB_FLAG_BUILDING;
/* Now scan the directory tree, increasing nlink according to every dirent found. */
--- a/fs/jffs2/scan.c
+++ b/fs/jffs2/scan.c
@@ -142,9 +142,12 @@
/* reset summary info for next eraseblock scan */
jffs2_sum_reset_collected(s);
-
- ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
- buf_size, s);
+
+ if (c->flags & (1 << 7))
+ ret = BLK_STATE_ALLFF;
+ else
+ ret = jffs2_scan_eraseblock(c, jeb, buf_size?flashbuf:(flashbuf+jeb->offset),
+ buf_size, s);
if (ret < 0)
goto out;
@@ -545,6 +548,17 @@
return err;
}
+ if ((buf[0] == 0xde) &&
+ (buf[1] == 0xad) &&
+ (buf[2] == 0xc0) &&
+ (buf[3] == 0xde)) {
+ /* end of filesystem. erase everything after this point */
+ printk("%s(): End of filesystem marker found at 0x%x\n", __func__, jeb->offset);
+ c->flags |= (1 << 7);
+
+ return BLK_STATE_ALLFF;
+ }
+
/* We temporarily use 'ofs' as a pointer into the buffer/jeb */
ofs = 0;
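
Editorial note: the scan.c hunk above treats an eraseblock whose first four bytes are 0xde 0xad 0xc0 0xde as an end-of-filesystem marker and then erases everything behind it. The stand-alone check below is equivalent to that byte test; the helper name is hypothetical and the snippet is not part of the removed patch.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* same byte test as the deleted jffs2_scan_eraseblock() hunk above */
static bool is_end_of_fs_marker(const uint8_t *buf)
{
    return buf[0] == 0xde && buf[1] == 0xad &&
           buf[2] == 0xc0 && buf[3] == 0xde;
}

int main(void)
{
    const uint8_t marker[4] = { 0xde, 0xad, 0xc0, 0xde };
    const uint8_t other[4]  = { 0xff, 0xff, 0xff, 0xff }; /* ordinary erased flash */

    printf("marker block: %d\n", is_end_of_fs_marker(marker));
    printf("erased block: %d\n", is_end_of_fs_marker(other));
    return 0;
}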


@@ -1,9 +0,0 @@
--- /dev/null
+++ b/include/asm-powerpc/segment.h
@@ -0,0 +1,6 @@
+#ifndef _ASM_SEGMENT_H
+#define _ASM_SEGMENT_H
+
+/* Only here because we have some old header files that expect it.. */
+
+#endif /* _ASM_SEGMENT_H */


@@ -1,24 +0,0 @@
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -494,7 +494,7 @@
#endif
static const u16 rtl8169_intr_mask =
- SYSErr | LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
+ LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK;
static const u16 rtl8169_napi_event =
RxOK | RxOverflow | RxFIFOOver | TxOK | TxErr;
static const unsigned int rtl8169_rx_config =
@@ -2640,10 +2640,12 @@
if (!(status & rtl8169_intr_mask))
break;
+#if 0
if (unlikely(status & SYSErr)) {
rtl8169_pcierr_interrupt(dev);
break;
}
+#endif
if (status & LinkChg)
rtl8169_check_link_status(dev, tp, ioaddr);

File diff suppressed because it is too large


@@ -1,806 +0,0 @@
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -106,6 +106,75 @@
} __attribute__ ((packed));
+struct ieee80211_ht_capability {
+ __le16 capabilities_info;
+ u8 mac_ht_params_info;
+ u8 supported_mcs_set[16];
+ __le16 extended_ht_capability_info;
+ __le32 tx_BF_capability_info;
+ u8 antenna_selection_info;
+}__attribute__ ((packed));
+
+struct ieee80211_ht_additional_info {
+ u8 control_chan;
+ u8 ht_param;
+ __le16 operation_mode;
+ __le16 stbc_param;
+ u8 basic_set[16];
+}__attribute__ ((packed));
+
+
+#define IEEE80211_TSINFO_TYPE(a) ((a.byte1 & 0x01) >> 0)
+#define IEEE80211_TSINFO_TSID(a) ((a.byte1 & 0x1E) >> 1)
+#define IEEE80211_TSINFO_DIR(a) ((a.byte1 & 0x60) >> 5)
+#define IEEE80211_TSINFO_POLICY(a) (((a.byte1 & 0x80) >> 7) + \
+ ((a.byte2 & 0x01) << 1))
+#define IEEE80211_TSINFO_AGG(a) ((a.byte2 & 0x02) >> 1)
+#define IEEE80211_TSINFO_APSD(a) ((a.byte2 & 0x04) >> 2)
+#define IEEE80211_TSINFO_UP(a) ((a.byte2 & 0x38) >> 3)
+#define IEEE80211_TSINFO_ACK(a) ((a.byte2 & 0xC0) >> 6)
+#define IEEE80211_TSINFO_SCHEDULE(a) ((a.byte3 & 0x01) >> 0)
+
+#define IEEE80211_SET_TSINFO_TYPE(i, d) (i.byte1 |= (d << 0) & 0x01)
+#define IEEE80211_SET_TSINFO_TSID(i, d) (i.byte1 |= (d << 1) & 0x1E)
+#define IEEE80211_SET_TSINFO_DIR(i, d) (i.byte1 |= (d << 5) & 0x60)
+#define IEEE80211_SET_TSINFO_POLICY(i, d) \
+do { \
+ i.byte1 |= (d & 0x01) << 7; \
+ i.byte2 |= (d & 0x02) >> 1; \
+} while(0)
+#define IEEE80211_SET_TSINFO_AGG(i, d) (i.byte2 |= (d << 1) & 0x02)
+#define IEEE80211_SET_TSINFO_APSD(i, d) (i.byte2 |= (d << 2) & 0x04)
+#define IEEE80211_SET_TSINFO_UP(i, d) (i.byte2 |= (d << 3) & 0x38)
+#define IEEE80211_SET_TSINFO_ACK(i, d) (i.byte2 |= (d << 6) & 0xC0)
+#define IEEE80211_SET_TSINFO_SCHEDULE(i, d) (i.byte3 |= (d << 0) & 0x01)
+
+struct ieee80211_ts_info {
+ u8 byte1;
+ u8 byte2;
+ u8 byte3;
+} __attribute__ ((packed));
+
+struct ieee80211_elem_tspec {
+ struct ieee80211_ts_info ts_info;
+ __le16 nominal_msdu_size;
+ __le16 max_msdu_size;
+ __le32 min_service_interval;
+ __le32 max_service_interval;
+ __le32 inactivity_interval;
+ __le32 suspension_interval;
+ __le32 service_start_time;
+ __le32 min_data_rate;
+ __le32 mean_data_rate;
+ __le32 peak_data_rate;
+ __le32 burst_size;
+ __le32 delay_bound;
+ __le32 min_phy_rate;
+ __le16 surplus_band_allow;
+ __le16 medium_time;
+} __attribute__ ((packed));
+
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -173,9 +242,51 @@
struct {
u8 action_code;
u8 dialog_token;
+ u8 variable[0];
+ } __attribute__ ((packed)) addts_req;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status_code;
+ u8 variable[0];
+ } __attribute__ ((packed)) addts_resp;
+ struct {
+ u8 action_code;
+ struct ieee80211_ts_info ts_info;
+ __le16 reason_code;
+ } __attribute__ ((packed)) delts;
+ struct {
+ u8 action_code;
+ u8 dialog_token;
u8 status_code;
u8 variable[0];
} __attribute__ ((packed)) wme_action;
+ struct {
+ u8 action_code;
+ u8 dest[6];
+ u8 src[6];
+ __le16 capab_info;
+ __le16 timeout;
+ /* Followed by Supported Rates and
+ * Extended Supported Rates */
+ u8 variable[0];
+ } __attribute__ ((packed)) dls_req;
+ struct {
+ u8 action_code;
+ __le16 status_code;
+ u8 dest[6];
+ u8 src[6];
+ /* Followed by Capability Information,
+ * Supported Rates and Extended
+ * Supported Rates */
+ u8 variable[0];
+ } __attribute__ ((packed)) dls_resp;
+ struct {
+ u8 action_code;
+ u8 dest[6];
+ u8 src[6];
+ __le16 reason_code;
+ } __attribute__ ((packed)) dls_teardown;
struct{
u8 action_code;
u8 element_id;
@@ -184,6 +295,25 @@
u8 new_chan;
u8 switch_count;
} __attribute__((packed)) chan_switch;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ __le16 capab;
+ __le16 timeout;
+ __le16 start_seq_num;
+ } __attribute__((packed)) addba_req;
+ struct{
+ u8 action_code;
+ u8 dialog_token;
+ __le16 status;
+ __le16 capab;
+ __le16 timeout;
+ } __attribute__((packed)) addba_resp;
+ struct{
+ u8 action_code;
+ __le16 params;
+ __le16 reason_code;
+ }__attribute__((packed)) delba;
} u;
} __attribute__ ((packed)) action;
} u;
@@ -259,6 +389,18 @@
WLAN_STATUS_UNSUPP_RSN_VERSION = 44,
WLAN_STATUS_INVALID_RSN_IE_CAP = 45,
WLAN_STATUS_CIPHER_SUITE_REJECTED = 46,
+ /* 802.11e */
+ WLAN_STATUS_UNSPECIFIED_QOS = 32,
+ WLAN_STATUS_ASSOC_DENIED_NOBANDWIDTH = 33,
+ WLAN_STATUS_ASSOC_DENIED_LOWACK = 34,
+ WLAN_STATUS_ASSOC_DENIED_UNSUPP_QOS = 35,
+ WLAN_STATUS_REQUEST_DECLINED = 37,
+ WLAN_STATUS_INVALID_QOS_PARAM = 38,
+ WLAN_STATUS_CHANGE_TSPEC = 39,
+ WLAN_STATUS_WAIT_TS_DELAY = 47,
+ WLAN_STATUS_NO_DIRECT_LINK = 48,
+ WLAN_STATUS_STA_NOT_PRESENT = 49,
+ WLAN_STATUS_STA_NOT_QSTA = 50,
};
@@ -289,9 +431,50 @@
WLAN_REASON_INVALID_RSN_IE_CAP = 22,
WLAN_REASON_IEEE8021X_FAILED = 23,
WLAN_REASON_CIPHER_SUITE_REJECTED = 24,
+ /* 802.11e */
+ WLAN_REASON_DISASSOC_UNSPECIFIED_QOS = 32,
+ WLAN_REASON_DISASSOC_QAP_NO_BANDWIDTH = 33,
+ WLAN_REASON_DISASSOC_LOW_ACK = 34,
+ WLAN_REASON_DISASSOC_QAP_EXCEED_TXOP = 35,
+ WLAN_REASON_QSTA_LEAVE_QBSS = 36,
+ WLAN_REASON_QSTA_NOT_USE = 37,
+ WLAN_REASON_QSTA_REQUIRE_SETUP = 38,
+ WLAN_REASON_QSTA_TIMEOUT = 39,
+ WLAN_REASON_QSTA_CIPHER_NOT_SUPP = 45,
};
+/* Category Code */
+enum ieee80211_category {
+ WLAN_CATEGORY_SPECTRUM_MGMT = 0,
+ WLAN_CATEGORY_QOS = 1,
+ WLAN_CATEGORY_DLS = 2,
+ WLAN_CATEGORY_BACK = 3,
+ WLAN_CATEGORY_WMM = 17,
+};
+
+/* QoS Action Code */
+enum ieee80211_qos_actioncode {
+ WLAN_ACTION_QOS_ADDTS_REQ = 0,
+ WLAN_ACTION_QOS_ADDTS_RESP = 1,
+ WLAN_ACTION_QOS_DELTS = 2,
+ WLAN_ACTION_QOS_SCHEDULE = 3,
+};
+
+/* DLS Action Code */
+enum ieee80211_dls_actioncode {
+ WLAN_ACTION_DLS_REQ = 0,
+ WLAN_ACTION_DLS_RESP = 1,
+ WLAN_ACTION_DLS_TEARDOWN = 2,
+};
+
+/* BACK Action Code */
+enum ieee80211_back_actioncode {
+ WLAN_ACTION_ADDBA_REQ = 0,
+ WLAN_ACTION_ADDBA_RESP = 1,
+ WLAN_ACTION_DELBA = 2,
+};
+
/* Information Element IDs */
enum ieee80211_eid {
WLAN_EID_SSID = 0,
@@ -307,6 +490,15 @@
WLAN_EID_HP_PARAMS = 8,
WLAN_EID_HP_TABLE = 9,
WLAN_EID_REQUEST = 10,
+ /* 802.11e */
+ WLAN_EID_QBSS_LOAD = 11,
+ WLAN_EID_EDCA_PARAM_SET = 12,
+ WLAN_EID_TSPEC = 13,
+ WLAN_EID_TCLAS = 14,
+ WLAN_EID_SCHEDULE = 15,
+ WLAN_EID_TS_DELAY = 43,
+ WLAN_EID_TCLAS_PROCESSING = 44,
+ WLAN_EID_QOS_CAPA = 46,
/* 802.11h */
WLAN_EID_PWR_CONSTRAINT = 32,
WLAN_EID_PWR_CAPABILITY = 33,
@@ -321,6 +513,9 @@
/* 802.11g */
WLAN_EID_ERP_INFO = 42,
WLAN_EID_EXT_SUPP_RATES = 50,
+ /* 802.11n */
+ WLAN_EID_HT_CAPABILITY = 45,
+ WLAN_EID_HT_EXTRA_INFO = 61,
/* 802.11i */
WLAN_EID_RSN = 48,
WLAN_EID_WPA = 221,
@@ -329,6 +524,9 @@
WLAN_EID_QOS_PARAMETER = 222
};
+/* 80211n */
+#define IEEE80211_QOS_CONTROL_A_MSDU_PRESENT 0x0080
+
/* cipher suite selectors */
#define WLAN_CIPHER_SUITE_USE_GROUP 0x000FAC00
#define WLAN_CIPHER_SUITE_WEP40 0x000FAC01
@@ -339,4 +537,37 @@
#define WLAN_MAX_KEY_LEN 32
+enum ieee80211_tsinfo_direction {
+ WLAN_TSINFO_UPLINK = 0,
+ WLAN_TSINFO_DOWNLINK = 1,
+ WLAN_TSINFO_DIRECTLINK = 2,
+ WLAN_TSINFO_BIDIRECTIONAL = 3,
+};
+
+enum ieee80211_tsinfo_access {
+ WLAN_TSINFO_EDCA = 1,
+ WLAN_TSINFO_HCCA = 2,
+ WLAN_TSINFO_HEMM = 3,
+};
+
+enum ieee80211_tsinfo_psb {
+ WLAN_TSINFO_PSB_LEGACY = 0,
+ WLAN_TSINFO_PSB_APSD = 1,
+};
+
+
+/* WI-FI Alliance OUI Type and Subtype */
+enum wifi_oui_type {
+ WIFI_OUI_TYPE_WPA = 1,
+ WIFI_OUI_TYPE_WMM = 2,
+ WIFI_OUI_TYPE_WSC = 4,
+ WIFI_OUI_TYPE_PSD = 6,
+};
+
+enum wifi_oui_stype_wmm {
+ WIFI_OUI_STYPE_WMM_INFO = 0,
+ WIFI_OUI_STYPE_WMM_PARAM = 1,
+ WIFI_OUI_STYPE_WMM_TSPEC = 2,
+};
+
#endif /* IEEE80211_H */
--- a/include/linux/nl80211.h
+++ b/include/linux/nl80211.h
@@ -7,6 +7,217 @@
*/
/**
+ * enum nl80211_commands - supported nl80211 commands
+ * @NL80211_CMD_UNSPEC: unspecified command to catch errors
+ * @NL80211_CMD_RENAME_WIPHY: rename a wiphy, needs
+ * %NL80211_ATTR_WIPHY and %NL80211_ATTR_WIPHY_NAME
+ * @NL80211_CMD_WIPHY_NEWNAME: rename notification
+ * @NL80211_CMD_GET_CMDLIST: TO BE DEFINED PROPERLY. currently the code makes
+ * it depend on the wiphy only but it really should depend on the
+ * interface type too....
+ * @NL80211_CMD_NEW_CMDLIST: command list result
+ * @NL80211_CMD_ADD_VIRTUAL_INTERFACE: create a virtual interface for the
+ * wiphy identified by an %NL80211_ATTR_WIPHY attribute with the given
+ * %NL80211_ATTR_IFTYPE and %NL80211_ATTR_IFNAME.
+ * @NL80211_CMD_DEL_VIRTUAL_INTERFACE: destroy a virtual interface identified
+ * by %NL80211_ATTR_IFINDEX.
+ * @NL80211_CMD_CHANGE_VIRTUAL_INTERFACE: change type of virtual interface to
+ * the type given by %NL80211_ATTR_IFTYPE, the interface is identified by
+ * %NL80211_ATTR_IFINDEX.
+ * @NL80211_CMD_GET_WIPHYS: request a list of all wiphys present in the system
+ * @NL80211_CMD_NEW_WIPHYS: returned list of all wiphys
+ * @NL80211_CMD_GET_INTERFACES: request a list of all interfaces belonging to
+ * the wiphy identified by %NL80211_ATTR_WIPHY
+ * @NL80211_CMD_NEW_INTERFACES: result for %NL80211_CMD_GET_INTERFACES
+ * @NL80211_CMD_INITIATE_SCAN: initiate a scan with the passed parameters. The
+ * parameters may contain %NL80211_ATTR_FLAG_SCAN_ACTIVE,
+ * %NL80211_ATTR_PHYMODE and a list of channels in an
+ * %NL80211_ATTR_CHANNEL_LIST attribute (an array of nested attributes)
+ * containing %NL80211_ATTR_CHANNEL, %NL80211_ATTR_PHYMODE, and possibly
+ * %NL80211_ATTR_FLAG_SCAN_ACTIVE. The outer %NL80211_ATTR_FLAG_SCAN_ACTIVE
+ * is ignored when a channel list is present.
+ * @NL80211_CMD_SCAN_RESULT: scan result, contains an array in
+ * %NL80211_ATTR_BSS_LIST.
+ * @NL80211_CMD_ASSOCIATE: associate with the given parameters
+ * (%NL80211_ATTR_SSID is mandatory, %NL80211_ATTR_TIMEOUT_TU,
+ * %NL80211_ATTR_BSSID, %NL80211_ATTR_CHANNEL, %NL80211_ATTR_PHYMODE,
+ * and %NL80211_ATTR_IE may be given)
+ * @NL80211_CMD_ADD_KEY: add a key with given %NL80211_ATTR_KEY_DATA,
+ * %NL80211_ATTR_KEY_ID, %NL80211_ATTR_KEY_TYPE, %NL80211_ATTR_MAC and
+ * %NL80211_ATTR_KEY_CIPHER attributes.
+ * @NL80211_CMD_DEL_KEY: delete a key identified by %NL80211_ATTR_KEY_ID,
+ * %NL80211_ATTR_KEY_TYPE and %NL80211_ATTR_MAC or all keys.
+ * @__NL80211_CMD_AFTER_LAST: internal use
+ */
+enum nl80211_commands {
+/* don't change the order or add anything in between, this is ABI! */
+ NL80211_CMD_UNSPEC,
+ /* %input: wiphy, wiphy_name */
+ NL80211_CMD_RENAME_WIPHY,
+ NL80211_CMD_WIPHY_NEWNAME,
+ /* %input: wiphy|ifindex */
+ NL80211_CMD_GET_CMDLIST,
+ NL80211_CMD_NEW_CMDLIST,
+ /* %input: wiphy, ifname, {iftype} */
+ NL80211_CMD_ADD_VIRTUAL_INTERFACE,
+ /* %input: wiphy, ifindex */
+ NL80211_CMD_DEL_VIRTUAL_INTERFACE,
+ /* %input: ifindex, iftype */
+ NL80211_CMD_CHANGE_VIRTUAL_INTERFACE,
+ /* %input: */
+ NL80211_CMD_GET_WIPHYS,
+ NL80211_CMD_NEW_WIPHYS,
+ /* %input: wiphy */
+ NL80211_CMD_GET_INTERFACES,
+ NL80211_CMD_NEW_INTERFACES,
+ NL80211_CMD_INITIATE_SCAN,
+ NL80211_CMD_SCAN_RESULT,
+ NL80211_CMD_GET_ASSOCIATION,
+ NL80211_CMD_ASSOCIATION_CHANGED,
+ NL80211_CMD_ASSOCIATE,
+ NL80211_CMD_DISASSOCIATE,
+ NL80211_CMD_DEAUTH,
+ NL80211_CMD_GET_AUTH_LIST,
+ NL80211_CMD_NEW_AUTH_LIST,
+ NL80211_CMD_AUTHENTICATION_CHANGED,
+ NL80211_CMD_AP_SET_BEACON,
+ NL80211_CMD_AP_ADD_STA,
+ NL80211_CMD_AP_UPDATE_STA,
+ NL80211_CMD_AP_GET_STA_INFO,
+ NL80211_CMD_AP_SET_RATESETS,
+ NL80211_CMD_ADD_KEY,
+ NL80211_CMD_DEL_KEY,
+
+ /* add commands here */
+
+ /* used to define NL80211_CMD_MAX below */
+ __NL80211_CMD_AFTER_LAST
+};
+#define NL80211_CMD_MAX (__NL80211_CMD_AFTER_LAST - 1)
+
+
+/**
+ * enum nl80211_attrs - nl80211 netlink attributes
+ * @NL80211_ATTR_UNSPEC: unspecified attribute to catch errors
+ * @NL80211_ATTR_IFINDEX: network interface index of the device to operate on
+ * @NL80211_ATTR_IFNAME: network interface name
+ * @NL80211_ATTR_WIPHY: index of wiphy to operate on, cf.
+ * /sys/class/ieee80211/<phyname>/index
+ * @NL80211_ATTR_WIPHY_NAME: wiphy name (used for renaming)
+ * @NL80211_ATTR_CMDS: list of u8's identifying commands a device supports
+ * @NL80211_ATTR_IFTYPE: type of virtual interface, see &enum nl80211_iftype
+ * @NL80211_ATTR_INTERFACE_LIST: interface array, nested netlink attribute
+ * @NL80211_ATTR_WIPHY_LIST: wiphy array, nested netlink attribute
+ * @NL80211_ATTR_BSSID: BSSID (must be 6 bytes)
+ * @NL80211_ATTR_SSID: SSID (1-32 bytes)
+ * @NL80211_ATTR_CHANNEL: channel number
+ * @NL80211_ATTR_PHYMODE: PHY mode, see &enum nl80211_phymode
+ * @NL80211_ATTR_CHANNEL_LIST: netlink nested attribute array containing scan
+ * parameters for channels
+ * @NL80211_ATTR_BSS_LIST: nested attribute containing an array
+ * @NL80211_ATTR_BSSTYPE: BSS type, see &enum nl80211_bsstype
+ * @NL80211_ATTR_BEACON_PERIOD: beacon period
+ * @NL80211_ATTR_DTIM_PERIOD: DTIM period
+ * @NL80211_ATTR_TIMESTAMP: 64-bit timestamp of received beacon/probe response
+ * @NL80211_ATTR_IE: information element(s), maximum length %NL80211_MAX_IE_LEN
+ * @NL80211_ATTR_AUTH_ALGORITHM: authentication algorithm
+ * @NL80211_ATTR_TIMEOUT_TU: timeout in TU (TO BE USED)
+ * @NL80211_ATTR_REASON_CODE: 802.11 reason code
+ * @NL80211_ATTR_ASSOCIATION_ID: association ID (u16, 1-2007)
+ * @NL80211_ATTR_DEAUTHENTICATED: TO BE USED
+ * @NL80211_ATTR_RX_SENSITIVITY: receiver sensitivity in dBm
+ * @NL80211_ATTR_TRANSMIT_POWER: transmit power in mW
+ * @NL80211_ATTR_FRAG_THRESHOLD: fragmentation threshold (bytes)
+ * @NL80211_ATTR_FLAG_SCAN_ACTIVE: netlink flag indicating active scan
+ * @NL80211_ATTR_KEY_DATA: temporal key data
+ * @NL80211_ATTR_KEY_ID: key ID (u8, 0-3)
+ * @NL80211_ATTR_KEY_TYPE: key type (see &enum nl80211_keytype)
+ * @NL80211_ATTR_MAC: MAC address
+ * @NL80211_ATTR_KEY_CIPHER: key cipher suite (u32)
+ * @__NL80211_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_attrs {
+/* don't change the order or add anything in between, this is ABI! */
+ NL80211_ATTR_UNSPEC,
+ /* %type: u32 */
+ NL80211_ATTR_IFINDEX,
+ /* %type: nulstring */
+ NL80211_ATTR_IFNAME,
+ /* %type: u32 */
+ NL80211_ATTR_WIPHY,
+ /* %type: nulstring */
+ NL80211_ATTR_WIPHY_NAME,
+ NL80211_ATTR_CMDS,
+ /* %type: u32 */
+ NL80211_ATTR_IFTYPE,
+ NL80211_ATTR_INTERFACE_LIST,
+ NL80211_ATTR_WIPHY_LIST,
+ NL80211_ATTR_BSSID,
+ NL80211_ATTR_SSID,
+ NL80211_ATTR_CHANNEL,
+ NL80211_ATTR_PHYMODE,
+ NL80211_ATTR_CHANNEL_LIST,
+ NL80211_ATTR_BSS_LIST,
+ NL80211_ATTR_BSSTYPE,
+ NL80211_ATTR_BEACON_PERIOD,
+ NL80211_ATTR_DTIM_PERIOD,
+ NL80211_ATTR_TIMESTAMP,
+ NL80211_ATTR_IE,
+ NL80211_ATTR_AUTH_ALGORITHM,
+ NL80211_ATTR_TIMEOUT_TU,
+ NL80211_ATTR_REASON_CODE,
+ NL80211_ATTR_ASSOCIATION_ID,
+ NL80211_ATTR_DEAUTHENTICATED,
+ NL80211_ATTR_RX_SENSITIVITY,
+ NL80211_ATTR_TRANSMIT_POWER,
+ NL80211_ATTR_FRAG_THRESHOLD,
+ NL80211_ATTR_FLAG_SCAN_ACTIVE,
+
+ NL80211_ATTR_KEY_DATA,
+ NL80211_ATTR_KEY_ID,
+ NL80211_ATTR_KEY_TYPE,
+ NL80211_ATTR_MAC,
+ NL80211_ATTR_KEY_CIPHER,
+
+ NL80211_ATTR_BEACON_HEAD,
+ NL80211_ATTR_BEACON_TAIL,
+
+ /* add attributes here, update the policy in nl80211.c */
+
+ /* used to define NL80211_ATTR_MAX below */
+ __NL80211_ATTR_AFTER_LAST,
+};
+#define NL80211_ATTR_MAX (__NL80211_ATTR_AFTER_LAST - 1)
+
+/**
+ * enum nl80211_multicast_groups - multicast groups for nl80211
+ * @NL80211_GROUP_CONFIG: members of this group are notified of
+ * configuration changes
+ */
+enum nl80211_multicast_groups {
+ /* be notified of configuration changes like wiphy renames */
+ NL80211_GROUP_CONFIG,
+
+ /* add groups here */
+
+ /* keep last */
+ __NL80211_GROUP_AFTER_LAST
+};
+#define NL80211_GROUP_MAX (__NL80211_GROUP_AFTER_LAST - 1)
+
+/*
+ * maximum length of IE(s) passed in an NL80211_ATTR_IE.
+ * this is an arbitrary limit, 774 means three full-length
+ * IEs would fit... increase if necessary */
+#define NL80211_MAX_IE_LEN 774
+
+/*
+ * maximum number of items in an ATTR_CHANNEL_LIST,
+ * just to avoid too large allocations
+ */
+#define NL80211_MAX_CHANNEL_LIST_ITEM 200
+
+/**
* enum nl80211_iftype - (virtual) interface types
* @NL80211_IFTYPE_UNSPECIFIED: unspecified type, driver decides
* @NL80211_IFTYPE_ADHOC: independent BSS member
@@ -35,4 +246,56 @@
};
#define NL80211_IFTYPE_MAX (__NL80211_IFTYPE_AFTER_LAST - 1)
+/**
+ * enum nl80211_phymode - PHY modes
+ * @NL80211_PHYMODE_A: 5 GHz PHY
+ * @NL80211_PHYMODE_B: 2.4 GHz PHY (B mode)
+ * @NL80211_PHYMODE_G: 2.4 GHz PHY (G, compatible with B)
+ * @__NL80211_PHYMODE_AFTER_LAST: internal use
+ *
+ * These values are used for %NL80211_ATTR_PHYMODE.
+ */
+enum nl80211_phymode {
+ NL80211_PHYMODE_A,
+ NL80211_PHYMODE_B,
+ NL80211_PHYMODE_G,
+
+ /* keep last */
+ __NL80211_PHYMODE_AFTER_LAST
+};
+#define NL80211_PHYMODE_MAX (__NL80211_PHYMODE_AFTER_LAST - 1)
+
+/**
+ * enum nl80211_bsstype - BSS types
+ * @NL80211_BSSTYPE_INFRASTRUCTURE: infrastructure BSS
+ * @NL80211_BSSTYPE_INDEPENDENT: independent BSS (ad-hoc network)
+ * @__NL80211_BSSTYPE_AFTER_LAST: internal use
+ *
+ * These values are used for %NL80211_ATTR_BSSTYPE.
+ */
+enum nl80211_bsstype {
+ NL80211_BSSTYPE_INFRASTRUCTURE,
+ NL80211_BSSTYPE_INDEPENDENT,
+
+ /* keep last */
+ __NL80211_BSSTYPE_AFTER_LAST
+};
+#define NL80211_BSSTYPE_MAX (__NL80211_BSSTYPE_AFTER_LAST - 1)
+
+/**
+ * enum nl80211_keytype - key types
+ * @NL80211_KEYTYPE_GROUP: group key
+ * @NL80211_KEYTYPE_PAIRWISE: pairwise key
+ * @NL80211_KEYTYPE_PEER: peer key
+ */
+enum nl80211_keytype {
+ NL80211_KEYTYPE_GROUP,
+ NL80211_KEYTYPE_PAIRWISE,
+ NL80211_KEYTYPE_PEER,
+
+ /* keep last */
+ __NL80211_KEYTYPE_AFTER_LAST
+};
+#define NL80211_KEYTYPE_MAX (__NL80211_KEYTYPE_AFTER_LAST - 1)
+
#endif /* __LINUX_NL80211_H */
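The enum above asks whoever adds an attribute to also update the attribute policy in nl80211.c. As a rough illustration only (not the actual nl80211.c contents), a netlink validation policy covering a few of these attributes could be declared along the following lines; the exact types and lengths chosen here are assumptions:

#include <linux/if.h>
#include <linux/nl80211.h>
#include <net/netlink.h>

/* illustrative sketch -- the real policy is the one in nl80211.c */
static struct nla_policy nl80211_policy[NL80211_ATTR_MAX + 1] = {
	[NL80211_ATTR_IFINDEX] = { .type = NLA_U32 },
	[NL80211_ATTR_IFNAME]  = { .type = NLA_NUL_STRING, .len = IFNAMSIZ - 1 },
	[NL80211_ATTR_WIPHY]   = { .type = NLA_U32 },
	[NL80211_ATTR_SSID]    = { .type = NLA_BINARY, .len = 32 },
	[NL80211_ATTR_MAC]     = { .type = NLA_BINARY, .len = 6 },
	[NL80211_ATTR_IE]      = { .type = NLA_BINARY, .len = NL80211_MAX_IE_LEN },
};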
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -3,6 +3,7 @@
#include <linux/netlink.h>
#include <linux/skbuff.h>
+#include <linux/nl80211.h>
#include <net/genetlink.h>
/*
@@ -11,6 +12,69 @@
* Copyright 2006 Johannes Berg <johannes@sipsolutions.net>
*/
+/**
+ * struct scan_channel - describes a single channel to scan
+ * @phymode: PHY mode for this channel
+ * @channel: channel number (1-14, ...)
+ * @active: scan actively or passively on this channel
+ */
+struct scan_channel {
+ enum nl80211_phymode phymode;
+ u32 channel;
+ int active;
+};
+
+/**
+ * struct scan_params - describes scan parameters
+ * @n_channels: number of items in @channels array or -1 to indicate all
+ * channels should be scanned (in that case @channels will be %NULL)
+ * @active: when n_channels is -1 this determines active/passive scanning.
+ * @phymode: when n_channels is -1 this determines PHY mode to scan. It is
+ * not possible to scan different PHY modes in one request w/o giving
+ * a channel list.
+ * @channels: array containing @n_channels &struct scan_channel items
+ */
+struct scan_params {
+ int n_channels;
+ int active;
+ enum nl80211_phymode phymode;
+ struct scan_channel *channels;
+};
+
+/**
+ * struct association_params - describes association parameters
+ * @valid: this member contains flags which items are valid
+ * @bssid: the BSSID of the BSS to associate [%ASSOC_PARAMS_BSSID]
+ * @timeout: timeout (in TU) [%ASSOC_PARAMS_TIMEOUT]
+ * @ie: information element(s) to include in the association frames [%ASSOC_PARAMS_IE]
+ * @ie_len: length of the information element(s)
+ * @ssid: the SSID, always valid.
+ * @ssid_len: length of the SSID
+ */
+struct association_params {
+ u8 *bssid;
+ u32 timeout;
+ u8 *ie;
+ int ie_len;
+ u8 *ssid;
+ int ssid_len;
+
+ unsigned int valid;
+};
+#define ASSOC_PARAMS_TIMEOUT (1<<0)
+
+/**
+ * struct key_params - key information
+ */
+struct key_params {
+ u8 *key;
+ int key_len;
+ int key_id;
+ u32 key_type;
+ u8 *macaddress;
+ u32 cipher;
+};
+
/* from net/wireless.h */
struct wiphy;
@@ -30,11 +94,62 @@
* @add_virtual_intf: create a new virtual interface with the given name
*
* @del_virtual_intf: remove the virtual interface determined by ifindex.
+ *
+ * @change_virtual_intf: change type of virtual interface
+ *
+ * @associate: associate with given parameters
+ *
+ * @disassociate: disassociate from current AP
+ *
+ * @deauth: deauth from current AP
+ *
+ * @initiate_scan: scan with the given information (see &struct scan_params above)
+ *
+ * @get_association: get BSSID of the BSS that the device is currently
+ * associated to and return 1, or return 0 if not
+ * associated (or a negative error code)
+ * @get_auth_list: get the list of BSSIDs of all BSSs the device has
+ *	authenticated with; must call next_bssid for each, passing the
+ *	given data to that callback. next_bssid returns non-zero on error.
+ * @add_key: add a key using &struct key_params
+ * @del_key: delete a key using info from &struct key_params
*/
struct cfg80211_ops {
int (*add_virtual_intf)(struct wiphy *wiphy, char *name,
- unsigned int type);
+ enum nl80211_iftype type);
int (*del_virtual_intf)(struct wiphy *wiphy, int ifindex);
+ int (*change_virtual_intf)(struct wiphy *wiphy, int ifindex,
+ enum nl80211_iftype type);
+
+ int (*associate)(struct wiphy *wiphy, struct net_device *dev,
+ struct association_params *params);
+ int (*disassociate)(struct wiphy *wiphy, struct net_device *dev);
+ int (*deauth)(struct wiphy *wiphy, struct net_device *dev);
+
+
+ int (*initiate_scan)(struct wiphy *wiphy, struct net_device *dev,
+ struct scan_params *params);
+
+
+ int (*get_association)(struct wiphy *wiphy, struct net_device *dev,
+ u8 *bssid);
+
+ int (*get_auth_list)(struct wiphy *wiphy, struct net_device *dev,
+ void *data,
+ int (*next_bssid)(void *data, u8 *bssid));
+
+ int (*add_key)(struct wiphy *wiphy, struct net_device *dev,
+ struct key_params *params);
+ int (*del_key)(struct wiphy *wiphy, struct net_device *dev,
+ struct key_params *params);
};
+
+/* helper functions specific to nl80211 */
+extern void *nl80211hdr_put(struct sk_buff *skb, u32 pid,
+ u32 seq, int flags, u8 cmd);
+extern void *nl80211msg_new(struct sk_buff **skb, u32 pid,
+ u32 seq, int flags, u8 cmd);
+
#endif /* __NET_CFG80211_H */
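To show how the ops documented above are meant to be hooked up, here is a minimal, hypothetical driver-side sketch. The callback signatures are taken from this header; the mydrv_* names and the empty bodies are invented for illustration:

#include <net/cfg80211.h>

static int mydrv_add_virtual_intf(struct wiphy *wiphy, char *name,
				  enum nl80211_iftype type)
{
	/* allocate and register a net_device called 'name' of the given type */
	return 0;
}

static int mydrv_del_virtual_intf(struct wiphy *wiphy, int ifindex)
{
	/* look up the net_device by ifindex and tear it down */
	return 0;
}

static struct cfg80211_ops mydrv_cfg_ops = {
	.add_virtual_intf = mydrv_add_virtual_intf,
	.del_virtual_intf = mydrv_del_virtual_intf,
};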
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -431,7 +431,13 @@
* Those may be called only within the kernel.
*/
-/* functions that may be called by driver modules */
+/* First : function strictly used inside the kernel */
+
+/* Handle /proc/net/wireless, called in net/core/dev.c */
+extern int dev_get_wireless_info(char * buffer, char **start, off_t offset,
+ int length);
+
+/* Second : functions that may be called by driver modules */
/* Send a single event to user space */
extern void wireless_send_event(struct net_device * dev,
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -300,7 +300,6 @@
/* Following five fields are used for IEEE 802.11H */
unsigned int radar_detect;
unsigned int spect_mgmt;
- /* All following fields are currently unused. */
unsigned int quiet_duration; /* duration of quiet period */
unsigned int quiet_offset; /* how far into the beacon is the quiet
* period */
@@ -514,6 +513,9 @@
* per-packet RC4 key with each TX frame when doing hwcrypto */
#define IEEE80211_HW_TKIP_REQ_PHASE2_KEY (1<<14)
+ /* The device capable of supporting 11n */
+#define IEEE80211_HW_SUPPORT_HT_MODE (1<<15)
+
u32 flags; /* hardware flags defined above */
/* Set to the size of a needed device specific skb headroom for TX skbs. */
@@ -641,8 +643,7 @@
* used if the wlan hardware or low-level driver implements PAE.
* 80211.o module will anyway filter frames based on authorization
* state, so this function pointer can be NULL if low-level driver does
- * not require event notification about port state changes.
- * Currently unused. */
+ * not require event notification about port state changes. */
int (*set_port_auth)(struct ieee80211_hw *hw, u8 *addr,
int authorized);
@@ -694,8 +695,9 @@
/* Get statistics of the current TX queue status. This is used to get
* number of currently queued packets (queue length), maximum queue
* size (limit), and total number of packets sent using each TX queue
- * (count).
- * Currently unused. */
+ * (count). This information is used for WMM to find out which TX
+ * queues have room for more packets and by hostapd to provide
+ * statistics about the current queueing state to external programs. */
int (*get_tx_stats)(struct ieee80211_hw *hw,
struct ieee80211_tx_queue_stats *stats);
@@ -705,12 +707,25 @@
* Must be atomic. */
u64 (*get_tsf)(struct ieee80211_hw *hw);
+ /* Call low level driver with 11n Block Ack action */
+ int (*handle_ba_action)(struct ieee80211_hw *hw,
+ struct ieee80211_mgmt *mgmt);
+
/* Reset the TSF timer and allow firmware/hardware to synchronize with
* other STAs in the IBSS. This is only used in IBSS mode. This
* function is optional if the firmware/hardware takes full care of
* TSF synchronization. */
void (*reset_tsf)(struct ieee80211_hw *hw);
+ /* Configure ht parameters. */
+ int (*conf_ht)(struct ieee80211_hw *hw,
+ struct ieee80211_ht_capability *ht_cap_param,
+ struct ieee80211_ht_additional_info *ht_extra_param);
+
+ /* Get ht capabilities from the device */
+ void (*get_ht_capab)(struct ieee80211_hw *hw,
+ struct ieee80211_ht_capability *ht_cap_param);
+
/* Setup beacon data for IBSS beacons. Unlike access point (Master),
* IBSS uses a fixed beacon frame which is configured using this
* function. This handler is required only for IBSS mode. */
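The two HT handlers added above give low-level drivers that set IEEE80211_HW_SUPPORT_HT_MODE a hook for 802.11n configuration. A hypothetical driver implementation, matching only the prototypes shown in this hunk (the mydrv_* names and the bodies are assumptions), would look roughly like:

static int mydrv_conf_ht(struct ieee80211_hw *hw,
			 struct ieee80211_ht_capability *ht_cap_param,
			 struct ieee80211_ht_additional_info *ht_extra_param)
{
	/* program the negotiated HT parameters into the hardware */
	return 0;
}

static void mydrv_get_ht_capab(struct ieee80211_hw *hw,
			       struct ieee80211_ht_capability *ht_cap_param)
{
	/* fill *ht_cap_param with the HT capabilities the hardware supports */
}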


@@ -1,37 +0,0 @@
--- a/lib/kobject_uevent.c
+++ b/lib/kobject_uevent.c
@@ -30,9 +30,22 @@
char uevent_helper[UEVENT_HELPER_PATH_LEN] = "/sbin/hotplug";
static DEFINE_SPINLOCK(sequence_lock);
#if defined(CONFIG_NET)
-static struct sock *uevent_sock;
+struct sock *uevent_sock = NULL;
+EXPORT_SYMBOL_GPL(uevent_sock);
#endif
+u64 uevent_next_seqnum(void)
+{
+ u64 seq;
+
+ spin_lock(&sequence_lock);
+ seq = ++uevent_seqnum;
+ spin_unlock(&sequence_lock);
+
+ return seq;
+}
+EXPORT_SYMBOL_GPL(uevent_next_seqnum);
+
static char *action_to_string(enum kobject_action action)
{
switch (action) {
@@ -169,9 +182,7 @@
}
/* we will send an event, request a new sequence number */
- spin_lock(&sequence_lock);
- seq = ++uevent_seqnum;
- spin_unlock(&sequence_lock);
+ seq = uevent_next_seqnum();
sprintf(seq_buff, "SEQNUM=%llu", (unsigned long long)seq);
#if defined(CONFIG_NET)
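The point of exporting uevent_sock and uevent_next_seqnum() is to let other kernel code emit its own hotplug-style broadcasts while staying on the same sequence-number stream as the regular uevents. A minimal sketch, assuming only the symbols exported by this patch (the helper name and buffer handling are illustrative):

#include <linux/kernel.h>
#include <linux/types.h>

extern u64 uevent_next_seqnum(void);	/* exported by this patch */

static void example_stamp_event(char *buf, size_t len)
{
	/* tag a custom broadcast with the shared uevent sequence number */
	u64 seq = uevent_next_seqnum();

	snprintf(buf, len, "SEQNUM=%llu", (unsigned long long)seq);
}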


@@ -1,19 +0,0 @@
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -128,5 +128,9 @@
load average.
If unsure, say Y.
+config LEDS_TRIGGER_MORSE
+ tristate "LED Morse Trigger"
+ depends on LEDS_TRIGGERS
+
endmenu
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -21,3 +21,4 @@
obj-$(CONFIG_LEDS_TRIGGER_TIMER) += ledtrig-timer.o
obj-$(CONFIG_LEDS_TRIGGER_IDE_DISK) += ledtrig-ide-disk.o
obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
+obj-$(CONFIG_LEDS_TRIGGER_MORSE) += ledtrig-morse.o


@@ -1,30 +0,0 @@
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -178,4 +178,20 @@
Say Y here if you want to support the built-in real time clock
of the HP SDC controller.
+config INPUT_GPIO_BUTTONS
+ tristate "Polled GPIO buttons interface"
+ depends on GENERIC_GPIO
+ select INPUT_POLLDEV
+ help
+ This driver implements support for buttons connected
+ to GPIO pins of various CPUs (and some other chips).
+
+ Say Y here if your device has buttons connected
+ directly to such GPIO pins. Your board-specific
+ setup logic must also provide a platform device,
+ with configuration data saying which GPIOs are used.
+
+ To compile this driver as a module, choose M here: the
+ module will be called gpio-buttons.
+
endif
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -18,3 +18,4 @@
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
obj-$(CONFIG_HP_SDC_RTC) += hp_sdc_rtc.o
obj-$(CONFIG_INPUT_UINPUT) += uinput.o
+obj-$(CONFIG_INPUT_GPIO_BUTTONS) += gpio_buttons.o
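The Kconfig help above says the board code has to supply a platform device describing which GPIOs carry buttons. A sketch of such a registration follows; the platform-data layout is assumed for illustration only, since the real structures ship with the gpio-buttons driver rather than with this Kconfig/Makefile patch:

#include <linux/kernel.h>
#include <linux/platform_device.h>

/* assumed layout, for illustration only */
struct gpio_button {
	int gpio;
	int active_low;
	char *desc;
};

struct gpio_buttons_platform_data {
	struct gpio_button *buttons;
	int nbuttons;
};

static struct gpio_button board_buttons[] = {
	{ .gpio = 6, .active_low = 1, .desc = "reset" },
};

static struct gpio_buttons_platform_data board_button_data = {
	.buttons  = board_buttons,
	.nbuttons = ARRAY_SIZE(board_buttons),
};

static struct platform_device board_button_device = {
	.name = "gpio-buttons",
	.id   = -1,
	.dev  = { .platform_data = &board_button_data },
};

/* board setup code would call platform_device_register(&board_button_device); */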


@@ -1,17 +0,0 @@
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -419,6 +419,7 @@
source "fs/xfs/Kconfig"
source "fs/gfs2/Kconfig"
+source "fs/yaffs2/Kconfig"
config OCFS2_FS
tristate "OCFS2 file system support"
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -120,3 +120,4 @@
obj-$(CONFIG_DEBUG_FS) += debugfs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_GFS2_FS) += gfs2/
+obj-$(CONFIG_YAFFS_FS) += yaffs2/


@@ -1,80 +0,0 @@
--- a/fs/yaffs2/yaffs_fs.c
+++ b/fs/yaffs2/yaffs_fs.c
@@ -965,7 +965,7 @@
f->f_version = inode->i_version;
}
- list_for_each(i, &obj->variant.directoryVariant.children) {
+ list_for_each(i, (struct list_head *)&obj->variant.directoryVariant.children) {
curoffs++;
if (curoffs >= offset) {
l = list_entry(i, yaffs_Object, siblings);
@@ -1269,7 +1269,7 @@
if (target &&
target->variantType == YAFFS_OBJECT_TYPE_DIRECTORY &&
- !list_empty(&target->variant.directoryVariant.children)) {
+ !list_empty((struct list_head *)&target->variant.directoryVariant.children)) {
T(YAFFS_TRACE_OS, (KERN_DEBUG "target is non-empty dir\n"));
@@ -1503,7 +1503,7 @@
yaffs_GrossUnlock(dev);
/* we assume this is protected by lock_kernel() in mount/umount */
- list_del(&dev->devList);
+ list_del((struct list_head *)&dev->devList);
if(dev->spareBuffer){
YFREE(dev->spareBuffer);
@@ -1847,7 +1847,7 @@
dev->skipCheckpointWrite = options.skip_checkpoint_write;
/* we assume this is protected by lock_kernel() in mount/umount */
- list_add_tail(&dev->devList, &yaffs_dev_list);
+ list_add_tail((struct list_head *)&dev->devList, &yaffs_dev_list);
init_MUTEX(&dev->grossLock);
--- a/fs/yaffs2/yaffs_mtdif1.c
+++ b/fs/yaffs2/yaffs_mtdif1.c
@@ -323,7 +323,7 @@
* Always returns YAFFS_OK.
*/
int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
- yaffs_BlockState * pState, int *pSequenceNumber)
+ yaffs_BlockState * pState, __u32 *pSequenceNumber)
{
struct mtd_info * mtd = dev->genericDevice;
int chunkNo = blockNo * dev->nChunksPerBlock;
--- a/fs/yaffs2/yaffs_mtdif1.h
+++ b/fs/yaffs2/yaffs_mtdif1.h
@@ -23,6 +23,6 @@
int nandmtd1_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
int nandmtd1_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
- yaffs_BlockState * state, int *sequenceNumber);
+ yaffs_BlockState * state, __u32 *sequenceNumber);
#endif
--- a/fs/yaffs2/yaffs_mtdif2.c
+++ b/fs/yaffs2/yaffs_mtdif2.c
@@ -188,7 +188,7 @@
}
int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
- yaffs_BlockState * state, int *sequenceNumber)
+ yaffs_BlockState * state, __u32 *sequenceNumber)
{
struct mtd_info *mtd = (struct mtd_info *)(dev->genericDevice);
int retval;
--- a/fs/yaffs2/yaffs_mtdif2.h
+++ b/fs/yaffs2/yaffs_mtdif2.h
@@ -24,6 +24,6 @@
__u8 * data, yaffs_ExtendedTags * tags);
int nandmtd2_MarkNANDBlockBad(struct yaffs_DeviceStruct *dev, int blockNo);
int nandmtd2_QueryNANDBlock(struct yaffs_DeviceStruct *dev, int blockNo,
- yaffs_BlockState * state, int *sequenceNumber);
+ yaffs_BlockState * state, __u32 *sequenceNumber);
#endif


@@ -1,348 +0,0 @@
From: Ivo van Doorn <ivdoorn@gmail.com>
Date: Fri, 11 May 2007 19:59:40 +0000 (-0400)
Subject: [PATCH] Add 93cx6 eeprom library
X-Git-Tag: v2.6.23-rc1~1201^2~74
X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=9467d64b0e88763914c01f71ddf591b166c4f526
[PATCH] Add 93cx6 eeprom library
This patch adds a library for reading from 93cx6 eeproms.
Signed-off-by: Michael Wu <flamingice@sourmilk.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
---
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -34,6 +34,11 @@
If you choose to build module, its name will be phantom. If unsure,
say N here.
+config EEPROM_93CX6
+ tristate "EEPROM 93CX6 support"
+ ---help---
+ This is a driver for the EEPROM chipsets 93c46 and 93c66.
+ The driver supports both read and write commands.
If unsure, say N.
@@ -187,5 +192,4 @@
If you are not sure, say Y here.
-
endmenu
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -14,3 +14,4 @@
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_SONY_LAPTOP) += sony-laptop.o
obj-$(CONFIG_THINKPAD_ACPI) += thinkpad_acpi.o
+obj-$(CONFIG_EEPROM_93CX6) += eeprom_93cx6.o
--- /dev/null
+++ b/drivers/misc/eeprom_93cx6.c
@@ -0,0 +1,229 @@
+/*
+ Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: eeprom_93cx6
+ Abstract: EEPROM reader routines for 93cx6 chipsets.
+ Supported chipsets: 93c46 & 93c66.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/eeprom_93cx6.h>
+
+MODULE_AUTHOR("http://rt2x00.serialmonkey.com");
+MODULE_VERSION("1.0");
+MODULE_DESCRIPTION("EEPROM 93cx6 chip driver");
+MODULE_LICENSE("GPL");
+
+static inline void eeprom_93cx6_pulse_high(struct eeprom_93cx6 *eeprom)
+{
+ eeprom->reg_data_clock = 1;
+ eeprom->register_write(eeprom);
+ udelay(1);
+}
+
+static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom)
+{
+ eeprom->reg_data_clock = 0;
+ eeprom->register_write(eeprom);
+ udelay(1);
+}
+
+static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)
+{
+ /*
+ * Clear all flags, and enable chip select.
+ */
+ eeprom->register_read(eeprom);
+ eeprom->reg_data_in = 0;
+ eeprom->reg_data_out = 0;
+ eeprom->reg_data_clock = 0;
+ eeprom->reg_chip_select = 1;
+ eeprom->register_write(eeprom);
+
+ /*
+ * kick a pulse.
+ */
+ eeprom_93cx6_pulse_high(eeprom);
+ eeprom_93cx6_pulse_low(eeprom);
+}
+
+static void eeprom_93cx6_cleanup(struct eeprom_93cx6 *eeprom)
+{
+ /*
+ * Clear chip_select and data_in flags.
+ */
+ eeprom->register_read(eeprom);
+ eeprom->reg_data_in = 0;
+ eeprom->reg_chip_select = 0;
+ eeprom->register_write(eeprom);
+
+ /*
+ * kick a pulse.
+ */
+ eeprom_93cx6_pulse_high(eeprom);
+ eeprom_93cx6_pulse_low(eeprom);
+}
+
+static void eeprom_93cx6_write_bits(struct eeprom_93cx6 *eeprom,
+ const u16 data, const u16 count)
+{
+ unsigned int i;
+
+ eeprom->register_read(eeprom);
+
+ /*
+ * Clear data flags.
+ */
+ eeprom->reg_data_in = 0;
+ eeprom->reg_data_out = 0;
+
+ /*
+ * Start writing all bits.
+ */
+ for (i = count; i > 0; i--) {
+ /*
+ * Check if this bit needs to be set.
+ */
+ eeprom->reg_data_in = !!(data & (1 << (i - 1)));
+
+ /*
+ * Write the bit to the eeprom register.
+ */
+ eeprom->register_write(eeprom);
+
+ /*
+ * Kick a pulse.
+ */
+ eeprom_93cx6_pulse_high(eeprom);
+ eeprom_93cx6_pulse_low(eeprom);
+ }
+
+ eeprom->reg_data_in = 0;
+ eeprom->register_write(eeprom);
+}
+
+static void eeprom_93cx6_read_bits(struct eeprom_93cx6 *eeprom,
+ u16 *data, const u16 count)
+{
+ unsigned int i;
+ u16 buf = 0;
+
+ eeprom->register_read(eeprom);
+
+ /*
+ * Clear data flags.
+ */
+ eeprom->reg_data_in = 0;
+ eeprom->reg_data_out = 0;
+
+ /*
+ * Start reading all bits.
+ */
+ for (i = count; i > 0; i--) {
+ eeprom_93cx6_pulse_high(eeprom);
+
+ eeprom->register_read(eeprom);
+
+ /*
+ * Clear data_in flag.
+ */
+ eeprom->reg_data_in = 0;
+
+ /*
+ * Read if the bit has been set.
+ */
+ if (eeprom->reg_data_out)
+ buf |= (1 << (i - 1));
+
+ eeprom_93cx6_pulse_low(eeprom);
+ }
+
+ *data = buf;
+}
+
+/**
+ * eeprom_93cx6_read - Read a word from eeprom
+ * @eeprom: Pointer to eeprom structure
+ * @word: Word index from where we should start reading
+ * @data: target pointer where the information will have to be stored
+ *
+ * This function will read the eeprom data as host-endian word
+ * into the given data pointer.
+ */
+void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom, const u8 word,
+ u16 *data)
+{
+ u16 command;
+
+ /*
+ * Initialize the eeprom register
+ */
+ eeprom_93cx6_startup(eeprom);
+
+ /*
+ * Select the read opcode and the word to be read.
+ */
+ command = (PCI_EEPROM_READ_OPCODE << eeprom->width) | word;
+ eeprom_93cx6_write_bits(eeprom, command,
+ PCI_EEPROM_WIDTH_OPCODE + eeprom->width);
+
+ /*
+ * Read the requested 16 bits.
+ */
+ eeprom_93cx6_read_bits(eeprom, data, 16);
+
+ /*
+ * Cleanup eeprom register.
+ */
+ eeprom_93cx6_cleanup(eeprom);
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_read);
+
+/**
+ * eeprom_93cx6_multiread - Read multiple words from eeprom
+ * @eeprom: Pointer to eeprom structure
+ * @word: Word index from where we should start reading
+ * @data: target pointer where the information will have to be stored
+ * @words: Number of words that should be read.
+ *
+ * This function reads all requested words from the eeprom by calling
+ * eeprom_93cx6_read() multiple times. Unlike eeprom_93cx6_read(), which
+ * returns host-ordered words, this function returns little-endian words.
+ */
+void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom, const u8 word,
+ __le16 *data, const u16 words)
+{
+ unsigned int i;
+ u16 tmp;
+
+ for (i = 0; i < words; i++) {
+ tmp = 0;
+ eeprom_93cx6_read(eeprom, word + i, &tmp);
+ data[i] = cpu_to_le16(tmp);
+ }
+}
+EXPORT_SYMBOL_GPL(eeprom_93cx6_multiread);
+
--- /dev/null
+++ b/include/linux/eeprom_93cx6.h
@@ -0,0 +1,72 @@
+/*
+ Copyright (C) 2004 - 2006 rt2x00 SourceForge Project
+ <http://rt2x00.serialmonkey.com>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the
+ Free Software Foundation, Inc.,
+ 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ Module: eeprom_93cx6
+ Abstract: EEPROM reader datastructures for 93cx6 chipsets.
+ Supported chipsets: 93c46 & 93c66.
+ */
+
+/*
+ * EEPROM operation defines.
+ */
+#define PCI_EEPROM_WIDTH_93C46 6
+#define PCI_EEPROM_WIDTH_93C66 8
+#define PCI_EEPROM_WIDTH_OPCODE 3
+#define PCI_EEPROM_WRITE_OPCODE 0x05
+#define PCI_EEPROM_READ_OPCODE 0x06
+#define PCI_EEPROM_EWDS_OPCODE 0x10
+#define PCI_EEPROM_EWEN_OPCODE 0x13
+
+/**
+ * struct eeprom_93cx6 - control structure for setting the commands
+ * for reading the eeprom data.
+ * @data: private pointer for the driver.
+ * @register_read(struct eeprom_93cx6 *eeprom): handler to
+ * read the eeprom register, this function should set all reg_* fields.
+ * @register_write(struct eeprom_93cx6 *eeprom): handler to
+ * write to the eeprom register by using all reg_* fields.
+ * @width: eeprom width, should be one of the PCI_EEPROM_WIDTH_* defines
+ * @reg_data_in: register field to indicate data input
+ * @reg_data_out: register field to indicate data output
+ * @reg_data_clock: register field to set the data clock
+ * @reg_chip_select: register field to set the chip select
+ *
+ * This structure is used for the communication between the driver
+ * and the eeprom_93cx6 handlers for reading the eeprom.
+ */
+struct eeprom_93cx6 {
+ void *data;
+
+ void (*register_read)(struct eeprom_93cx6 *eeprom);
+ void (*register_write)(struct eeprom_93cx6 *eeprom);
+
+ int width;
+
+ char reg_data_in;
+ char reg_data_out;
+ char reg_data_clock;
+ char reg_chip_select;
+};
+
+extern void eeprom_93cx6_read(struct eeprom_93cx6 *eeprom,
+ const u8 word, u16 *data);
+extern void eeprom_93cx6_multiread(struct eeprom_93cx6 *eeprom,
+ const u8 word, __le16 *data, const u16 words);


@@ -1,31 +0,0 @@
--- a/drivers/misc/eeprom_93cx6.c
+++ b/drivers/misc/eeprom_93cx6.c
@@ -39,14 +39,26 @@
{
eeprom->reg_data_clock = 1;
eeprom->register_write(eeprom);
- udelay(1);
+
+ /*
+ * Add a short delay for the pulse to work.
+ * According to the specifications the "maximum minimum"
+ * time should be 450ns.
+ */
+ ndelay(450);
}
static inline void eeprom_93cx6_pulse_low(struct eeprom_93cx6 *eeprom)
{
eeprom->reg_data_clock = 0;
eeprom->register_write(eeprom);
- udelay(1);
+
+ /*
+ * Add a short delay for the pulse to work.
+ * According to the specifications the "maximum minimum"
+ * time should be 450ns.
+ */
+ ndelay(450);
}
static void eeprom_93cx6_startup(struct eeprom_93cx6 *eeprom)


@@ -1,25 +0,0 @@
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -56,6 +56,7 @@
drivers depend on it.
*/
+static ushort maxSize = 0;
static int debug;
static struct usb_serial *serial_table[SERIAL_TTY_MINORS]; /* initially all NULL */
static spinlock_t table_lock;
@@ -864,7 +865,7 @@
dev_err(&interface->dev, "No free urbs available\n");
goto probe_error;
}
- buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
+ buffer_size = (endpoint->wMaxPacketSize > maxSize) ? endpoint->wMaxPacketSize : maxSize;
port->bulk_in_size = buffer_size;
port->bulk_in_endpointAddress = endpoint->bEndpointAddress;
port->bulk_in_buffer = kmalloc (buffer_size, GFP_KERNEL);
@@ -1245,3 +1246,5 @@
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
+module_param(maxSize, ushort,0);
+MODULE_PARM_DESC(maxSize,"User specified USB endpoint size");


@@ -1,46 +0,0 @@
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -1,6 +1,10 @@
#ifndef _LINUX_TIME_H
#define _LINUX_TIME_H
+#ifndef __KERNEL__
+#include <time.h>
+#else
+
#include <linux/types.h>
#ifdef __KERNEL__
@@ -225,4 +229,6 @@
*/
#define TIMER_ABSTIME 0x01
+#endif /* __KERNEL__ DEBIAN */
+
#endif
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -1,6 +1,14 @@
#ifndef _LINUX_TYPES_H
#define _LINUX_TYPES_H
+/* Debian: Use userland types instead. */
+#ifndef __KERNEL__
+# include <sys/types.h>
+/* For other kernel headers. */
+# include <linux/posix_types.h>
+# include <asm/types.h>
+#else
+
#ifdef __KERNEL__
#define BITS_TO_LONGS(bits) \
@@ -162,6 +170,8 @@
#endif /* __KERNEL_STRICT_NAMES */
+#endif /* __KERNEL__ DEBIAN */
+
/*
* Below are truly Linux-specific types that should never collide with
* any application/library that wants linux/types.h.


@@ -1,102 +0,0 @@
--- a/scripts/genksyms/parse.c_shipped
+++ b/scripts/genksyms/parse.c_shipped
@@ -144,7 +144,9 @@
#include <assert.h>
+#ifndef __APPLE__
#include <malloc.h>
+#endif
#include "genksyms.h"
static int is_typedef;
--- a/scripts/genksyms/parse.y
+++ b/scripts/genksyms/parse.y
@@ -24,7 +24,9 @@
%{
#include <assert.h>
+#ifndef __APPLE__
#include <malloc.h>
+#endif
#include "genksyms.h"
static int is_typedef;
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -30,6 +30,35 @@
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
+#ifdef __APPLE__
+/* Darwin has no memmem implementation, this one is taken from the uClibc-0.9.28 source */
+void *memmem (const void *haystack, size_t haystack_len,
+ const void *needle, size_t needle_len)
+{
+ const char *begin;
+ const char *const last_possible
+ = (const char *) haystack + haystack_len - needle_len;
+
+ if (needle_len == 0)
+ /* The first occurrence of the empty string is deemed to occur at
+ the beginning of the string. */
+ return (void *) haystack;
+
+ /* Sanity check, otherwise the loop might search through the whole
+ memory. */
+ if (__builtin_expect (haystack_len < needle_len, 0))
+ return NULL;
+
+ for (begin = (const char *) haystack; begin <= last_possible; ++begin)
+ if (begin[0] == ((const char *) needle)[0] &&
+ !memcmp ((const void *) &begin[1],
+ (const void *) ((const char *) needle + 1),
+ needle_len - 1))
+ return (void *) begin;
+
+ return NULL;
+}
+#endif
#define KSYM_NAME_LEN 127
--- a/scripts/kconfig/Makefile
+++ b/scripts/kconfig/Makefile
@@ -87,6 +87,9 @@
# we really need to do so. (Do not call gcc as part of make mrproper)
HOST_EXTRACFLAGS = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ccflags)
HOST_LOADLIBES = $(shell $(CONFIG_SHELL) $(check-lxdialog) -ldflags $(HOSTCC))
+ifeq ($(shell uname -s),Darwin)
+HOST_LOADLIBES += -lncurses
+endif
HOST_EXTRACFLAGS += -DLOCALE
--- a/scripts/mod/mk_elfconfig.c
+++ b/scripts/mod/mk_elfconfig.c
@@ -1,7 +1,11 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#ifndef __APPLE__
#include <elf.h>
+#else
+#include "../../../../../tools/sstrip/include/elf.h"
+#endif
int
main(int argc, char **argv)
--- a/scripts/mod/modpost.h
+++ b/scripts/mod/modpost.h
@@ -7,7 +7,11 @@
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
+#ifndef __APPLE__
#include <elf.h>
+#else
+#include "../../../../../tools/sstrip/include/elf.h"
+#endif
#include "elfconfig.h"


@@ -1,17 +0,0 @@
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -16,6 +16,7 @@
false = 0,
true = 1
};
+#endif /* __KERNEL__ */
#undef offsetof
#ifdef __compiler_offsetof
@@ -23,6 +24,5 @@
#else
#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
#endif
-#endif /* __KERNEL__ */
#endif


@@ -1,20 +0,0 @@
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -125,7 +125,7 @@
str="${ftype} ${name} ${location} ${str}"
;;
"nod")
- local dev=`LC_ALL=C ls -l "${location}"`
+ local dev=`LC_ALL=C ls -l --time-style=locale "${location}"`
local maj=`field 5 ${dev}`
local min=`field 6 ${dev}`
maj=${maj%,}
@@ -135,7 +135,7 @@
str="${ftype} ${name} ${str} ${dev} ${maj} ${min}"
;;
"slink")
- local target=`field 11 $(LC_ALL=C ls -l "${location}")`
+ local target=`field 11 $(LC_ALL=C ls -l --time-style=locale "${location}")`
str="${ftype} ${name} ${target} ${str}"
;;
*)


@@ -1,175 +0,0 @@
From: Michael Barkowski <michael.barkowski@freescale.com>
Date: Fri, 11 May 2007 23:24:51 +0000 (-0500)
Subject: phylib: add the ICPlus IP175C PHY driver
X-Git-Tag: v2.6.23-rc1~1201^2~58
X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=0cefeebaf3da39d768bffcf62460fe2088e824ef
phylib: add the ICPlus IP175C PHY driver
The ICPlus IP175C sports a 100Mbit/s 4-port switch in addition
to a dedicated 100Mbit/s WAN port.
Signed-off-by: Michael Barkowski <michael.barkowski@freescale.com>
Signed-off-by: Kim Phillips <kim.phillips@freescale.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
---
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -55,6 +55,11 @@
---help---
Currently supports the BCM5411, BCM5421 and BCM5461 PHYs.
+config ICPLUS_PHY
+ tristate "Drivers for ICPlus PHYs"
+ ---help---
+ Currently supports the IP175C PHY.
+
config FIXED_PHY
tristate "Drivers for PHY emulation on fixed speed/link"
---help---
--- a/drivers/net/phy/Makefile
+++ b/drivers/net/phy/Makefile
@@ -11,4 +11,5 @@
obj-$(CONFIG_SMSC_PHY) += smsc.o
obj-$(CONFIG_VITESSE_PHY) += vitesse.o
obj-$(CONFIG_BROADCOM_PHY) += broadcom.o
+obj-$(CONFIG_ICPLUS_PHY) += icplus.o
obj-$(CONFIG_FIXED_PHY) += fixed.o
--- /dev/null
+++ b/drivers/net/phy/icplus.c
@@ -0,0 +1,134 @@
+/*
+ * Driver for ICPlus PHYs
+ *
+ * Copyright (c) 2007 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/phy.h>
+
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/uaccess.h>
+
+MODULE_DESCRIPTION("ICPlus IP175C PHY driver");
+MODULE_AUTHOR("Michael Barkowski");
+MODULE_LICENSE("GPL");
+
+static int ip175c_config_init(struct phy_device *phydev)
+{
+ int err, i;
+ static int full_reset_performed = 0;
+
+ if (full_reset_performed == 0) {
+
+ /* master reset */
+ err = phydev->bus->write(phydev->bus, 30, 0, 0x175c);
+ if (err < 0)
+ return err;
+
+ /* ensure no bus delays overlap reset period */
+ err = phydev->bus->read(phydev->bus, 30, 0);
+
+ /* data sheet specifies reset period is 2 msec */
+ mdelay(2);
+
+ /* enable IP175C mode */
+ err = phydev->bus->write(phydev->bus, 29, 31, 0x175c);
+ if (err < 0)
+ return err;
+
+ /* Set MII0 speed and duplex (in PHY mode) */
+ err = phydev->bus->write(phydev->bus, 29, 22, 0x420);
+ if (err < 0)
+ return err;
+
+ /* reset switch ports */
+ for (i = 0; i < 5; i++) {
+ err = phydev->bus->write(phydev->bus, i,
+ MII_BMCR, BMCR_RESET);
+ if (err < 0)
+ return err;
+ }
+
+ for (i = 0; i < 5; i++)
+ err = phydev->bus->read(phydev->bus, i, MII_BMCR);
+
+ mdelay(2);
+
+ full_reset_performed = 1;
+ }
+
+ if (phydev->addr != 4) {
+ phydev->state = PHY_RUNNING;
+ phydev->speed = SPEED_100;
+ phydev->duplex = DUPLEX_FULL;
+ phydev->link = 1;
+ netif_carrier_on(phydev->attached_dev);
+ }
+
+ return 0;
+}
+
+static int ip175c_read_status(struct phy_device *phydev)
+{
+ if (phydev->addr == 4) /* WAN port */
+ genphy_read_status(phydev);
+ else
+ /* Don't need to read status for switch ports */
+ phydev->irq = PHY_IGNORE_INTERRUPT;
+
+ return 0;
+}
+
+static int ip175c_config_aneg(struct phy_device *phydev)
+{
+ if (phydev->addr == 4) /* WAN port */
+ genphy_config_aneg(phydev);
+
+ return 0;
+}
+
+static struct phy_driver ip175c_driver = {
+ .phy_id = 0x02430d80,
+ .name = "ICPlus IP175C",
+ .phy_id_mask = 0x0ffffff0,
+ .features = PHY_BASIC_FEATURES,
+ .config_init = &ip175c_config_init,
+ .config_aneg = &ip175c_config_aneg,
+ .read_status = &ip175c_read_status,
+ .driver = { .owner = THIS_MODULE,},
+};
+
+static int __init ip175c_init(void)
+{
+ return phy_driver_register(&ip175c_driver);
+}
+
+static void __exit ip175c_exit(void)
+{
+ phy_driver_unregister(&ip175c_driver);
+}
+
+module_init(ip175c_init);
+module_exit(ip175c_exit);


@@ -1,195 +0,0 @@
CONFIG_32BIT=y
# CONFIG_64BIT is not set
# CONFIG_64BIT_PHYS_ADDR is not set
# CONFIG_8139TOO is not set
# CONFIG_ARCH_HAS_ILOG2_U32 is not set
# CONFIG_ARCH_HAS_ILOG2_U64 is not set
# CONFIG_ARCH_SUPPORTS_MSI is not set
# CONFIG_ATM is not set
# CONFIG_AX25 is not set
CONFIG_BASE_SMALL=0
CONFIG_BITREVERSE=y
CONFIG_BLK_DEV_CF_MIPS=y
# CONFIG_BT is not set
# CONFIG_CPU_BIG_ENDIAN is not set
CONFIG_CPU_HAS_LLSC=y
CONFIG_CPU_HAS_PREFETCH=y
CONFIG_CPU_HAS_SYNC=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_CPU_MIPS32=y
CONFIG_CPU_MIPS32_R1=y
# CONFIG_CPU_MIPS32_R2 is not set
# CONFIG_CPU_MIPS64_R1 is not set
# CONFIG_CPU_MIPS64_R2 is not set
CONFIG_CPU_MIPSR1=y
# CONFIG_CPU_NEVADA is not set
# CONFIG_CPU_R10000 is not set
# CONFIG_CPU_R3000 is not set
# CONFIG_CPU_R4300 is not set
# CONFIG_CPU_R4X00 is not set
# CONFIG_CPU_R5000 is not set
# CONFIG_CPU_R5432 is not set
# CONFIG_CPU_R6000 is not set
# CONFIG_CPU_R8000 is not set
# CONFIG_CPU_RM7000 is not set
# CONFIG_CPU_RM9000 is not set
# CONFIG_CPU_SB1 is not set
CONFIG_CPU_SUPPORTS_32BIT_KERNEL=y
CONFIG_CPU_SUPPORTS_HIGHMEM=y
# CONFIG_CPU_TX39XX is not set
# CONFIG_CPU_TX49XX is not set
# CONFIG_CPU_VR41XX is not set
# CONFIG_DDB5477 is not set
CONFIG_DEVPORT=y
# CONFIG_DM9000 is not set
CONFIG_DMA_NEED_PCI_MAP_STATE=y
CONFIG_DMA_NONCOHERENT=y
CONFIG_EXT2_FS=y
CONFIG_FS_POSIX_ACL=y
CONFIG_GENERIC_FIND_NEXT_BIT=y
# CONFIG_GENERIC_GPIO is not set
# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
# CONFIG_GEN_RTC is not set
CONFIG_HAS_DMA=y
CONFIG_HAS_IOMEM=y
CONFIG_HAS_IOPORT=y
CONFIG_HW_HAS_PCI=y
CONFIG_HW_RANDOM=y
# CONFIG_I2C is not set
# CONFIG_IDE is not set
CONFIG_INITRAMFS_SOURCE=""
CONFIG_IRQ_CPU=y
CONFIG_KORINA=y
CONFIG_LEDS_RB500=y
# CONFIG_MACH_ALCHEMY is not set
# CONFIG_MACH_DECSTATION is not set
# CONFIG_MACH_JAZZ is not set
# CONFIG_MACH_VR41XX is not set
CONFIG_MIKROTIK_RB500=y
CONFIG_MINI_FO=m
CONFIG_MIPS=y
# CONFIG_MIPS_ATLAS is not set
# CONFIG_MIPS_COBALT is not set
# CONFIG_MIPS_EV64120 is not set
CONFIG_MIPS_L1_CACHE_SHIFT=4
# CONFIG_MIPS_MALTA is not set
CONFIG_MIPS_MT_DISABLED=y
# CONFIG_MIPS_MT_SMP is not set
# CONFIG_MIPS_MT_SMTC is not set
# CONFIG_MIPS_SEAD is not set
# CONFIG_MIPS_SIM is not set
# CONFIG_MIPS_VPE_LOADER is not set
# CONFIG_MOMENCO_OCELOT is not set
# CONFIG_MOMENCO_OCELOT_3 is not set
# CONFIG_MOMENCO_OCELOT_C is not set
CONFIG_MTD=y
# CONFIG_MTD_ABSENT is not set
CONFIG_MTD_BLKDEVS=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_BLOCK2MTD=y
# CONFIG_MTD_CFI is not set
CONFIG_MTD_CFI_I1=y
CONFIG_MTD_CFI_I2=y
# CONFIG_MTD_CFI_I4 is not set
# CONFIG_MTD_CFI_I8 is not set
CONFIG_MTD_CHAR=y
# CONFIG_MTD_CMDLINE_PARTS is not set
# CONFIG_MTD_COMPLEX_MAPPINGS is not set
# CONFIG_MTD_CONCAT is not set
# CONFIG_MTD_DEBUG is not set
# CONFIG_MTD_DOC2000 is not set
# CONFIG_MTD_DOC2001 is not set
# CONFIG_MTD_DOC2001PLUS is not set
# CONFIG_MTD_JEDECPROBE is not set
CONFIG_MTD_MAP_BANK_WIDTH_1=y
# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set
CONFIG_MTD_MAP_BANK_WIDTH_2=y
# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set
CONFIG_MTD_MAP_BANK_WIDTH_4=y
# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set
# CONFIG_MTD_MTDRAM is not set
CONFIG_MTD_NAND=y
# CONFIG_MTD_NAND_CAFE is not set
# CONFIG_MTD_NAND_DISKONCHIP is not set
# CONFIG_MTD_NAND_ECC_SMC is not set
CONFIG_MTD_NAND_IDS=y
# CONFIG_MTD_NAND_MUSEUM_IDS is not set
# CONFIG_MTD_NAND_NANDSIM is not set
CONFIG_MTD_NAND_PLATFORM=y
CONFIG_MTD_NAND_VERIFY_WRITE=y
# CONFIG_MTD_ONENAND is not set
CONFIG_MTD_PARTITIONS=y
# CONFIG_MTD_PHRAM is not set
# CONFIG_MTD_PLATRAM is not set
# CONFIG_MTD_PMC551 is not set
# CONFIG_MTD_RAM is not set
# CONFIG_MTD_REDBOOT_PARTS is not set
# CONFIG_MTD_ROM is not set
# CONFIG_MTD_SLRAM is not set
# CONFIG_NATSEMI is not set
# CONFIG_NE2K_PCI is not set
# CONFIG_NETDEV_1000 is not set
CONFIG_NET_SCH_FIFO=y
# CONFIG_NET_VENDOR_3COM is not set
CONFIG_NF_CT_PROTO_GRE=m
CONFIG_NF_NAT_PROTO_GRE=m
# CONFIG_PAGE_SIZE_16KB is not set
CONFIG_PAGE_SIZE_4KB=y
# CONFIG_PAGE_SIZE_64KB is not set
# CONFIG_PAGE_SIZE_8KB is not set
# CONFIG_PCIPCWATCHDOG is not set
# CONFIG_PMC_YOSEMITE is not set
# CONFIG_PNPACPI is not set
# CONFIG_PNX8550_JBS is not set
# CONFIG_PNX8550_STB810 is not set
CONFIG_RC32434_WDT=y
# CONFIG_RTC is not set
CONFIG_RWSEM_GENERIC_SPINLOCK=y
CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
CONFIG_SCSI_WAIT_SCAN=m
# CONFIG_SERIAL_8250_EXTENDED is not set
# CONFIG_SGI_IP22 is not set
# CONFIG_SGI_IP27 is not set
# CONFIG_SGI_IP32 is not set
# CONFIG_SIBYTE_BIGSUR is not set
# CONFIG_SIBYTE_CARMEL is not set
# CONFIG_SIBYTE_CRHINE is not set
# CONFIG_SIBYTE_CRHONE is not set
# CONFIG_SIBYTE_LITTLESUR is not set
# CONFIG_SIBYTE_PTSWARM is not set
# CONFIG_SIBYTE_RHONE is not set
# CONFIG_SIBYTE_SENTOSA is not set
# CONFIG_SIBYTE_SWARM is not set
# CONFIG_SOFT_WATCHDOG is not set
# CONFIG_SOUND is not set
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SWAP_IO_SPACE=y
CONFIG_SYSVIPC_SYSCTL=y
CONFIG_SYS_HAS_CPU_MIPS32_R1=y
CONFIG_SYS_SUPPORTS_32BIT_KERNEL=y
CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
CONFIG_SYS_SUPPORTS_LITTLE_ENDIAN=y
# CONFIG_TC35815 is not set
# CONFIG_TOSHIBA_JMR3927 is not set
# CONFIG_TOSHIBA_RBTX4927 is not set
# CONFIG_TOSHIBA_RBTX4938 is not set
CONFIG_TRAD_SIGNALS=y
# CONFIG_UNUSED_SYMBOLS is not set
# CONFIG_USB is not set
# CONFIG_VGASTATE is not set
CONFIG_VIA_RHINE=y
# CONFIG_VIA_RHINE_MMIO is not set
CONFIG_VIA_RHINE_NAPI=y
# CONFIG_YAFFS_9BYTE_TAGS is not set
CONFIG_YAFFS_ALWAYS_CHECK_CHUNK_ERASED=y
CONFIG_YAFFS_AUTO_YAFFS2=y
CONFIG_YAFFS_CHECKPOINT_RESERVED_BLOCKS=0
# CONFIG_YAFFS_DISABLE_LAZY_LOAD is not set
# CONFIG_YAFFS_DISABLE_WIDE_TNODES is not set
CONFIG_YAFFS_DOES_ECC=y
CONFIG_YAFFS_ECC_WRONG_ORDER=y
CONFIG_YAFFS_FS=y
CONFIG_YAFFS_SHORT_NAMES_IN_RAM=y
CONFIG_YAFFS_YAFFS1=y
CONFIG_YAFFS_YAFFS2=y
CONFIG_ZONE_DMA_FLAG=0