From bc22c17e12c130dc929218a95aa347e0f3fd05dc Mon Sep 17 00:00:00 2001 From: Alain Knaff Date: Sun, 4 Jan 2009 22:46:16 +0100 Subject: bzip2/lzma: library support for gzip, bzip2 and lzma decompression Impact: Replaces inflate.c with a wrapper around zlib_inflate; new library code This is the first part of the bzip2/lzma patch The bzip patch is based on an idea by Christian Ludwig, includes support for compressing the kernel with bzip2 or lzma rather than gzip. Both compressors give smaller sizes than gzip. Lzma's decompresses faster than bzip2. It also supports ramdisks and initramfs' compressed using these two compressors. The functionality has been successfully used for a couple of years by the udpcast project This version applies to "tip" kernel 2.6.28 This part contains: - changed inflate.c to accomodate rest of patch - implementation of bzip2 compression (not used at this stage yet) - implementation of lzma compression (not used at this stage yet) - Makefile routines to support bzip2 and lzma kernel compression Signed-off-by: Alain Knaff Signed-off-by: H. Peter Anvin --- lib/decompress_bunzip2.c | 735 ++++++++++++++++++++++++++++++++++++++++++++ lib/decompress_inflate.c | 167 ++++++++++ lib/decompress_unlzma.c | 647 ++++++++++++++++++++++++++++++++++++++ lib/zlib_inflate/inflate.h | 4 + lib/zlib_inflate/inftrees.h | 4 + 5 files changed, 1557 insertions(+) create mode 100644 lib/decompress_bunzip2.c create mode 100644 lib/decompress_inflate.c create mode 100644 lib/decompress_unlzma.c (limited to 'lib') diff --git a/lib/decompress_bunzip2.c b/lib/decompress_bunzip2.c new file mode 100644 index 0000000..5d3ddb5 --- /dev/null +++ b/lib/decompress_bunzip2.c @@ -0,0 +1,735 @@ +/* vi: set sw = 4 ts = 4: */ +/* Small bzip2 deflate implementation, by Rob Landley (rob@landley.net). + + Based on bzip2 decompression code by Julian R Seward (jseward@acm.org), + which also acknowledges contributions by Mike Burrows, David Wheeler, + Peter Fenwick, Alistair Moffat, Radford Neal, Ian H. Witten, + Robert Sedgewick, and Jon L. Bentley. + + This code is licensed under the LGPLv2: + LGPL (http://www.gnu.org/copyleft/lgpl.html +*/ + +/* + Size and speed optimizations by Manuel Novoa III (mjn3@codepoet.org). + + More efficient reading of Huffman codes, a streamlined read_bunzip() + function, and various other tweaks. In (limited) tests, approximately + 20% faster than bzcat on x86 and about 10% faster on arm. + + Note that about 2/3 of the time is spent in read_unzip() reversing + the Burrows-Wheeler transformation. Much of that time is delay + resulting from cache misses. + + I would ask that anyone benefiting from this work, especially those + using it in commercial products, consider making a donation to my local + non-profit hospice organization in the name of the woman I loved, who + passed away Feb. 12, 2003. + + In memory of Toni W. Hagan + + Hospice of Acadiana, Inc. 
+ 2600 Johnston St., Suite 200 + Lafayette, LA 70503-3240 + + Phone (337) 232-1234 or 1-800-738-2226 + Fax (337) 232-1297 + + http://www.hospiceacadiana.com/ + + Manuel + */ + +/* + Made it fit for running in Linux Kernel by Alain Knaff (alain@knaff.lu) +*/ + + +#ifndef STATIC +#include +#endif /* !STATIC */ + +#include + +#ifndef INT_MAX +#define INT_MAX 0x7fffffff +#endif + +/* Constants for Huffman coding */ +#define MAX_GROUPS 6 +#define GROUP_SIZE 50 /* 64 would have been more efficient */ +#define MAX_HUFCODE_BITS 20 /* Longest Huffman code allowed */ +#define MAX_SYMBOLS 258 /* 256 literals + RUNA + RUNB */ +#define SYMBOL_RUNA 0 +#define SYMBOL_RUNB 1 + +/* Status return values */ +#define RETVAL_OK 0 +#define RETVAL_LAST_BLOCK (-1) +#define RETVAL_NOT_BZIP_DATA (-2) +#define RETVAL_UNEXPECTED_INPUT_EOF (-3) +#define RETVAL_UNEXPECTED_OUTPUT_EOF (-4) +#define RETVAL_DATA_ERROR (-5) +#define RETVAL_OUT_OF_MEMORY (-6) +#define RETVAL_OBSOLETE_INPUT (-7) + +/* Other housekeeping constants */ +#define BZIP2_IOBUF_SIZE 4096 + +/* This is what we know about each Huffman coding group */ +struct group_data { + /* We have an extra slot at the end of limit[] for a sentinal value. */ + int limit[MAX_HUFCODE_BITS+1]; + int base[MAX_HUFCODE_BITS]; + int permute[MAX_SYMBOLS]; + int minLen, maxLen; +}; + +/* Structure holding all the housekeeping data, including IO buffers and + memory that persists between calls to bunzip */ +struct bunzip_data { + /* State for interrupting output loop */ + int writeCopies, writePos, writeRunCountdown, writeCount, writeCurrent; + /* I/O tracking data (file handles, buffers, positions, etc.) */ + int (*fill)(void*, unsigned int); + int inbufCount, inbufPos /*, outbufPos*/; + unsigned char *inbuf /*,*outbuf*/; + unsigned int inbufBitCount, inbufBits; + /* The CRC values stored in the block header and calculated from the + data */ + unsigned int crc32Table[256], headerCRC, totalCRC, writeCRC; + /* Intermediate buffer and its size (in bytes) */ + unsigned int *dbuf, dbufSize; + /* These things are a bit too big to go on the stack */ + unsigned char selectors[32768]; /* nSelectors = 15 bits */ + struct group_data groups[MAX_GROUPS]; /* Huffman coding tables */ + int io_error; /* non-zero if we have IO error */ +}; + + +/* Return the next nnn bits of input. All reads from the compressed input + are done through this function. All reads are big endian */ +static unsigned int INIT get_bits(struct bunzip_data *bd, char bits_wanted) +{ + unsigned int bits = 0; + + /* If we need to get more data from the byte buffer, do so. + (Loop getting one byte at a time to enforce endianness and avoid + unaligned access.) */ + while (bd->inbufBitCount < bits_wanted) { + /* If we need to read more data from file into byte buffer, do + so */ + if (bd->inbufPos == bd->inbufCount) { + if (bd->io_error) + return 0; + bd->inbufCount = bd->fill(bd->inbuf, BZIP2_IOBUF_SIZE); + if (bd->inbufCount <= 0) { + bd->io_error = RETVAL_UNEXPECTED_INPUT_EOF; + return 0; + } + bd->inbufPos = 0; + } + /* Avoid 32-bit overflow (dump bit buffer to top of output) */ + if (bd->inbufBitCount >= 24) { + bits = bd->inbufBits&((1 << bd->inbufBitCount)-1); + bits_wanted -= bd->inbufBitCount; + bits <<= bits_wanted; + bd->inbufBitCount = 0; + } + /* Grab next 8 bits of input from buffer. 
*/ + bd->inbufBits = (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++]; + bd->inbufBitCount += 8; + } + /* Calculate result */ + bd->inbufBitCount -= bits_wanted; + bits |= (bd->inbufBits >> bd->inbufBitCount)&((1 << bits_wanted)-1); + + return bits; +} + +/* Unpacks the next block and sets up for the inverse burrows-wheeler step. */ + +static int INIT get_next_block(struct bunzip_data *bd) +{ + struct group_data *hufGroup = NULL; + int *base = NULL; + int *limit = NULL; + int dbufCount, nextSym, dbufSize, groupCount, selector, + i, j, k, t, runPos, symCount, symTotal, nSelectors, + byteCount[256]; + unsigned char uc, symToByte[256], mtfSymbol[256], *selectors; + unsigned int *dbuf, origPtr; + + dbuf = bd->dbuf; + dbufSize = bd->dbufSize; + selectors = bd->selectors; + + /* Read in header signature and CRC, then validate signature. + (last block signature means CRC is for whole file, return now) */ + i = get_bits(bd, 24); + j = get_bits(bd, 24); + bd->headerCRC = get_bits(bd, 32); + if ((i == 0x177245) && (j == 0x385090)) + return RETVAL_LAST_BLOCK; + if ((i != 0x314159) || (j != 0x265359)) + return RETVAL_NOT_BZIP_DATA; + /* We can add support for blockRandomised if anybody complains. + There was some code for this in busybox 1.0.0-pre3, but nobody ever + noticed that it didn't actually work. */ + if (get_bits(bd, 1)) + return RETVAL_OBSOLETE_INPUT; + origPtr = get_bits(bd, 24); + if (origPtr > dbufSize) + return RETVAL_DATA_ERROR; + /* mapping table: if some byte values are never used (encoding things + like ascii text), the compression code removes the gaps to have fewer + symbols to deal with, and writes a sparse bitfield indicating which + values were present. We make a translation table to convert the + symbols back to the corresponding bytes. */ + t = get_bits(bd, 16); + symTotal = 0; + for (i = 0; i < 16; i++) { + if (t&(1 << (15-i))) { + k = get_bits(bd, 16); + for (j = 0; j < 16; j++) + if (k&(1 << (15-j))) + symToByte[symTotal++] = (16*i)+j; + } + } + /* How many different Huffman coding groups does this block use? */ + groupCount = get_bits(bd, 3); + if (groupCount < 2 || groupCount > MAX_GROUPS) + return RETVAL_DATA_ERROR; + /* nSelectors: Every GROUP_SIZE many symbols we select a new + Huffman coding group. Read in the group selector list, + which is stored as MTF encoded bit runs. (MTF = Move To + Front, as each value is used it's moved to the start of the + list.) */ + nSelectors = get_bits(bd, 15); + if (!nSelectors) + return RETVAL_DATA_ERROR; + for (i = 0; i < groupCount; i++) + mtfSymbol[i] = i; + for (i = 0; i < nSelectors; i++) { + /* Get next value */ + for (j = 0; get_bits(bd, 1); j++) + if (j >= groupCount) + return RETVAL_DATA_ERROR; + /* Decode MTF to get the next selector */ + uc = mtfSymbol[j]; + for (; j; j--) + mtfSymbol[j] = mtfSymbol[j-1]; + mtfSymbol[0] = selectors[i] = uc; + } + /* Read the Huffman coding tables for each group, which code + for symTotal literal symbols, plus two run symbols (RUNA, + RUNB) */ + symCount = symTotal+2; + for (j = 0; j < groupCount; j++) { + unsigned char length[MAX_SYMBOLS], temp[MAX_HUFCODE_BITS+1]; + int minLen, maxLen, pp; + /* Read Huffman code lengths for each symbol. They're + stored in a way similar to mtf; record a starting + value for the first symbol, and an offset from the + previous value for everys symbol after that. 
+ (Subtracting 1 before the loop and then adding it + back at the end is an optimization that makes the + test inside the loop simpler: symbol length 0 + becomes negative, so an unsigned inequality catches + it.) */ + t = get_bits(bd, 5)-1; + for (i = 0; i < symCount; i++) { + for (;;) { + if (((unsigned)t) > (MAX_HUFCODE_BITS-1)) + return RETVAL_DATA_ERROR; + + /* If first bit is 0, stop. Else + second bit indicates whether to + increment or decrement the value. + Optimization: grab 2 bits and unget + the second if the first was 0. */ + + k = get_bits(bd, 2); + if (k < 2) { + bd->inbufBitCount++; + break; + } + /* Add one if second bit 1, else + * subtract 1. Avoids if/else */ + t += (((k+1)&2)-1); + } + /* Correct for the initial -1, to get the + * final symbol length */ + length[i] = t+1; + } + /* Find largest and smallest lengths in this group */ + minLen = maxLen = length[0]; + + for (i = 1; i < symCount; i++) { + if (length[i] > maxLen) + maxLen = length[i]; + else if (length[i] < minLen) + minLen = length[i]; + } + + /* Calculate permute[], base[], and limit[] tables from + * length[]. + * + * permute[] is the lookup table for converting + * Huffman coded symbols into decoded symbols. base[] + * is the amount to subtract from the value of a + * Huffman symbol of a given length when using + * permute[]. + * + * limit[] indicates the largest numerical value a + * symbol with a given number of bits can have. This + * is how the Huffman codes can vary in length: each + * code with a value > limit[length] needs another + * bit. + */ + hufGroup = bd->groups+j; + hufGroup->minLen = minLen; + hufGroup->maxLen = maxLen; + /* Note that minLen can't be smaller than 1, so we + adjust the base and limit array pointers so we're + not always wasting the first entry. We do this + again when using them (during symbol decoding).*/ + base = hufGroup->base-1; + limit = hufGroup->limit-1; + /* Calculate permute[]. Concurently, initialize + * temp[] and limit[]. */ + pp = 0; + for (i = minLen; i <= maxLen; i++) { + temp[i] = limit[i] = 0; + for (t = 0; t < symCount; t++) + if (length[t] == i) + hufGroup->permute[pp++] = t; + } + /* Count symbols coded for at each bit length */ + for (i = 0; i < symCount; i++) + temp[length[i]]++; + /* Calculate limit[] (the largest symbol-coding value + *at each bit length, which is (previous limit << + *1)+symbols at this level), and base[] (number of + *symbols to ignore at each bit length, which is limit + *minus the cumulative count of symbols coded for + *already). */ + pp = t = 0; + for (i = minLen; i < maxLen; i++) { + pp += temp[i]; + /* We read the largest possible symbol size + and then unget bits after determining how + many we need, and those extra bits could be + set to anything. (They're noise from + future symbols.) At each level we're + really only interested in the first few + bits, so here we set all the trailing + to-be-ignored bits to 1 so they don't + affect the value > limit[length] + comparison. */ + limit[i] = (pp << (maxLen - i)) - 1; + pp <<= 1; + base[i+1] = pp-(t += temp[i]); + } + limit[maxLen+1] = INT_MAX; /* Sentinal value for + * reading next sym. */ + limit[maxLen] = pp+temp[maxLen]-1; + base[minLen] = 0; + } + /* We've finished reading and digesting the block header. 
Now + read this block's Huffman coded symbols from the file and + undo the Huffman coding and run length encoding, saving the + result into dbuf[dbufCount++] = uc */ + + /* Initialize symbol occurrence counters and symbol Move To + * Front table */ + for (i = 0; i < 256; i++) { + byteCount[i] = 0; + mtfSymbol[i] = (unsigned char)i; + } + /* Loop through compressed symbols. */ + runPos = dbufCount = symCount = selector = 0; + for (;;) { + /* Determine which Huffman coding group to use. */ + if (!(symCount--)) { + symCount = GROUP_SIZE-1; + if (selector >= nSelectors) + return RETVAL_DATA_ERROR; + hufGroup = bd->groups+selectors[selector++]; + base = hufGroup->base-1; + limit = hufGroup->limit-1; + } + /* Read next Huffman-coded symbol. */ + /* Note: It is far cheaper to read maxLen bits and + back up than it is to read minLen bits and then an + additional bit at a time, testing as we go. + Because there is a trailing last block (with file + CRC), there is no danger of the overread causing an + unexpected EOF for a valid compressed file. As a + further optimization, we do the read inline + (falling back to a call to get_bits if the buffer + runs dry). The following (up to got_huff_bits:) is + equivalent to j = get_bits(bd, hufGroup->maxLen); + */ + while (bd->inbufBitCount < hufGroup->maxLen) { + if (bd->inbufPos == bd->inbufCount) { + j = get_bits(bd, hufGroup->maxLen); + goto got_huff_bits; + } + bd->inbufBits = + (bd->inbufBits << 8)|bd->inbuf[bd->inbufPos++]; + bd->inbufBitCount += 8; + }; + bd->inbufBitCount -= hufGroup->maxLen; + j = (bd->inbufBits >> bd->inbufBitCount)& + ((1 << hufGroup->maxLen)-1); +got_huff_bits: + /* Figure how how many bits are in next symbol and + * unget extras */ + i = hufGroup->minLen; + while (j > limit[i]) + ++i; + bd->inbufBitCount += (hufGroup->maxLen - i); + /* Huffman decode value to get nextSym (with bounds checking) */ + if ((i > hufGroup->maxLen) + || (((unsigned)(j = (j>>(hufGroup->maxLen-i))-base[i])) + >= MAX_SYMBOLS)) + return RETVAL_DATA_ERROR; + nextSym = hufGroup->permute[j]; + /* We have now decoded the symbol, which indicates + either a new literal byte, or a repeated run of the + most recent literal byte. First, check if nextSym + indicates a repeated run, and if so loop collecting + how many times to repeat the last literal. */ + if (((unsigned)nextSym) <= SYMBOL_RUNB) { /* RUNA or RUNB */ + /* If this is the start of a new run, zero out + * counter */ + if (!runPos) { + runPos = 1; + t = 0; + } + /* Neat trick that saves 1 symbol: instead of + or-ing 0 or 1 at each bit position, add 1 + or 2 instead. For example, 1011 is 1 << 0 + + 1 << 1 + 2 << 2. 1010 is 2 << 0 + 2 << 1 + + 1 << 2. You can make any bit pattern + that way using 1 less symbol than the basic + or 0/1 method (except all bits 0, which + would use no symbols, but a run of length 0 + doesn't mean anything in this context). + Thus space is saved. */ + t += (runPos << nextSym); + /* +runPos if RUNA; +2*runPos if RUNB */ + + runPos <<= 1; + continue; + } + /* When we hit the first non-run symbol after a run, + we now know how many times to repeat the last + literal, so append that many copies to our buffer + of decoded symbols (dbuf) now. (The last literal + used is the one at the head of the mtfSymbol + array.) */ + if (runPos) { + runPos = 0; + if (dbufCount+t >= dbufSize) + return RETVAL_DATA_ERROR; + + uc = symToByte[mtfSymbol[0]]; + byteCount[uc] += t; + while (t--) + dbuf[dbufCount++] = uc; + } + /* Is this the terminating symbol? 
*/ + if (nextSym > symTotal) + break; + /* At this point, nextSym indicates a new literal + character. Subtract one to get the position in the + MTF array at which this literal is currently to be + found. (Note that the result can't be -1 or 0, + because 0 and 1 are RUNA and RUNB. But another + instance of the first symbol in the mtf array, + position 0, would have been handled as part of a + run above. Therefore 1 unused mtf position minus 2 + non-literal nextSym values equals -1.) */ + if (dbufCount >= dbufSize) + return RETVAL_DATA_ERROR; + i = nextSym - 1; + uc = mtfSymbol[i]; + /* Adjust the MTF array. Since we typically expect to + *move only a small number of symbols, and are bound + *by 256 in any case, using memmove here would + *typically be bigger and slower due to function call + *overhead and other assorted setup costs. */ + do { + mtfSymbol[i] = mtfSymbol[i-1]; + } while (--i); + mtfSymbol[0] = uc; + uc = symToByte[uc]; + /* We have our literal byte. Save it into dbuf. */ + byteCount[uc]++; + dbuf[dbufCount++] = (unsigned int)uc; + } + /* At this point, we've read all the Huffman-coded symbols + (and repeated runs) for this block from the input stream, + and decoded them into the intermediate buffer. There are + dbufCount many decoded bytes in dbuf[]. Now undo the + Burrows-Wheeler transform on dbuf. See + http://dogma.net/markn/articles/bwt/bwt.htm + */ + /* Turn byteCount into cumulative occurrence counts of 0 to n-1. */ + j = 0; + for (i = 0; i < 256; i++) { + k = j+byteCount[i]; + byteCount[i] = j; + j = k; + } + /* Figure out what order dbuf would be in if we sorted it. */ + for (i = 0; i < dbufCount; i++) { + uc = (unsigned char)(dbuf[i] & 0xff); + dbuf[byteCount[uc]] |= (i << 8); + byteCount[uc]++; + } + /* Decode first byte by hand to initialize "previous" byte. + Note that it doesn't get output, and if the first three + characters are identical it doesn't qualify as a run (hence + writeRunCountdown = 5). */ + if (dbufCount) { + if (origPtr >= dbufCount) + return RETVAL_DATA_ERROR; + bd->writePos = dbuf[origPtr]; + bd->writeCurrent = (unsigned char)(bd->writePos&0xff); + bd->writePos >>= 8; + bd->writeRunCountdown = 5; + } + bd->writeCount = dbufCount; + + return RETVAL_OK; +} + +/* Undo burrows-wheeler transform on intermediate buffer to produce output. + If start_bunzip was initialized with out_fd =-1, then up to len bytes of + data are written to outbuf. Return value is number of bytes written or + error (all errors are negative numbers). If out_fd!=-1, outbuf and len + are ignored, data is written to out_fd and return is RETVAL_OK or error. +*/ + +static int INIT read_bunzip(struct bunzip_data *bd, char *outbuf, int len) +{ + const unsigned int *dbuf; + int pos, xcurrent, previous, gotcount; + + /* If last read was short due to end of file, return last block now */ + if (bd->writeCount < 0) + return bd->writeCount; + + gotcount = 0; + dbuf = bd->dbuf; + pos = bd->writePos; + xcurrent = bd->writeCurrent; + + /* We will always have pending decoded data to write into the output + buffer unless this is the very first call (in which case we haven't + Huffman-decoded a block into the intermediate buffer yet). 
*/ + + if (bd->writeCopies) { + /* Inside the loop, writeCopies means extra copies (beyond 1) */ + --bd->writeCopies; + /* Loop outputting bytes */ + for (;;) { + /* If the output buffer is full, snapshot + * state and return */ + if (gotcount >= len) { + bd->writePos = pos; + bd->writeCurrent = xcurrent; + bd->writeCopies++; + return len; + } + /* Write next byte into output buffer, updating CRC */ + outbuf[gotcount++] = xcurrent; + bd->writeCRC = (((bd->writeCRC) << 8) + ^bd->crc32Table[((bd->writeCRC) >> 24) + ^xcurrent]); + /* Loop now if we're outputting multiple + * copies of this byte */ + if (bd->writeCopies) { + --bd->writeCopies; + continue; + } +decode_next_byte: + if (!bd->writeCount--) + break; + /* Follow sequence vector to undo + * Burrows-Wheeler transform */ + previous = xcurrent; + pos = dbuf[pos]; + xcurrent = pos&0xff; + pos >>= 8; + /* After 3 consecutive copies of the same + byte, the 4th is a repeat count. We count + down from 4 instead *of counting up because + testing for non-zero is faster */ + if (--bd->writeRunCountdown) { + if (xcurrent != previous) + bd->writeRunCountdown = 4; + } else { + /* We have a repeated run, this byte + * indicates the count */ + bd->writeCopies = xcurrent; + xcurrent = previous; + bd->writeRunCountdown = 5; + /* Sometimes there are just 3 bytes + * (run length 0) */ + if (!bd->writeCopies) + goto decode_next_byte; + /* Subtract the 1 copy we'd output + * anyway to get extras */ + --bd->writeCopies; + } + } + /* Decompression of this block completed successfully */ + bd->writeCRC = ~bd->writeCRC; + bd->totalCRC = ((bd->totalCRC << 1) | + (bd->totalCRC >> 31)) ^ bd->writeCRC; + /* If this block had a CRC error, force file level CRC error. */ + if (bd->writeCRC != bd->headerCRC) { + bd->totalCRC = bd->headerCRC+1; + return RETVAL_LAST_BLOCK; + } + } + + /* Refill the intermediate buffer by Huffman-decoding next + * block of input */ + /* (previous is just a convenient unused temp variable here) */ + previous = get_next_block(bd); + if (previous) { + bd->writeCount = previous; + return (previous != RETVAL_LAST_BLOCK) ? previous : gotcount; + } + bd->writeCRC = 0xffffffffUL; + pos = bd->writePos; + xcurrent = bd->writeCurrent; + goto decode_next_byte; +} + +static int INIT nofill(void *buf, unsigned int len) +{ + return -1; +} + +/* Allocate the structure, read file header. If in_fd ==-1, inbuf must contain + a complete bunzip file (len bytes long). If in_fd!=-1, inbuf and len are + ignored, and data is read from file handle into temporary buffer. */ +static int INIT start_bunzip(struct bunzip_data **bdp, void *inbuf, int len, + int (*fill)(void*, unsigned int)) +{ + struct bunzip_data *bd; + unsigned int i, j, c; + const unsigned int BZh0 = + (((unsigned int)'B') << 24)+(((unsigned int)'Z') << 16) + +(((unsigned int)'h') << 8)+(unsigned int)'0'; + + /* Figure out how much data to allocate */ + i = sizeof(struct bunzip_data); + + /* Allocate bunzip_data. Most fields initialize to zero. */ + bd = *bdp = malloc(i); + memset(bd, 0, sizeof(struct bunzip_data)); + /* Setup input buffer */ + bd->inbuf = inbuf; + bd->inbufCount = len; + if (fill != NULL) + bd->fill = fill; + else + bd->fill = nofill; + + /* Init the CRC32 table (big endian) */ + for (i = 0; i < 256; i++) { + c = i << 24; + for (j = 8; j; j--) + c = c&0x80000000 ? (c << 1)^0x04c11db7 : (c << 1); + bd->crc32Table[i] = c; + } + + /* Ensure that file starts with "BZh['1'-'9']." 
*/ + i = get_bits(bd, 32); + if (((unsigned int)(i-BZh0-1)) >= 9) + return RETVAL_NOT_BZIP_DATA; + + /* Fourth byte (ascii '1'-'9'), indicates block size in units of 100k of + uncompressed data. Allocate intermediate buffer for block. */ + bd->dbufSize = 100000*(i-BZh0); + + bd->dbuf = large_malloc(bd->dbufSize * sizeof(int)); + return RETVAL_OK; +} + +/* Example usage: decompress src_fd to dst_fd. (Stops at end of bzip2 data, + not end of file.) */ +STATIC int INIT bunzip2(unsigned char *buf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *outbuf, + int *pos, + void(*error_fn)(char *x)) +{ + struct bunzip_data *bd; + int i = -1; + unsigned char *inbuf; + + set_error_fn(error_fn); + if (flush) + outbuf = malloc(BZIP2_IOBUF_SIZE); + else + len -= 4; /* Uncompressed size hack active in pre-boot + environment */ + if (!outbuf) { + error("Could not allocate output bufer"); + return -1; + } + if (buf) + inbuf = buf; + else + inbuf = malloc(BZIP2_IOBUF_SIZE); + if (!inbuf) { + error("Could not allocate input bufer"); + goto exit_0; + } + i = start_bunzip(&bd, inbuf, len, fill); + if (!i) { + for (;;) { + i = read_bunzip(bd, outbuf, BZIP2_IOBUF_SIZE); + if (i <= 0) + break; + if (!flush) + outbuf += i; + else + if (i != flush(outbuf, i)) { + i = RETVAL_UNEXPECTED_OUTPUT_EOF; + break; + } + } + } + /* Check CRC and release memory */ + if (i == RETVAL_LAST_BLOCK) { + if (bd->headerCRC != bd->totalCRC) + error("Data integrity error when decompressing."); + else + i = RETVAL_OK; + } else if (i == RETVAL_UNEXPECTED_OUTPUT_EOF) { + error("Compressed file ends unexpectedly"); + } + if (bd->dbuf) + large_free(bd->dbuf); + if (pos) + *pos = bd->inbufPos; + free(bd); + if (!buf) + free(inbuf); +exit_0: + if (flush) + free(outbuf); + return i; +} + +#define decompress bunzip2 diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c new file mode 100644 index 0000000..163e66a --- /dev/null +++ b/lib/decompress_inflate.c @@ -0,0 +1,167 @@ +#ifdef STATIC +/* Pre-boot environment: included */ + +/* prevent inclusion of _LINUX_KERNEL_H in pre-boot environment: lots + * errors about console_printk etc... on ARM */ +#define _LINUX_KERNEL_H + +#include "zlib_inflate/inftrees.c" +#include "zlib_inflate/inffast.c" +#include "zlib_inflate/inflate.c" + +#else /* STATIC */ +/* initramfs et al: linked */ + +#include + +#include "zlib_inflate/inftrees.h" +#include "zlib_inflate/inffast.h" +#include "zlib_inflate/inflate.h" + +#include "zlib_inflate/infutil.h" + +#endif /* STATIC */ + +#include + +#define INBUF_LEN (16*1024) + +/* Included from initramfs et al code */ +STATIC int INIT gunzip(unsigned char *buf, int len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *out_buf, + int *pos, + void(*error_fn)(char *x)) { + u8 *zbuf; + struct z_stream_s *strm; + int rc; + size_t out_len; + + set_error_fn(error_fn); + rc = -1; + if (flush) { + out_len = 0x8100; /* 32 K */ + out_buf = malloc(out_len); + } else { + out_len = 0x7fffffff; /* no limit */ + } + if (!out_buf) { + error("Out of memory while allocating output buffer"); + goto gunzip_nomem1; + } + + if (buf) + zbuf = buf; + else { + zbuf = malloc(INBUF_LEN); + len = 0; + } + if (!zbuf) { + error("Out of memory while allocating input buffer"); + goto gunzip_nomem2; + } + + strm = malloc(sizeof(*strm)); + if (strm == NULL) { + error("Out of memory while allocating z_stream"); + goto gunzip_nomem3; + } + + strm->workspace = malloc(flush ? 
zlib_inflate_workspacesize() : + sizeof(struct inflate_state)); + if (strm->workspace == NULL) { + error("Out of memory while allocating workspace"); + goto gunzip_nomem4; + } + + if (len == 0) + len = fill(zbuf, INBUF_LEN); + + /* verify the gzip header */ + if (len < 10 || + zbuf[0] != 0x1f || zbuf[1] != 0x8b || zbuf[2] != 0x08) { + if (pos) + *pos = 0; + error("Not a gzip file"); + goto gunzip_5; + } + + /* skip over gzip header (1f,8b,08... 10 bytes total + + * possible asciz filename) + */ + strm->next_in = zbuf + 10; + /* skip over asciz filename */ + if (zbuf[3] & 0x8) { + while (strm->next_in[0]) + strm->next_in++; + strm->next_in++; + } + strm->avail_in = len - 10; + + strm->next_out = out_buf; + strm->avail_out = out_len; + + rc = zlib_inflateInit2(strm, -MAX_WBITS); + + if (!flush) { + WS(strm)->inflate_state.wsize = 0; + WS(strm)->inflate_state.window = NULL; + } + + while (rc == Z_OK) { + if (strm->avail_in == 0) { + /* TODO: handle case where both pos and fill are set */ + len = fill(zbuf, INBUF_LEN); + if (len < 0) { + rc = -1; + error("read error"); + break; + } + strm->next_in = zbuf; + strm->avail_in = len; + } + rc = zlib_inflate(strm, 0); + + /* Write any data generated */ + if (flush && strm->next_out > out_buf) { + int l = strm->next_out - out_buf; + if (l != flush(out_buf, l)) { + rc = -1; + error("write error"); + break; + } + strm->next_out = out_buf; + strm->avail_out = out_len; + } + + /* after Z_FINISH, only Z_STREAM_END is "we unpacked it all" */ + if (rc == Z_STREAM_END) { + rc = 0; + break; + } else if (rc != Z_OK) { + error("uncompression error"); + rc = -1; + } + } + + zlib_inflateEnd(strm); + if (pos) + /* add + 8 to skip over trailer */ + *pos = strm->next_in - zbuf+8; + +gunzip_5: + free(strm->workspace); +gunzip_nomem4: + free(strm); +gunzip_nomem3: + if (!buf) + free(zbuf); +gunzip_nomem2: + if (flush) + free(out_buf); +gunzip_nomem1: + return rc; /* returns Z_OK (0) if successful */ +} + +#define decompress gunzip diff --git a/lib/decompress_unlzma.c b/lib/decompress_unlzma.c new file mode 100644 index 0000000..546f2f4 --- /dev/null +++ b/lib/decompress_unlzma.c @@ -0,0 +1,647 @@ +/* Lzma decompressor for Linux kernel. Shamelessly snarfed + *from busybox 1.1.1 + * + *Linux kernel adaptation + *Copyright (C) 2006 Alain < alain@knaff.lu > + * + *Based on small lzma deflate implementation/Small range coder + *implementation for lzma. + *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > + * + *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) + *Copyright (C) 1999-2005 Igor Pavlov + * + *Copyrights of the parts, see headers below. + * + * + *This program is free software; you can redistribute it and/or + *modify it under the terms of the GNU Lesser General Public + *License as published by the Free Software Foundation; either + *version 2.1 of the License, or (at your option) any later version. + * + *This program is distributed in the hope that it will be useful, + *but WITHOUT ANY WARRANTY; without even the implied warranty of + *MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + *Lesser General Public License for more details. + * + *You should have received a copy of the GNU Lesser General Public + *License along with this library; if not, write to the Free Software + *Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef STATIC +#include +#endif /* STATIC */ + +#include + +#define MIN(a, b) (((a) < (b)) ? 
(a) : (b)) + +static long long INIT read_int(unsigned char *ptr, int size) +{ + int i; + long long ret = 0; + + for (i = 0; i < size; i++) + ret = (ret << 8) | ptr[size-i-1]; + return ret; +} + +#define ENDIAN_CONVERT(x) \ + x = (typeof(x))read_int((unsigned char *)&x, sizeof(x)) + + +/* Small range coder implementation for lzma. + *Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > + * + *Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) + *Copyright (c) 1999-2005 Igor Pavlov + */ + +#include + +#define LZMA_IOBUF_SIZE 0x10000 + +struct rc { + int (*fill)(void*, unsigned int); + uint8_t *ptr; + uint8_t *buffer; + uint8_t *buffer_end; + int buffer_size; + uint32_t code; + uint32_t range; + uint32_t bound; +}; + + +#define RC_TOP_BITS 24 +#define RC_MOVE_BITS 5 +#define RC_MODEL_TOTAL_BITS 11 + + +/* Called twice: once at startup and once in rc_normalize() */ +static void INIT rc_read(struct rc *rc) +{ + rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE); + if (rc->buffer_size <= 0) + error("unexpected EOF"); + rc->ptr = rc->buffer; + rc->buffer_end = rc->buffer + rc->buffer_size; +} + +/* Called once */ +static inline void INIT rc_init(struct rc *rc, + int (*fill)(void*, unsigned int), + char *buffer, int buffer_size) +{ + rc->fill = fill; + rc->buffer = (uint8_t *)buffer; + rc->buffer_size = buffer_size; + rc->buffer_end = rc->buffer + rc->buffer_size; + rc->ptr = rc->buffer; + + rc->code = 0; + rc->range = 0xFFFFFFFF; +} + +static inline void INIT rc_init_code(struct rc *rc) +{ + int i; + + for (i = 0; i < 5; i++) { + if (rc->ptr >= rc->buffer_end) + rc_read(rc); + rc->code = (rc->code << 8) | *rc->ptr++; + } +} + + +/* Called once. TODO: bb_maybe_free() */ +static inline void INIT rc_free(struct rc *rc) +{ + free(rc->buffer); +} + +/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */ +static void INIT rc_do_normalize(struct rc *rc) +{ + if (rc->ptr >= rc->buffer_end) + rc_read(rc); + rc->range <<= 8; + rc->code = (rc->code << 8) | *rc->ptr++; +} +static inline void INIT rc_normalize(struct rc *rc) +{ + if (rc->range < (1 << RC_TOP_BITS)) + rc_do_normalize(rc); +} + +/* Called 9 times */ +/* Why rc_is_bit_0_helper exists? 
+ *Because we want to always expose (rc->code < rc->bound) to optimizer + */ +static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p) +{ + rc_normalize(rc); + rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS); + return rc->bound; +} +static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p) +{ + uint32_t t = rc_is_bit_0_helper(rc, p); + return rc->code < t; +} + +/* Called ~10 times, but very small, thus inlined */ +static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p) +{ + rc->range = rc->bound; + *p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS; +} +static inline void rc_update_bit_1(struct rc *rc, uint16_t *p) +{ + rc->range -= rc->bound; + rc->code -= rc->bound; + *p -= *p >> RC_MOVE_BITS; +} + +/* Called 4 times in unlzma loop */ +static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol) +{ + if (rc_is_bit_0(rc, p)) { + rc_update_bit_0(rc, p); + *symbol *= 2; + return 0; + } else { + rc_update_bit_1(rc, p); + *symbol = *symbol * 2 + 1; + return 1; + } +} + +/* Called once */ +static inline int INIT rc_direct_bit(struct rc *rc) +{ + rc_normalize(rc); + rc->range >>= 1; + if (rc->code >= rc->range) { + rc->code -= rc->range; + return 1; + } + return 0; +} + +/* Called twice */ +static inline void INIT +rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol) +{ + int i = num_levels; + + *symbol = 1; + while (i--) + rc_get_bit(rc, p + *symbol, symbol); + *symbol -= 1 << num_levels; +} + + +/* + * Small lzma deflate implementation. + * Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org > + * + * Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/) + * Copyright (C) 1999-2005 Igor Pavlov + */ + + +struct lzma_header { + uint8_t pos; + uint32_t dict_size; + uint64_t dst_size; +} __attribute__ ((packed)) ; + + +#define LZMA_BASE_SIZE 1846 +#define LZMA_LIT_SIZE 768 + +#define LZMA_NUM_POS_BITS_MAX 4 + +#define LZMA_LEN_NUM_LOW_BITS 3 +#define LZMA_LEN_NUM_MID_BITS 3 +#define LZMA_LEN_NUM_HIGH_BITS 8 + +#define LZMA_LEN_CHOICE 0 +#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1) +#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1) +#define LZMA_LEN_MID (LZMA_LEN_LOW \ + + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS))) +#define LZMA_LEN_HIGH (LZMA_LEN_MID \ + +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS))) +#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS)) + +#define LZMA_NUM_STATES 12 +#define LZMA_NUM_LIT_STATES 7 + +#define LZMA_START_POS_MODEL_INDEX 4 +#define LZMA_END_POS_MODEL_INDEX 14 +#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1)) + +#define LZMA_NUM_POS_SLOT_BITS 6 +#define LZMA_NUM_LEN_TO_POS_STATES 4 + +#define LZMA_NUM_ALIGN_BITS 4 + +#define LZMA_MATCH_MIN_LEN 2 + +#define LZMA_IS_MATCH 0 +#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)) +#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES) +#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES) +#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES) +#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES) +#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \ + + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX)) +#define LZMA_SPEC_POS (LZMA_POS_SLOT \ + +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS)) +#define LZMA_ALIGN (LZMA_SPEC_POS \ + + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX) +#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS)) +#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS) +#define 
LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS) + + +struct writer { + uint8_t *buffer; + uint8_t previous_byte; + size_t buffer_pos; + int bufsize; + size_t global_pos; + int(*flush)(void*, unsigned int); + struct lzma_header *header; +}; + +struct cstate { + int state; + uint32_t rep0, rep1, rep2, rep3; +}; + +static inline size_t INIT get_pos(struct writer *wr) +{ + return + wr->global_pos + wr->buffer_pos; +} + +static inline uint8_t INIT peek_old_byte(struct writer *wr, + uint32_t offs) +{ + if (!wr->flush) { + int32_t pos; + while (offs > wr->header->dict_size) + offs -= wr->header->dict_size; + pos = wr->buffer_pos - offs; + return wr->buffer[pos]; + } else { + uint32_t pos = wr->buffer_pos - offs; + while (pos >= wr->header->dict_size) + pos += wr->header->dict_size; + return wr->buffer[pos]; + } + +} + +static inline void INIT write_byte(struct writer *wr, uint8_t byte) +{ + wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte; + if (wr->flush && wr->buffer_pos == wr->header->dict_size) { + wr->buffer_pos = 0; + wr->global_pos += wr->header->dict_size; + wr->flush((char *)wr->buffer, wr->header->dict_size); + } +} + + +static inline void INIT copy_byte(struct writer *wr, uint32_t offs) +{ + write_byte(wr, peek_old_byte(wr, offs)); +} + +static inline void INIT copy_bytes(struct writer *wr, + uint32_t rep0, int len) +{ + do { + copy_byte(wr, rep0); + len--; + } while (len != 0 && wr->buffer_pos < wr->header->dst_size); +} + +static inline void INIT process_bit0(struct writer *wr, struct rc *rc, + struct cstate *cst, uint16_t *p, + int pos_state, uint16_t *prob, + int lc, uint32_t literal_pos_mask) { + int mi = 1; + rc_update_bit_0(rc, prob); + prob = (p + LZMA_LITERAL + + (LZMA_LIT_SIZE + * (((get_pos(wr) & literal_pos_mask) << lc) + + (wr->previous_byte >> (8 - lc)))) + ); + + if (cst->state >= LZMA_NUM_LIT_STATES) { + int match_byte = peek_old_byte(wr, cst->rep0); + do { + int bit; + uint16_t *prob_lit; + + match_byte <<= 1; + bit = match_byte & 0x100; + prob_lit = prob + 0x100 + bit + mi; + if (rc_get_bit(rc, prob_lit, &mi)) { + if (!bit) + break; + } else { + if (bit) + break; + } + } while (mi < 0x100); + } + while (mi < 0x100) { + uint16_t *prob_lit = prob + mi; + rc_get_bit(rc, prob_lit, &mi); + } + write_byte(wr, mi); + if (cst->state < 4) + cst->state = 0; + else if (cst->state < 10) + cst->state -= 3; + else + cst->state -= 6; +} + +static inline void INIT process_bit1(struct writer *wr, struct rc *rc, + struct cstate *cst, uint16_t *p, + int pos_state, uint16_t *prob) { + int offset; + uint16_t *prob_len; + int num_bits; + int len; + + rc_update_bit_1(rc, prob); + prob = p + LZMA_IS_REP + cst->state; + if (rc_is_bit_0(rc, prob)) { + rc_update_bit_0(rc, prob); + cst->rep3 = cst->rep2; + cst->rep2 = cst->rep1; + cst->rep1 = cst->rep0; + cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3; + prob = p + LZMA_LEN_CODER; + } else { + rc_update_bit_1(rc, prob); + prob = p + LZMA_IS_REP_G0 + cst->state; + if (rc_is_bit_0(rc, prob)) { + rc_update_bit_0(rc, prob); + prob = (p + LZMA_IS_REP_0_LONG + + (cst->state << + LZMA_NUM_POS_BITS_MAX) + + pos_state); + if (rc_is_bit_0(rc, prob)) { + rc_update_bit_0(rc, prob); + + cst->state = cst->state < LZMA_NUM_LIT_STATES ? 
+ 9 : 11; + copy_byte(wr, cst->rep0); + return; + } else { + rc_update_bit_1(rc, prob); + } + } else { + uint32_t distance; + + rc_update_bit_1(rc, prob); + prob = p + LZMA_IS_REP_G1 + cst->state; + if (rc_is_bit_0(rc, prob)) { + rc_update_bit_0(rc, prob); + distance = cst->rep1; + } else { + rc_update_bit_1(rc, prob); + prob = p + LZMA_IS_REP_G2 + cst->state; + if (rc_is_bit_0(rc, prob)) { + rc_update_bit_0(rc, prob); + distance = cst->rep2; + } else { + rc_update_bit_1(rc, prob); + distance = cst->rep3; + cst->rep3 = cst->rep2; + } + cst->rep2 = cst->rep1; + } + cst->rep1 = cst->rep0; + cst->rep0 = distance; + } + cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11; + prob = p + LZMA_REP_LEN_CODER; + } + + prob_len = prob + LZMA_LEN_CHOICE; + if (rc_is_bit_0(rc, prob_len)) { + rc_update_bit_0(rc, prob_len); + prob_len = (prob + LZMA_LEN_LOW + + (pos_state << + LZMA_LEN_NUM_LOW_BITS)); + offset = 0; + num_bits = LZMA_LEN_NUM_LOW_BITS; + } else { + rc_update_bit_1(rc, prob_len); + prob_len = prob + LZMA_LEN_CHOICE_2; + if (rc_is_bit_0(rc, prob_len)) { + rc_update_bit_0(rc, prob_len); + prob_len = (prob + LZMA_LEN_MID + + (pos_state << + LZMA_LEN_NUM_MID_BITS)); + offset = 1 << LZMA_LEN_NUM_LOW_BITS; + num_bits = LZMA_LEN_NUM_MID_BITS; + } else { + rc_update_bit_1(rc, prob_len); + prob_len = prob + LZMA_LEN_HIGH; + offset = ((1 << LZMA_LEN_NUM_LOW_BITS) + + (1 << LZMA_LEN_NUM_MID_BITS)); + num_bits = LZMA_LEN_NUM_HIGH_BITS; + } + } + + rc_bit_tree_decode(rc, prob_len, num_bits, &len); + len += offset; + + if (cst->state < 4) { + int pos_slot; + + cst->state += LZMA_NUM_LIT_STATES; + prob = + p + LZMA_POS_SLOT + + ((len < + LZMA_NUM_LEN_TO_POS_STATES ? len : + LZMA_NUM_LEN_TO_POS_STATES - 1) + << LZMA_NUM_POS_SLOT_BITS); + rc_bit_tree_decode(rc, prob, + LZMA_NUM_POS_SLOT_BITS, + &pos_slot); + if (pos_slot >= LZMA_START_POS_MODEL_INDEX) { + int i, mi; + num_bits = (pos_slot >> 1) - 1; + cst->rep0 = 2 | (pos_slot & 1); + if (pos_slot < LZMA_END_POS_MODEL_INDEX) { + cst->rep0 <<= num_bits; + prob = p + LZMA_SPEC_POS + + cst->rep0 - pos_slot - 1; + } else { + num_bits -= LZMA_NUM_ALIGN_BITS; + while (num_bits--) + cst->rep0 = (cst->rep0 << 1) | + rc_direct_bit(rc); + prob = p + LZMA_ALIGN; + cst->rep0 <<= LZMA_NUM_ALIGN_BITS; + num_bits = LZMA_NUM_ALIGN_BITS; + } + i = 1; + mi = 1; + while (num_bits--) { + if (rc_get_bit(rc, prob + mi, &mi)) + cst->rep0 |= i; + i <<= 1; + } + } else + cst->rep0 = pos_slot; + if (++(cst->rep0) == 0) + return; + } + + len += LZMA_MATCH_MIN_LEN; + + copy_bytes(wr, cst->rep0, len); +} + + + +STATIC inline int INIT unlzma(unsigned char *buf, int in_len, + int(*fill)(void*, unsigned int), + int(*flush)(void*, unsigned int), + unsigned char *output, + int *posp, + void(*error_fn)(char *x) + ) +{ + struct lzma_header header; + int lc, pb, lp; + uint32_t pos_state_mask; + uint32_t literal_pos_mask; + uint16_t *p; + int num_probs; + struct rc rc; + int i, mi; + struct writer wr; + struct cstate cst; + unsigned char *inbuf; + int ret = -1; + + set_error_fn(error_fn); + if (!flush) + in_len -= 4; /* Uncompressed size hack active in pre-boot + environment */ + if (buf) + inbuf = buf; + else + inbuf = malloc(LZMA_IOBUF_SIZE); + if (!inbuf) { + error("Could not allocate input bufer"); + goto exit_0; + } + + cst.state = 0; + cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1; + + wr.header = &header; + wr.flush = flush; + wr.global_pos = 0; + wr.previous_byte = 0; + wr.buffer_pos = 0; + + rc_init(&rc, fill, inbuf, in_len); + + for (i = 0; i < sizeof(header); i++) { + if (rc.ptr 
>= rc.buffer_end) + rc_read(&rc); + ((unsigned char *)&header)[i] = *rc.ptr++; + } + + if (header.pos >= (9 * 5 * 5)) + error("bad header"); + + mi = 0; + lc = header.pos; + while (lc >= 9) { + mi++; + lc -= 9; + } + pb = 0; + lp = mi; + while (lp >= 5) { + pb++; + lp -= 5; + } + pos_state_mask = (1 << pb) - 1; + literal_pos_mask = (1 << lp) - 1; + + ENDIAN_CONVERT(header.dict_size); + ENDIAN_CONVERT(header.dst_size); + + if (header.dict_size == 0) + header.dict_size = 1; + + if (output) + wr.buffer = output; + else { + wr.bufsize = MIN(header.dst_size, header.dict_size); + wr.buffer = large_malloc(wr.bufsize); + } + if (wr.buffer == NULL) + goto exit_1; + + num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp)); + p = (uint16_t *) large_malloc(num_probs * sizeof(*p)); + if (p == 0) + goto exit_2; + num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp)); + for (i = 0; i < num_probs; i++) + p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1; + + rc_init_code(&rc); + + while (get_pos(&wr) < header.dst_size) { + int pos_state = get_pos(&wr) & pos_state_mask; + uint16_t *prob = p + LZMA_IS_MATCH + + (cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state; + if (rc_is_bit_0(&rc, prob)) + process_bit0(&wr, &rc, &cst, p, pos_state, prob, + lc, literal_pos_mask); + else { + process_bit1(&wr, &rc, &cst, p, pos_state, prob); + if (cst.rep0 == 0) + break; + } + } + + if (posp) + *posp = rc.ptr-rc.buffer; + if (wr.flush) + wr.flush(wr.buffer, wr.buffer_pos); + ret = 0; + large_free(p); +exit_2: + if (!output) + large_free(wr.buffer); +exit_1: + if (!buf) + free(inbuf); +exit_0: + return ret; +} + +#define decompress unlzma diff --git a/lib/zlib_inflate/inflate.h b/lib/zlib_inflate/inflate.h index df8a6c9..3d17b3d 100644 --- a/lib/zlib_inflate/inflate.h +++ b/lib/zlib_inflate/inflate.h @@ -1,3 +1,6 @@ +#ifndef INFLATE_H +#define INFLATE_H + /* inflate.h -- internal inflate state definition * Copyright (C) 1995-2004 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h @@ -105,3 +108,4 @@ struct inflate_state { unsigned short work[288]; /* work area for code table building */ code codes[ENOUGH]; /* space for code tables */ }; +#endif diff --git a/lib/zlib_inflate/inftrees.h b/lib/zlib_inflate/inftrees.h index 5f5219b..b70b473 100644 --- a/lib/zlib_inflate/inftrees.h +++ b/lib/zlib_inflate/inftrees.h @@ -1,3 +1,6 @@ +#ifndef INFTREES_H +#define INFTREES_H + /* inftrees.h -- header to use inftrees.c * Copyright (C) 1995-2005 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h @@ -53,3 +56,4 @@ typedef enum { extern int zlib_inflate_table (codetype type, unsigned short *lens, unsigned codes, code **table, unsigned *bits, unsigned short *work); +#endif -- cgit v1.1 From 30d65dbfe3add7f010a075991dc0bfeaebb7d9e1 Mon Sep 17 00:00:00 2001 From: Alain Knaff Date: Sun, 4 Jan 2009 22:46:17 +0100 Subject: bzip2/lzma: config and initramfs support for bzip2/lzma decompression Impact: New code for initramfs decompression, new features This is the second part of the bzip2/lzma patch The bzip patch is based on an idea by Christian Ludwig, includes support for compressing the kernel with bzip2 or lzma rather than gzip. Both compressors give smaller sizes than gzip. Lzma's decompresses faster than bzip2. It also supports ramdisks and initramfs' compressed using these two compressors. 
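For context on the lzma side of this: the single properties byte that unlzma() reads at the start of the stream packs the three decoder parameters as props = (pb * 5 + lp) * 9 + lc, which is what the two repeated-subtraction loops in unlzma() above unpack into lc, lp and pb. A minimal stand-alone sketch of that decoding (hypothetical helper, not part of the patch):

/*
 * Decode an LZMA properties byte into lc/lp/pb.
 * Equivalent to the subtraction loops in unlzma() above.
 */
static int lzma_decode_props(unsigned char props, int *lc, int *lp, int *pb)
{
	if (props >= 9 * 5 * 5)	/* same bound unlzma() rejects as "bad header" */
		return -1;
	*lc = props % 9;	/* literal context bits */
	props /= 9;
	*lp = props % 5;	/* literal position bits */
	*pb = props / 5;	/* position bits */
	return 0;
}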
The functionality has been successfully used for a couple of years by the udpcast project This version applies to "tip" kernel 2.6.28 This part contains: - support for new compressions (bzip2 and lzma) in initramfs and old-style ramdisk - config dialog for kernel compression (but new kernel compressions not yet supported) Signed-off-by: Alain Knaff Signed-off-by: H. Peter Anvin --- lib/Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Makefile b/lib/Makefile index 32b0e64..e2a21d5 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ - proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o + proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o \ + decompress_inflate.o decompress_bunzip2.o decompress_unlzma.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o -- cgit v1.1 From c8531ab343dec88ed8005e403b1b304c710b7494 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Mon, 5 Jan 2009 13:48:31 -0800 Subject: bzip2/lzma: proper Kconfig dependencies for the ramdisk options Impact: Partial resolution of build failure Make all the compression algorithms properly configurable, and make sure the ramdisk options pull in the proper compression algorithms, as they should. Signed-off-by: H. Peter Anvin --- lib/Kconfig | 13 +++++++++++++ lib/Makefile | 7 +++++-- 2 files changed, 18 insertions(+), 2 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index 03c2c24..e37f061 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -98,6 +98,19 @@ config LZO_DECOMPRESS tristate # +# These all provide a common interface (hence the apparent duplication with +# ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.) +# +config DECOMPRESS_GZIP + tristate + +config DECOMPRESS_BZIP2 + tristate + +config DECOMPRESS_LZMA + tristate + +# # Generic allocator support is selected if needed # config GENERIC_ALLOCATOR diff --git a/lib/Makefile b/lib/Makefile index e2a21d5..d9ac5a4 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -11,8 +11,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ - proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o \ - decompress_inflate.o decompress_bunzip2.o decompress_unlzma.o + proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -66,6 +65,10 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ obj-$(CONFIG_LZO_COMPRESS) += lzo/ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ +obj-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o +obj-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o +obj-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o + obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o obj-$(CONFIG_TEXTSEARCH_BM) += ts_bm.o -- cgit v1.1 From 160c1d8e40866edfeae7d68816b7005d70acf391 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Mon, 5 Jan 2009 23:59:02 +0900 Subject: x86, ia64: convert to use generic dma_map_ops struct This converts X86 and IA64 to use include/linux/dma-mapping.h. It's a bit large but pretty boring. The major change for X86 is converting 'int dir' to 'enum dma_data_direction dir' in DMA mapping operations. 
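To make the type change concrete (illustrative recap only; the enum is the one already provided by the generic DMA mapping headers, not new code in this patch), the direction argument of the swiotlb entry points goes from a bare int to the strongly typed enum, exactly as the hunks below show:

/* Values as defined by the generic DMA API headers: */
enum dma_data_direction {
	DMA_BIDIRECTIONAL = 0,
	DMA_TO_DEVICE = 1,
	DMA_FROM_DEVICE = 2,
	DMA_NONE = 3,
};

/* Before: direction passed as a plain int. */
void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				 size_t size, int dir);

/* After: direction carries its meaning in the type. */
void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir);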
The major changes for IA64 is using map_page and unmap_page instead of map_single and unmap_single. Signed-off-by: FUJITA Tomonori Acked-by: Tony Luck Signed-off-by: Ingo Molnar --- lib/swiotlb.c | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 48deef7..d047de9 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -736,7 +736,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr, void swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_CPU); } @@ -744,7 +744,7 @@ EXPORT_SYMBOL(swiotlb_sync_single_for_cpu); void swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir) + size_t size, enum dma_data_direction dir) { swiotlb_sync_single(hwdev, dev_addr, size, dir, SYNC_FOR_DEVICE); } @@ -769,7 +769,8 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr, void swiotlb_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) + unsigned long offset, size_t size, + enum dma_data_direction dir) { swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_CPU); @@ -778,7 +779,8 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_cpu); void swiotlb_sync_single_range_for_device(struct device *hwdev, dma_addr_t dev_addr, - unsigned long offset, size_t size, int dir) + unsigned long offset, size_t size, + enum dma_data_direction dir) { swiotlb_sync_single_range(hwdev, dev_addr, offset, size, dir, SYNC_FOR_DEVICE); @@ -803,7 +805,7 @@ EXPORT_SYMBOL_GPL(swiotlb_sync_single_range_for_device); */ int swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, - int dir, struct dma_attrs *attrs) + enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -850,7 +852,7 @@ EXPORT_SYMBOL(swiotlb_map_sg); */ void swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, - int nelems, int dir, struct dma_attrs *attrs) + int nelems, enum dma_data_direction dir, struct dma_attrs *attrs) { struct scatterlist *sg; int i; @@ -902,7 +904,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, void swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir) + int nelems, enum dma_data_direction dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_CPU); } @@ -910,7 +912,7 @@ EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu); void swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg, - int nelems, int dir) + int nelems, enum dma_data_direction dir) { swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE); } -- cgit v1.1 From f98eee8ea99fe74ee9c4e867ba178ec3072793be Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Mon, 5 Jan 2009 23:59:03 +0900 Subject: x86, ia64: remove duplicated swiotlb code This adds swiotlb_map_page and swiotlb_unmap_page to lib/swiotlb.c and remove IA64 and X86's swiotlb_map_page and swiotlb_unmap_page. This also removes unnecessary swiotlb_map_single, swiotlb_map_single_attrs, swiotlb_unmap_single and swiotlb_unmap_single_attrs. 
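As a sketch of how a former swiotlb_map_single()/swiotlb_unmap_single() pair is expressed through the new page-based entry points (hypothetical wrappers for a lowmem buffer, not part of the patch; they assume the swiotlb_map_page()/swiotlb_unmap_page() prototypes added in the diff below):

static dma_addr_t example_map_lowmem(struct device *dev, void *ptr,
				     size_t size, enum dma_data_direction dir)
{
	/* map_single(ptr) becomes map_page(page, offset-within-page) */
	return swiotlb_map_page(dev, virt_to_page(ptr), offset_in_page(ptr),
				size, dir, NULL);
}

static void example_unmap_lowmem(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir)
{
	swiotlb_unmap_page(dev, dev_addr, size, dir, NULL);
}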
Signed-off-by: FUJITA Tomonori Acked-by: Tony Luck Signed-off-by: Ingo Molnar --- lib/swiotlb.c | 48 ++++++++++++++++++------------------------------ 1 file changed, 18 insertions(+), 30 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index d047de9..ec7922b 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -636,11 +636,14 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic) * Once the device is given the dma address, the device owns this memory until * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed. */ -dma_addr_t -swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, - int dir, struct dma_attrs *attrs) -{ - dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, ptr); +dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs) +{ + phys_addr_t phys = page_to_phys(page) + offset; + void *ptr = page_address(page) + offset; + dma_addr_t dev_addr = swiotlb_phys_to_bus(dev, phys); void *map; BUG_ON(dir == DMA_NONE); @@ -649,37 +652,30 @@ swiotlb_map_single_attrs(struct device *hwdev, void *ptr, size_t size, * we can safely return the device addr and not worry about bounce * buffering it. */ - if (!address_needs_mapping(hwdev, dev_addr, size) && + if (!address_needs_mapping(dev, dev_addr, size) && !range_needs_mapping(ptr, size)) return dev_addr; /* * Oh well, have to allocate and map a bounce buffer. */ - map = map_single(hwdev, virt_to_phys(ptr), size, dir); + map = map_single(dev, phys, size, dir); if (!map) { - swiotlb_full(hwdev, size, dir, 1); + swiotlb_full(dev, size, dir, 1); map = io_tlb_overflow_buffer; } - dev_addr = swiotlb_virt_to_bus(hwdev, map); + dev_addr = swiotlb_virt_to_bus(dev, map); /* * Ensure that the address returned is DMA'ble */ - if (address_needs_mapping(hwdev, dev_addr, size)) + if (address_needs_mapping(dev, dev_addr, size)) panic("map_single: bounce buffer is not DMA'ble"); return dev_addr; } -EXPORT_SYMBOL(swiotlb_map_single_attrs); - -dma_addr_t -swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir) -{ - return swiotlb_map_single_attrs(hwdev, ptr, size, dir, NULL); -} -EXPORT_SYMBOL(swiotlb_map_single); +EXPORT_SYMBOL_GPL(swiotlb_map_page); /* * Unmap a single streaming mode DMA translation. The dma_addr and size must @@ -689,9 +685,9 @@ EXPORT_SYMBOL(swiotlb_map_single); * After this call, reads by the cpu to the buffer are guaranteed to see * whatever the device wrote there. */ -void -swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, - size_t size, int dir, struct dma_attrs *attrs) +void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) { char *dma_addr = swiotlb_bus_to_virt(dev_addr); @@ -701,15 +697,7 @@ swiotlb_unmap_single_attrs(struct device *hwdev, dma_addr_t dev_addr, else if (dir == DMA_FROM_DEVICE) dma_mark_clean(dma_addr, size); } -EXPORT_SYMBOL(swiotlb_unmap_single_attrs); - -void -swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size, - int dir) -{ - return swiotlb_unmap_single_attrs(hwdev, dev_addr, size, dir, NULL); -} -EXPORT_SYMBOL(swiotlb_unmap_single); +EXPORT_SYMBOL_GPL(swiotlb_unmap_page); /* * Make physical memory consistent for a single streaming mode DMA translation -- cgit v1.1 From 7856a16ea03ed9b17860d756ee6473c2e57882b2 Mon Sep 17 00:00:00 2001 From: "H. 
Peter Anvin" Date: Wed, 7 Jan 2009 00:01:43 -0800 Subject: bzip2/lzma: DECOMPRESS_GZIP should select ZLIB_INFLATE Impact: Partial resolution of build failure DECOMPRESS_GZIP is just a common-interface wrapper around the zlib_inflate code; it thus need to select it. Signed-off-by: H. Peter Anvin --- lib/Kconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index e37f061..daa4818 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -102,6 +102,7 @@ config LZO_DECOMPRESS # ZLIB_INFLATE; DECOMPRESS_GZIP is just a wrapper.) # config DECOMPRESS_GZIP + select ZLIB_INFLATE tristate config DECOMPRESS_BZIP2 -- cgit v1.1 From 5619448fc55650cb76cbb825e16e4bb5d1390399 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 8 Jan 2009 15:09:12 -0800 Subject: bzip2/lzma: fix constant in decompress_inflate Impact: Cleanup Fix constant 0x8100 /* 32K */; according to Alain the value 0x8100 was left over test code to test misalignment, the correct value is indeed 0x8000 == 32K. Signed-off-by: H. Peter Anvin --- lib/decompress_inflate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c index 163e66a..d2e7c75 100644 --- a/lib/decompress_inflate.c +++ b/lib/decompress_inflate.c @@ -41,7 +41,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len, set_error_fn(error_fn); rc = -1; if (flush) { - out_len = 0x8100; /* 32 K */ + out_len = 0x8000; /* 32 K */ out_buf = malloc(out_len); } else { out_len = 0x7fffffff; /* no limit */ -- cgit v1.1 From 6c11b12ac6f101732d43b5682f5b3ae4dda4205f Mon Sep 17 00:00:00 2001 From: Alain Knaff Date: Thu, 8 Jan 2009 15:10:19 -0800 Subject: bzip2/lzma: fix decompress_inflate.c vs multi-block-with-embedded-filename Impact: Bug fix Fix gunzip uncompression, so that it also works with files with embedded filenames that are larger than one block. Signed-off-by: Alain Knaff Signed-off-by: H. Peter Anvin --- lib/decompress_inflate.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/decompress_inflate.c b/lib/decompress_inflate.c index d2e7c75..839a329 100644 --- a/lib/decompress_inflate.c +++ b/lib/decompress_inflate.c @@ -97,7 +97,7 @@ STATIC int INIT gunzip(unsigned char *buf, int len, strm->next_in++; strm->next_in++; } - strm->avail_in = len - 10; + strm->avail_in = len - (strm->next_in - zbuf); strm->next_out = out_buf; strm->avail_out = out_len; -- cgit v1.1 From 889c92d21db40be0b7d22a59395060237895bb85 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Thu, 8 Jan 2009 15:14:17 -0800 Subject: bzip2/lzma: centralize format detection Centralize the compression format detection to a common routine in the lib directory, and use it for both initramfs and initrd. Signed-off-by: H. 
Peter Anvin --- lib/Makefile | 9 +++++---- lib/decompress.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 4 deletions(-) create mode 100644 lib/decompress.c (limited to 'lib') diff --git a/lib/Makefile b/lib/Makefile index d9ac5a4..790de7c 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ - proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o + proportions.o prio_heap.o ratelimit.o show_mem.o \ + is_single_threaded.o decompress.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -65,9 +66,9 @@ obj-$(CONFIG_REED_SOLOMON) += reed_solomon/ obj-$(CONFIG_LZO_COMPRESS) += lzo/ obj-$(CONFIG_LZO_DECOMPRESS) += lzo/ -obj-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o -obj-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o -obj-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o +lib-$(CONFIG_DECOMPRESS_GZIP) += decompress_inflate.o +lib-$(CONFIG_DECOMPRESS_BZIP2) += decompress_bunzip2.o +lib-$(CONFIG_DECOMPRESS_LZMA) += decompress_unlzma.o obj-$(CONFIG_TEXTSEARCH) += textsearch.o obj-$(CONFIG_TEXTSEARCH_KMP) += ts_kmp.o diff --git a/lib/decompress.c b/lib/decompress.c new file mode 100644 index 0000000..edac55c --- /dev/null +++ b/lib/decompress.c @@ -0,0 +1,50 @@ +/* + * decompress.c + * + * Detect the decompression method based on magic number + */ + +#include + +#include +#include +#include + +#include +#include + +static const struct compress_format { + unsigned char magic[2]; + const char *name; + decompress_fn decompressor; +} compressed_formats[] = { +#ifdef CONFIG_DECOMPRESS_GZIP + { {037, 0213}, "gzip", gunzip }, + { {037, 0236}, "gzip", gunzip }, +#endif +#ifdef CONFIG_DECOMPRESS_BZIP2 + { {0x42, 0x5a}, "bzip2", bunzip2 }, +#endif +#ifdef CONFIG_DECOMPRESS_LZMA + { {0x5d, 0x00}, "lzma", unlzma }, +#endif + { {0, 0}, NULL, NULL } +}; + +decompress_fn decompress_method(const unsigned char *inbuf, int len, + const char **name) +{ + const struct compress_format *cf; + + if (len < 2) + return NULL; /* Need at least this much... */ + + for (cf = compressed_formats; cf->decompressor; cf++) { + if (!memcmp(inbuf, cf->magic, 2)) + break; + + } + if (name) + *name = cf->name; + return cf->decompressor; +} -- cgit v1.1 From 0b8698ab5847cbe25775083659f00c658a8161c9 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Fri, 9 Jan 2009 18:32:09 +0000 Subject: swiotlb: range_needs_mapping should take a physical address. The swiotlb_arch_range_needs_mapping() hook should take a physical address rather than a virtual address in order to support highmem pages. 
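(Illustration only, not part of the patch: with the new prototype, an architecture that wanted to force bouncing for high physical ranges could override the weak hook roughly as sketched below; the 4 GiB cut-off and the arch file this would live in are invented for the example.)

#include <linux/types.h>

/* Hypothetical per-architecture override of the weak hook, written against
 * the new physical-address prototype. A non-zero return makes swiotlb use a
 * bounce buffer for the range. */
int swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size)
{
	/* invented policy: bounce any range that ends above 4 GiB */
	return (paddr + size - 1) > 0xffffffffULL;
}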
Signed-off-by: Ian Campbell Signed-off-by: Ingo Molnar --- lib/swiotlb.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 30fe65e..31bae40 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -145,7 +145,7 @@ static void *swiotlb_bus_to_virt(dma_addr_t address) return phys_to_virt(swiotlb_bus_to_phys(address)); } -int __weak swiotlb_arch_range_needs_mapping(void *ptr, size_t size) +int __weak swiotlb_arch_range_needs_mapping(phys_addr_t paddr, size_t size) { return 0; } @@ -315,9 +315,9 @@ address_needs_mapping(struct device *hwdev, dma_addr_t addr, size_t size) return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size); } -static inline int range_needs_mapping(void *ptr, size_t size) +static inline int range_needs_mapping(phys_addr_t paddr, size_t size) { - return swiotlb_force || swiotlb_arch_range_needs_mapping(ptr, size); + return swiotlb_force || swiotlb_arch_range_needs_mapping(paddr, size); } static int is_swiotlb_buffer(char *addr) @@ -653,7 +653,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page, * buffering it. */ if (!address_needs_mapping(dev, dev_addr, size) && - !range_needs_mapping(ptr, size)) + !range_needs_mapping(virt_to_phys(ptr), size)) return dev_addr; /* @@ -804,7 +804,7 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, void *addr = sg_virt(sg); dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr); - if (range_needs_mapping(addr, sg->length) || + if (range_needs_mapping(sg_phys(sg), sg->length) || address_needs_mapping(hwdev, dev_addr, sg->length)) { void *map = map_single(hwdev, sg_phys(sg), sg->length, dir); -- cgit v1.1 From 961d7d0ee5150e0197cc81c2a8884ecb230276e2 Mon Sep 17 00:00:00 2001 From: Ian Campbell Date: Fri, 9 Jan 2009 18:32:10 +0000 Subject: swiotlb: do not use sg_virt() Scatterlists containing HighMem pages do not have a useful virtual address. Use the physical address instead. 
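(Illustration only, not part of the patch: a minimal sketch of the idea as it might look inside lib/swiotlb.c, where swiotlb_phys_to_bus() is visible; the helper name below is made up.)

/* Sketch: derive the bus address of a scatterlist entry from its physical
 * address. sg_phys(sg) is page_to_phys(sg_page(sg)) + sg->offset, so this
 * also works for HighMem pages that have no kernel virtual mapping, which
 * is exactly why sg_virt() is avoided here. */
static dma_addr_t sg_bus_address(struct device *hwdev, struct scatterlist *sg)
{
	phys_addr_t paddr = sg_phys(sg);

	return swiotlb_phys_to_bus(hwdev, paddr);
}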
Signed-off-by: Ian Campbell Signed-off-by: Ingo Molnar --- lib/swiotlb.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'lib') diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 31bae40..32e2bd3 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -801,10 +801,10 @@ swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl, int nelems, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - void *addr = sg_virt(sg); - dma_addr_t dev_addr = swiotlb_virt_to_bus(hwdev, addr); + phys_addr_t paddr = sg_phys(sg); + dma_addr_t dev_addr = swiotlb_phys_to_bus(hwdev, paddr); - if (range_needs_mapping(sg_phys(sg), sg->length) || + if (range_needs_mapping(paddr, sg->length) || address_needs_mapping(hwdev, dev_addr, sg->length)) { void *map = map_single(hwdev, sg_phys(sg), sg->length, dir); @@ -848,11 +848,11 @@ swiotlb_unmap_sg_attrs(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) + if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) unmap_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), sg->dma_length, dir); else if (dir == DMA_FROM_DEVICE) - dma_mark_clean(sg_virt(sg), sg->dma_length); + dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); } } EXPORT_SYMBOL(swiotlb_unmap_sg_attrs); @@ -882,11 +882,11 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sgl, BUG_ON(dir == DMA_NONE); for_each_sg(sgl, sg, nelems, i) { - if (sg->dma_address != swiotlb_virt_to_bus(hwdev, sg_virt(sg))) + if (sg->dma_address != swiotlb_phys_to_bus(hwdev, sg_phys(sg))) sync_single(hwdev, swiotlb_bus_to_virt(sg->dma_address), sg->dma_length, dir, target); else if (dir == DMA_FROM_DEVICE) - dma_mark_clean(sg_virt(sg), sg->dma_length); + dma_mark_clean(swiotlb_bus_to_virt(sg->dma_address), sg->dma_length); } } -- cgit v1.1 From 23a22d57a8962479ca630c9542e62d6f86fdf927 Mon Sep 17 00:00:00 2001 From: "H. Peter Anvin" Date: Mon, 12 Jan 2009 14:24:04 -0800 Subject: bzip2/lzma: comprehensible error messages for missing decompressor Instead of failing to identify a compressed image with a decompressor that we don't have compiled in, identify it and fail with a comprehensible panic message. Signed-off-by: H. 
Peter Anvin --- lib/decompress.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'lib') diff --git a/lib/decompress.c b/lib/decompress.c index edac55c..961f367 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -13,21 +13,25 @@ #include #include +#ifndef CONFIG_DECOMPRESS_GZIP +# define gunzip NULL +#endif +#ifndef CONFIG_DECOMPRESS_BZIP2 +# define bunzip2 NULL +#endif +#ifndef CONFIG_DECOMPRESS_LZMA +# define unlzma NULL +#endif + static const struct compress_format { unsigned char magic[2]; const char *name; decompress_fn decompressor; } compressed_formats[] = { -#ifdef CONFIG_DECOMPRESS_GZIP { {037, 0213}, "gzip", gunzip }, { {037, 0236}, "gzip", gunzip }, -#endif -#ifdef CONFIG_DECOMPRESS_BZIP2 { {0x42, 0x5a}, "bzip2", bunzip2 }, -#endif -#ifdef CONFIG_DECOMPRESS_LZMA { {0x5d, 0x00}, "lzma", unlzma }, -#endif { {0, 0}, NULL, NULL } }; -- cgit v1.1 From ceacc2c1c85ac498ca4cf297bdfe5b4aaa9fd0e0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 16 Jan 2009 14:46:40 +0100 Subject: sched: make plist a library facility Ingo Molnar wrote: > here's a new build failure with tip/sched/rt: > > LD .tmp_vmlinux1 > kernel/built-in.o: In function `set_curr_task_rt': > sched.c:(.text+0x3675): undefined reference to `plist_del' > kernel/built-in.o: In function `pick_next_task_rt': > sched.c:(.text+0x37ce): undefined reference to `plist_del' > kernel/built-in.o: In function `enqueue_pushable_task': > sched.c:(.text+0x381c): undefined reference to `plist_del' Eliminate the plist library kconfig and make it available unconditionally. Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- lib/Kconfig | 6 ------ lib/Makefile | 4 ++-- 2 files changed, 2 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index 03c2c24..fc8ea1c 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -136,12 +136,6 @@ config TEXTSEARCH_BM config TEXTSEARCH_FSM tristate -# -# plist support is select#ed if needed -# -config PLIST - boolean - config HAS_IOMEM boolean depends on !NO_IOMEM diff --git a/lib/Makefile b/lib/Makefile index 32b0e64..902d738 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -11,7 +11,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ rbtree.o radix-tree.o dump_stack.o \ idr.o int_sqrt.o extable.o prio_tree.o \ sha1.o irq_regs.o reciprocal_div.o argv_split.o \ - proportions.o prio_heap.o ratelimit.o show_mem.o is_single_threaded.o + proportions.o prio_heap.o ratelimit.o show_mem.o \ + is_single_threaded.o plist.o lib-$(CONFIG_MMU) += ioremap.o lib-$(CONFIG_SMP) += cpumask.o @@ -40,7 +41,6 @@ lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o -obj-$(CONFIG_PLIST) += plist.o obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o obj-$(CONFIG_DEBUG_LIST) += list_debug.o obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o -- cgit v1.1 From ff491a7334acfd74e515c896632e37e401f52676 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 5 Feb 2009 23:56:36 -0800 Subject: netlink: change return-value logic of netlink_broadcast() Currently, netlink_broadcast() reports errors to the caller if no messages at all were delivered: 1) If, at least, one message has been delivered correctly, returns 0. 2) Otherwise, if no messages at all were delivered due to skb_clone() failure, return -ENOBUFS. 3) Otherwise, if there are no listeners, return -ESRCH. 
With this patch, the caller knows if the delivery of any of the messages to the listeners has failed: 1) If it fails to deliver any message (for whatever reason), return -ENOBUFS. 2) Otherwise, if all messages were delivered OK, returns 0. 3) Otherwise, if no listeners, return -ESRCH. In the current ctnetlink code and in Netfilter in general, we can add reliable logging and connection tracking event delivery by dropping the packets whose events were not successfully delivered over Netlink. Of course, this option would be settable via /proc as this approach reduces performance (in terms of filtered connections per second by a stateful firewall) but provides reliable logging and event delivery (for conntrackd) in return. This patch also changes some clients of netlink_broadcast() that may report ENOBUFS errors via printk. This error handling is not of any help. Instead, the userspace daemons that are listening to those netlink messages should resync themselves with the kernel-side if they hit ENOBUFS. BTW, netlink_broadcast() clients include those that call cn_netlink_send(), nlmsg_multicast() and genlmsg_multicast() since they internally call netlink_broadcast() and return its error value. Signed-off-by: Pablo Neira Ayuso Signed-off-by: David S. Miller --- lib/kobject_uevent.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'lib') diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 318328d..3813102 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -227,6 +227,9 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, NETLINK_CB(skb).dst_group = 1; retval = netlink_broadcast(uevent_sock, skb, 0, 1, GFP_KERNEL); + /* ENOBUFS should be handled in userspace */ + if (retval == -ENOBUFS) + retval = 0; } else retval = -ENOMEM; } -- cgit v1.1 From c37682d907a615c9a8751748b58e9ba47d415429 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Wed, 14 Jan 2009 20:46:02 +0000 Subject: lmb: Rework lmb_dump_all() output The lmb_dump_all() output didn't include the RMO size, which is interesting on powerpc. The output was also a bit spacey and not well aligned, and didn't show you the end addresses. Signed-off-by: Michael Ellerman Acked-by: David S. 
Miller Signed-off-by: Benjamin Herrenschmidt --- lib/lmb.c | 42 +++++++++++++++++++++--------------------- 1 file changed, 21 insertions(+), 21 deletions(-) (limited to 'lib') diff --git a/lib/lmb.c b/lib/lmb.c index 97e54703..e4a6482 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -29,33 +29,33 @@ static int __init early_lmb(char *p) } early_param("lmb", early_lmb); -void lmb_dump_all(void) +static void lmb_dump(struct lmb_region *region, char *name) { - unsigned long i; + unsigned long long base, size; + int i; + + pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); + + for (i = 0; i < region->cnt; i++) { + base = region->region[i].base; + size = region->region[i].size; + + pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", + name, i, base, base + size - 1, size); + } +} +void lmb_dump_all(void) +{ if (!lmb_debug) return; - pr_info("lmb_dump_all:\n"); - pr_info(" memory.cnt = 0x%lx\n", lmb.memory.cnt); - pr_info(" memory.size = 0x%llx\n", - (unsigned long long)lmb.memory.size); - for (i=0; i < lmb.memory.cnt ;i++) { - pr_info(" memory.region[0x%lx].base = 0x%llx\n", - i, (unsigned long long)lmb.memory.region[i].base); - pr_info(" .size = 0x%llx\n", - (unsigned long long)lmb.memory.region[i].size); - } + pr_info("LMB configuration:\n"); + pr_info(" rmo_size = 0x%llx\n", (unsigned long long)lmb.rmo_size); + pr_info(" memory.size = 0x%llx\n", (unsigned long long)lmb.memory.size); - pr_info(" reserved.cnt = 0x%lx\n", lmb.reserved.cnt); - pr_info(" reserved.size = 0x%llx\n", - (unsigned long long)lmb.memory.size); - for (i=0; i < lmb.reserved.cnt ;i++) { - pr_info(" reserved.region[0x%lx].base = 0x%llx\n", - i, (unsigned long long)lmb.reserved.region[i].base); - pr_info(" .size = 0x%llx\n", - (unsigned long long)lmb.reserved.region[i].size); - } + lmb_dump(&lmb.memory, "memory"); + lmb_dump(&lmb.reserved, "reserved"); } static unsigned long lmb_addrs_overlap(u64 base1, u64 size1, u64 base2, -- cgit v1.1 From adf8b37bafc1495393201a2ae4235846371870d0 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 12 Feb 2009 13:56:34 +0100 Subject: [ARM] 5386/2: unwind: Add Makefile and Kconfig entries for ARM stack unwinding This patch also makes the frame pointer default to y only if !ARM_UNWIND. LOCKDEP no longer selects FRAME_POINTER if ARM_UNWIND is enabled. Signed-off-by: Catalin Marinas Signed-off-by: Russell King --- lib/Kconfig.debug | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 29044f5..08275a5 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -402,7 +402,7 @@ config LOCKDEP bool depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT select STACKTRACE - select FRAME_POINTER if !X86 && !MIPS && !PPC + select FRAME_POINTER if !X86 && !MIPS && !PPC && !ARM_UNWIND select KALLSYMS select KALLSYMS_ALL -- cgit v1.1 From e4aa7ca5a2e6d44f07ceb87d9448113f5b48a334 Mon Sep 17 00:00:00 2001 From: Alain Knaff Date: Thu, 19 Feb 2009 13:36:55 -0800 Subject: bzip2/lzma: don't stop search at first unconfigured compression Impact: Bugfix, avoids kernels which build but panic on boot Fix a bug in decompress.c : only scanned until the first non-configured compressor (with disastrous result especially if that was gzip.) Signed-off-by: Alain Knaff Signed-off-by: H. 
Peter Anvin --- lib/decompress.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/decompress.c b/lib/decompress.c index 961f367..d2842f5 100644 --- a/lib/decompress.c +++ b/lib/decompress.c @@ -43,7 +43,7 @@ decompress_fn decompress_method(const unsigned char *inbuf, int len, if (len < 2) return NULL; /* Need at least this much... */ - for (cf = compressed_formats; cf->decompressor; cf++) { + for (cf = compressed_formats; cf->name; cf++) { if (!memcmp(inbuf, cf->magic, 2)) break; -- cgit v1.1 From e9cc8bddaea3944fabfebb968bc88d603239beed Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 4 Mar 2009 14:53:30 +0800 Subject: netlink: Move netlink attribute parsing support to lib Netlink attribute parsing may be used even if CONFIG_NET is not set. Move it from net/netlink to lib and control its inclusion based on the new config symbol CONFIG_NLATTR, which is selected by CONFIG_NET. Signed-off-by: Geert Uytterhoeven Acked-by: David S. Miller Signed-off-by: Herbert Xu --- lib/Kconfig | 6 + lib/Makefile | 2 + lib/nlattr.c | 473 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 481 insertions(+) create mode 100644 lib/nlattr.c (limited to 'lib') diff --git a/lib/Kconfig b/lib/Kconfig index 03c2c24..cea9e30 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -174,4 +174,10 @@ config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS depends on EXPERIMENTAL && BROKEN +# +# Netlink attribute parsing support is select'ed if needed +# +config NLATTR + bool + endmenu diff --git a/lib/Makefile b/lib/Makefile index 32b0e64..b2c09da 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -84,6 +84,8 @@ obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o +obj-$(CONFIG_NLATTR) += nlattr.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/nlattr.c b/lib/nlattr.c new file mode 100644 index 0000000..56c3ce7 --- /dev/null +++ b/lib/nlattr.c @@ -0,0 +1,473 @@ +/* + * NETLINK Netlink attributes + * + * Authors: Thomas Graf + * Alexey Kuznetsov + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = { + [NLA_U8] = sizeof(u8), + [NLA_U16] = sizeof(u16), + [NLA_U32] = sizeof(u32), + [NLA_U64] = sizeof(u64), + [NLA_NESTED] = NLA_HDRLEN, +}; + +static int validate_nla(struct nlattr *nla, int maxtype, + const struct nla_policy *policy) +{ + const struct nla_policy *pt; + int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); + + if (type <= 0 || type > maxtype) + return 0; + + pt = &policy[type]; + + BUG_ON(pt->type > NLA_TYPE_MAX); + + switch (pt->type) { + case NLA_FLAG: + if (attrlen > 0) + return -ERANGE; + break; + + case NLA_NUL_STRING: + if (pt->len) + minlen = min_t(int, attrlen, pt->len + 1); + else + minlen = attrlen; + + if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) + return -EINVAL; + /* fall through */ + + case NLA_STRING: + if (attrlen < 1) + return -ERANGE; + + if (pt->len) { + char *buf = nla_data(nla); + + if (buf[attrlen - 1] == '\0') + attrlen--; + + if (attrlen > pt->len) + return -ERANGE; + } + break; + + case NLA_BINARY: + if (pt->len && attrlen > pt->len) + return -ERANGE; + break; + + case NLA_NESTED_COMPAT: + if (attrlen < pt->len) + return -ERANGE; + if (attrlen < NLA_ALIGN(pt->len)) + break; + if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN) + return -ERANGE; + nla = nla_data(nla) + 
NLA_ALIGN(pt->len); + if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla)) + return -ERANGE; + break; + case NLA_NESTED: + /* a nested attributes is allowed to be empty; if its not, + * it must have a size of at least NLA_HDRLEN. + */ + if (attrlen == 0) + break; + default: + if (pt->len) + minlen = pt->len; + else if (pt->type != NLA_UNSPEC) + minlen = nla_attr_minlen[pt->type]; + + if (attrlen < minlen) + return -ERANGE; + } + + return 0; +} + +/** + * nla_validate - Validate a stream of attributes + * @head: head of attribute stream + * @len: length of attribute stream + * @maxtype: maximum attribute type to be expected + * @policy: validation policy + * + * Validates all attributes in the specified attribute stream against the + * specified policy. Attributes with a type exceeding maxtype will be + * ignored. See documenation of struct nla_policy for more details. + * + * Returns 0 on success or a negative error code. + */ +int nla_validate(struct nlattr *head, int len, int maxtype, + const struct nla_policy *policy) +{ + struct nlattr *nla; + int rem, err; + + nla_for_each_attr(nla, head, len, rem) { + err = validate_nla(nla, maxtype, policy); + if (err < 0) + goto errout; + } + + err = 0; +errout: + return err; +} + +/** + * nla_parse - Parse a stream of attributes into a tb buffer + * @tb: destination array with maxtype+1 elements + * @maxtype: maximum attribute type to be expected + * @head: head of attribute stream + * @len: length of attribute stream + * @policy: validation policy + * + * Parses a stream of attributes and stores a pointer to each attribute in + * the tb array accessable via the attribute type. Attributes with a type + * exceeding maxtype will be silently ignored for backwards compatibility + * reasons. policy may be set to NULL if no validation is required. + * + * Returns 0 on success or a negative error code. + */ +int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, + const struct nla_policy *policy) +{ + struct nlattr *nla; + int rem, err; + + memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); + + nla_for_each_attr(nla, head, len, rem) { + u16 type = nla_type(nla); + + if (type > 0 && type <= maxtype) { + if (policy) { + err = validate_nla(nla, maxtype, policy); + if (err < 0) + goto errout; + } + + tb[type] = nla; + } + } + + if (unlikely(rem > 0)) + printk(KERN_WARNING "netlink: %d bytes leftover after parsing " + "attributes.\n", rem); + + err = 0; +errout: + return err; +} + +/** + * nla_find - Find a specific attribute in a stream of attributes + * @head: head of attribute stream + * @len: length of attribute stream + * @attrtype: type of attribute to look for + * + * Returns the first attribute in the stream matching the specified type. + */ +struct nlattr *nla_find(struct nlattr *head, int len, int attrtype) +{ + struct nlattr *nla; + int rem; + + nla_for_each_attr(nla, head, len, rem) + if (nla_type(nla) == attrtype) + return nla; + + return NULL; +} + +/** + * nla_strlcpy - Copy string attribute payload into a sized buffer + * @dst: where to copy the string to + * @nla: attribute to copy the string from + * @dstsize: size of destination buffer + * + * Copies at most dstsize - 1 bytes into the destination buffer. + * The result is always a valid NUL-terminated string. Unlike + * strlcpy the destination buffer is always padded out. + * + * Returns the length of the source buffer. 
+ */ +size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize) +{ + size_t srclen = nla_len(nla); + char *src = nla_data(nla); + + if (srclen > 0 && src[srclen - 1] == '\0') + srclen--; + + if (dstsize > 0) { + size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen; + + memset(dst, 0, dstsize); + memcpy(dst, src, len); + } + + return srclen; +} + +/** + * nla_memcpy - Copy a netlink attribute into another memory area + * @dest: where to copy to memcpy + * @src: netlink attribute to copy from + * @count: size of the destination area + * + * Note: The number of bytes copied is limited by the length of + * attribute's payload. memcpy + * + * Returns the number of bytes copied. + */ +int nla_memcpy(void *dest, const struct nlattr *src, int count) +{ + int minlen = min_t(int, count, nla_len(src)); + + memcpy(dest, nla_data(src), minlen); + + return minlen; +} + +/** + * nla_memcmp - Compare an attribute with sized memory area + * @nla: netlink attribute + * @data: memory area + * @size: size of memory area + */ +int nla_memcmp(const struct nlattr *nla, const void *data, + size_t size) +{ + int d = nla_len(nla) - size; + + if (d == 0) + d = memcmp(nla_data(nla), data, size); + + return d; +} + +/** + * nla_strcmp - Compare a string attribute against a string + * @nla: netlink string attribute + * @str: another string + */ +int nla_strcmp(const struct nlattr *nla, const char *str) +{ + int len = strlen(str) + 1; + int d = nla_len(nla) - len; + + if (d == 0) + d = memcmp(nla_data(nla), str, len); + + return d; +} + +/** + * __nla_reserve - reserve room for attribute on the skb + * @skb: socket buffer to reserve room on + * @attrtype: attribute type + * @attrlen: length of attribute payload + * + * Adds a netlink attribute header to a socket buffer and reserves + * room for the payload but does not copy it. + * + * The caller is responsible to ensure that the skb provides enough + * tailroom for the attribute header and payload. + */ +struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) +{ + struct nlattr *nla; + + nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen)); + nla->nla_type = attrtype; + nla->nla_len = nla_attr_size(attrlen); + + memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen)); + + return nla; +} + +/** + * __nla_reserve_nohdr - reserve room for attribute without header + * @skb: socket buffer to reserve room on + * @attrlen: length of attribute payload + * + * Reserves room for attribute payload without a header. + * + * The caller is responsible to ensure that the skb provides enough + * tailroom for the payload. + */ +void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen) +{ + void *start; + + start = skb_put(skb, NLA_ALIGN(attrlen)); + memset(start, 0, NLA_ALIGN(attrlen)); + + return start; +} + +/** + * nla_reserve - reserve room for attribute on the skb + * @skb: socket buffer to reserve room on + * @attrtype: attribute type + * @attrlen: length of attribute payload + * + * Adds a netlink attribute header to a socket buffer and reserves + * room for the payload but does not copy it. + * + * Returns NULL if the tailroom of the skb is insufficient to store + * the attribute header and payload. 
+ */ +struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) +{ + if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) + return NULL; + + return __nla_reserve(skb, attrtype, attrlen); +} + +/** + * nla_reserve_nohdr - reserve room for attribute without header + * @skb: socket buffer to reserve room on + * @attrlen: length of attribute payload + * + * Reserves room for attribute payload without a header. + * + * Returns NULL if the tailroom of the skb is insufficient to store + * the attribute payload. + */ +void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen) +{ + if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) + return NULL; + + return __nla_reserve_nohdr(skb, attrlen); +} + +/** + * __nla_put - Add a netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @attrlen: length of attribute payload + * @data: head of attribute payload + * + * The caller is responsible to ensure that the skb provides enough + * tailroom for the attribute header and payload. + */ +void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, + const void *data) +{ + struct nlattr *nla; + + nla = __nla_reserve(skb, attrtype, attrlen); + memcpy(nla_data(nla), data, attrlen); +} + +/** + * __nla_put_nohdr - Add a netlink attribute without header + * @skb: socket buffer to add attribute to + * @attrlen: length of attribute payload + * @data: head of attribute payload + * + * The caller is responsible to ensure that the skb provides enough + * tailroom for the attribute payload. + */ +void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) +{ + void *start; + + start = __nla_reserve_nohdr(skb, attrlen); + memcpy(start, data, attrlen); +} + +/** + * nla_put - Add a netlink attribute to a socket buffer + * @skb: socket buffer to add attribute to + * @attrtype: attribute type + * @attrlen: length of attribute payload + * @data: head of attribute payload + * + * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store + * the attribute header and payload. + */ +int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) +{ + if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) + return -EMSGSIZE; + + __nla_put(skb, attrtype, attrlen, data); + return 0; +} + +/** + * nla_put_nohdr - Add a netlink attribute without header + * @skb: socket buffer to add attribute to + * @attrlen: length of attribute payload + * @data: head of attribute payload + * + * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store + * the attribute payload. + */ +int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) +{ + if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) + return -EMSGSIZE; + + __nla_put_nohdr(skb, attrlen, data); + return 0; +} + +/** + * nla_append - Add a netlink attribute without header or padding + * @skb: socket buffer to add attribute to + * @attrlen: length of attribute payload + * @data: head of attribute payload + * + * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store + * the attribute payload. 
+ */ +int nla_append(struct sk_buff *skb, int attrlen, const void *data) +{ + if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) + return -EMSGSIZE; + + memcpy(skb_put(skb, attrlen), data, attrlen); + return 0; +} + +EXPORT_SYMBOL(nla_validate); +EXPORT_SYMBOL(nla_parse); +EXPORT_SYMBOL(nla_find); +EXPORT_SYMBOL(nla_strlcpy); +EXPORT_SYMBOL(__nla_reserve); +EXPORT_SYMBOL(__nla_reserve_nohdr); +EXPORT_SYMBOL(nla_reserve); +EXPORT_SYMBOL(nla_reserve_nohdr); +EXPORT_SYMBOL(__nla_put); +EXPORT_SYMBOL(__nla_put_nohdr); +EXPORT_SYMBOL(nla_put); +EXPORT_SYMBOL(nla_put_nohdr); +EXPORT_SYMBOL(nla_memcpy); +EXPORT_SYMBOL(nla_memcmp); +EXPORT_SYMBOL(nla_strcmp); +EXPORT_SYMBOL(nla_append); -- cgit v1.1 From 5ee00bd4691e7364bb7b62e2068d473cd5cb9320 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 12:14:24 +0100 Subject: dma-debug: add Kconfig entry Impact: add a Kconfig entry for DMA-API debugging Signed-off-by: Joerg Roedel --- lib/Kconfig.debug | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1bcf9cd..d9cbada 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -902,6 +902,17 @@ config DYNAMIC_PRINTK_DEBUG debugging for all modules. This mode can be turned off via the above disable command. +config DMA_API_DEBUG + bool "Enable debugging of DMA-API usage" + depends on HAVE_DMA_API_DEBUG + help + Enable this option to debug the use of the DMA API by device drivers. + With this option you will be able to detect common bugs in device + drivers like double-freeing of DMA mappings or freeing mappings that + were never allocated. + This option causes a performance degredation. Use only if you want + to debug device drivers. If unsure, say N. + source "samples/Kconfig" source "lib/Kconfig.kgdb" -- cgit v1.1 From f2f45e5f3c921c73c913e9a9c00f21ec01c86b4d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 12:19:52 +0100 Subject: dma-debug: add header file and core data structures Impact: add groundwork for DMA-API debugging Signed-off-by: Joerg Roedel --- lib/Makefile | 2 ++ lib/dma-debug.c | 42 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 lib/dma-debug.c (limited to 'lib') diff --git a/lib/Makefile b/lib/Makefile index 32b0e64..50b48cf 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -84,6 +84,8 @@ obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o +obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/dma-debug.c b/lib/dma-debug.c new file mode 100644 index 0000000..3109971 --- /dev/null +++ b/lib/dma-debug.c @@ -0,0 +1,42 @@ +/* + * Copyright (C) 2008 Advanced Micro Devices, Inc. + * + * Author: Joerg Roedel + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include + +enum { + dma_debug_single, + dma_debug_page, + dma_debug_sg, + dma_debug_coherent, +}; + +struct dma_debug_entry { + struct list_head list; + struct device *dev; + int type; + phys_addr_t paddr; + u64 dev_addr; + u64 size; + int direction; + int sg_call_ents; + int sg_mapped_ents; +}; + -- cgit v1.1 From 30dfa90cc8c4c9621d8d5aa9499f3a5df3376307 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 12:34:49 +0100 Subject: dma-debug: add hash functions for dma_debug_entries Impact: implement necessary functions for the core hash Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 101 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 3109971..5ff7d2e 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -18,9 +18,14 @@ */ #include +#include #include #include +#define HASH_SIZE 1024ULL +#define HASH_FN_SHIFT 13 +#define HASH_FN_MASK (HASH_SIZE - 1) + enum { dma_debug_single, dma_debug_page, @@ -40,3 +45,99 @@ struct dma_debug_entry { int sg_mapped_ents; }; +struct hash_bucket { + struct list_head list; + spinlock_t lock; +} __cacheline_aligned_in_smp; + +/* Hash list to save the allocated dma addresses */ +static struct hash_bucket dma_entry_hash[HASH_SIZE]; + +/* + * Hash related functions + * + * Every DMA-API request is saved into a struct dma_debug_entry. To + * have quick access to these structs they are stored into a hash. + */ +static int hash_fn(struct dma_debug_entry *entry) +{ + /* + * Hash function is based on the dma address. + * We use bits 20-27 here as the index into the hash + */ + return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; +} + +/* + * Request exclusive access to a hash bucket for a given dma_debug_entry. + */ +static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, + unsigned long *flags) +{ + int idx = hash_fn(entry); + unsigned long __flags; + + spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); + *flags = __flags; + return &dma_entry_hash[idx]; +} + +/* + * Give up exclusive access to the hash bucket + */ +static void put_hash_bucket(struct hash_bucket *bucket, + unsigned long *flags) +{ + unsigned long __flags = *flags; + + spin_unlock_irqrestore(&bucket->lock, __flags); +} + +/* + * Search a given entry in the hash bucket list + */ +static struct dma_debug_entry *hash_bucket_find(struct hash_bucket *bucket, + struct dma_debug_entry *ref) +{ + struct dma_debug_entry *entry; + + list_for_each_entry(entry, &bucket->list, list) { + if ((entry->dev_addr == ref->dev_addr) && + (entry->dev == ref->dev)) + return entry; + } + + return NULL; +} + +/* + * Add an entry to a hash bucket + */ +static void hash_bucket_add(struct hash_bucket *bucket, + struct dma_debug_entry *entry) +{ + list_add_tail(&entry->list, &bucket->list); +} + +/* + * Remove entry from a hash bucket list + */ +static void hash_bucket_del(struct dma_debug_entry *entry) +{ + list_del(&entry->list); +} + +/* + * Wrapper function for adding an entry to the hash. + * This function takes care of locking itself. 
+ */ +static void add_dma_entry(struct dma_debug_entry *entry) +{ + struct hash_bucket *bucket; + unsigned long flags; + + bucket = get_hash_bucket(entry, &flags); + hash_bucket_add(bucket, entry); + put_hash_bucket(bucket, &flags); +} + -- cgit v1.1 From 3b1e79ed734f58ac41ca0a287ff03ca355f120ad Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 12:42:46 +0100 Subject: dma-debug: add allocator code Impact: add allocator code for struct dma_debug_entry Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 57 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 57 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 5ff7d2e..b609146 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -52,6 +52,16 @@ struct hash_bucket { /* Hash list to save the allocated dma addresses */ static struct hash_bucket dma_entry_hash[HASH_SIZE]; +/* List of pre-allocated dma_debug_entry's */ +static LIST_HEAD(free_entries); +/* Lock for the list above */ +static DEFINE_SPINLOCK(free_entries_lock); + +/* Global disable flag - will be set in case of an error */ +static bool global_disable __read_mostly; + +static u32 num_free_entries; +static u32 min_free_entries; /* * Hash related functions @@ -141,3 +151,50 @@ static void add_dma_entry(struct dma_debug_entry *entry) put_hash_bucket(bucket, &flags); } +/* struct dma_entry allocator + * + * The next two functions implement the allocator for + * struct dma_debug_entries. + */ +static struct dma_debug_entry *dma_entry_alloc(void) +{ + struct dma_debug_entry *entry = NULL; + unsigned long flags; + + spin_lock_irqsave(&free_entries_lock, flags); + + if (list_empty(&free_entries)) { + printk(KERN_ERR "DMA-API: debugging out of memory " + "- disabling\n"); + global_disable = true; + goto out; + } + + entry = list_entry(free_entries.next, struct dma_debug_entry, list); + list_del(&entry->list); + memset(entry, 0, sizeof(*entry)); + + num_free_entries -= 1; + if (num_free_entries < min_free_entries) + min_free_entries = num_free_entries; + +out: + spin_unlock_irqrestore(&free_entries_lock, flags); + + return entry; +} + +static void dma_entry_free(struct dma_debug_entry *entry) +{ + unsigned long flags; + + /* + * add to beginning of the list - this way the entries are + * more likely cache hot when they are reallocated. + */ + spin_lock_irqsave(&free_entries_lock, flags); + list_add(&entry->list, &free_entries); + num_free_entries += 1; + spin_unlock_irqrestore(&free_entries_lock, flags); +} + -- cgit v1.1 From 6bf078715c1998d4d10716251cc10ce45908594c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 12:54:42 +0100 Subject: dma-debug: add initialization code Impact: add code to initialize dma-debug core data structures Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 66 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index b609146..5b50bb3 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -21,6 +21,7 @@ #include #include #include +#include #define HASH_SIZE 1024ULL #define HASH_FN_SHIFT 13 @@ -198,3 +199,68 @@ static void dma_entry_free(struct dma_debug_entry *entry) spin_unlock_irqrestore(&free_entries_lock, flags); } +/* + * DMA-API debugging init code + * + * The init code does two things: + * 1. Initialize core data structures + * 2. 
Preallocate a given number of dma_debug_entry structs + */ + +static int prealloc_memory(u32 num_entries) +{ + struct dma_debug_entry *entry, *next_entry; + int i; + + for (i = 0; i < num_entries; ++i) { + entry = kzalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) + goto out_err; + + list_add_tail(&entry->list, &free_entries); + } + + num_free_entries = num_entries; + min_free_entries = num_entries; + + printk(KERN_INFO "DMA-API: preallocated %d debug entries\n", + num_entries); + + return 0; + +out_err: + + list_for_each_entry_safe(entry, next_entry, &free_entries, list) { + list_del(&entry->list); + kfree(entry); + } + + return -ENOMEM; +} + +/* + * Let the architectures decide how many entries should be preallocated. + */ +void dma_debug_init(u32 num_entries) +{ + int i; + + if (global_disable) + return; + + for (i = 0; i < HASH_SIZE; ++i) { + INIT_LIST_HEAD(&dma_entry_hash[i].list); + dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED; + } + + if (prealloc_memory(num_entries) != 0) { + printk(KERN_ERR "DMA-API: debugging out of memory error " + "- disabled\n"); + global_disable = true; + + return; + } + + printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n"); +} + -- cgit v1.1 From 59d3daafa17265f01149df8eab3fb69b9b42cb2e Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 13:01:56 +0100 Subject: dma-debug: add kernel command line parameters Impact: add dma_debug= and dma_debug_entries= kernel parameters Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 5b50bb3..2ede463 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -64,6 +64,9 @@ static bool global_disable __read_mostly; static u32 num_free_entries; static u32 min_free_entries; +/* number of preallocated entries requested by kernel cmdline */ +static u32 req_entries; + /* * Hash related functions * @@ -253,6 +256,9 @@ void dma_debug_init(u32 num_entries) dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED; } + if (req_entries) + num_entries = req_entries; + if (prealloc_memory(num_entries) != 0) { printk(KERN_ERR "DMA-API: debugging out of memory error " "- disabled\n"); @@ -264,3 +270,35 @@ void dma_debug_init(u32 num_entries) printk(KERN_INFO "DMA-API: debugging enabled by kernel config\n"); } +static __init int dma_debug_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (strncmp(str, "off", 3) == 0) { + printk(KERN_INFO "DMA-API: debugging disabled on kernel " + "command line\n"); + global_disable = true; + } + + return 0; +} + +static __init int dma_debug_entries_cmdline(char *str) +{ + int res; + + if (!str) + return -EINVAL; + + res = get_option(&str, &req_entries); + + if (!res) + req_entries = 0; + + return 0; +} + +__setup("dma_debug=", dma_debug_cmdline); +__setup("dma_debug_entries=", dma_debug_entries_cmdline); + -- cgit v1.1 From 788dcfa6f17424695823152890d30da09f62f9c3 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 13:13:27 +0100 Subject: dma-debug: add debugfs interface Impact: add debugfs interface for configuring DMA-API debugging Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 78 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 2ede463..20d6cdb 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -61,12 +62,29 @@ static 
DEFINE_SPINLOCK(free_entries_lock); /* Global disable flag - will be set in case of an error */ static bool global_disable __read_mostly; +/* Global error count */ +static u32 error_count; + +/* Global error show enable*/ +static u32 show_all_errors __read_mostly; +/* Number of errors to show */ +static u32 show_num_errors = 1; + static u32 num_free_entries; static u32 min_free_entries; /* number of preallocated entries requested by kernel cmdline */ static u32 req_entries; +/* debugfs dentry's for the stuff above */ +static struct dentry *dma_debug_dent __read_mostly; +static struct dentry *global_disable_dent __read_mostly; +static struct dentry *error_count_dent __read_mostly; +static struct dentry *show_all_errors_dent __read_mostly; +static struct dentry *show_num_errors_dent __read_mostly; +static struct dentry *num_free_entries_dent __read_mostly; +static struct dentry *min_free_entries_dent __read_mostly; + /* * Hash related functions * @@ -241,6 +259,58 @@ out_err: return -ENOMEM; } +static int dma_debug_fs_init(void) +{ + dma_debug_dent = debugfs_create_dir("dma-api", NULL); + if (!dma_debug_dent) { + printk(KERN_ERR "DMA-API: can not create debugfs directory\n"); + return -ENOMEM; + } + + global_disable_dent = debugfs_create_bool("disabled", 0444, + dma_debug_dent, + (u32 *)&global_disable); + if (!global_disable_dent) + goto out_err; + + error_count_dent = debugfs_create_u32("error_count", 0444, + dma_debug_dent, &error_count); + if (!error_count_dent) + goto out_err; + + show_all_errors_dent = debugfs_create_u32("all_errors", 0644, + dma_debug_dent, + &show_all_errors); + if (!show_all_errors_dent) + goto out_err; + + show_num_errors_dent = debugfs_create_u32("num_errors", 0644, + dma_debug_dent, + &show_num_errors); + if (!show_num_errors_dent) + goto out_err; + + num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444, + dma_debug_dent, + &num_free_entries); + if (!num_free_entries_dent) + goto out_err; + + min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444, + dma_debug_dent, + &min_free_entries); + if (!min_free_entries_dent) + goto out_err; + + return 0; + +out_err: + debugfs_remove_recursive(dma_debug_dent); + + return -ENOMEM; +} + + /* * Let the architectures decide how many entries should be preallocated. 
*/ @@ -256,6 +326,14 @@ void dma_debug_init(u32 num_entries) dma_entry_hash[i].lock = SPIN_LOCK_UNLOCKED; } + if (dma_debug_fs_init() != 0) { + printk(KERN_ERR "DMA-API: error creating debugfs entries " + "- disabling\n"); + global_disable = true; + + return; + } + if (req_entries) num_entries = req_entries; -- cgit v1.1 From 2d62ece14fe04168a7d16688ddd2d17ac472268c Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 14:10:26 +0100 Subject: dma-debug: add core checking functions Impact: add functions to check on dma unmap and sync Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 188 +++++++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 187 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 20d6cdb..d0cb47a 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -17,10 +17,13 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include #include +#include #include +#include #include #include @@ -50,7 +53,7 @@ struct dma_debug_entry { struct hash_bucket { struct list_head list; spinlock_t lock; -} __cacheline_aligned_in_smp; +} ____cacheline_aligned_in_smp; /* Hash list to save the allocated dma addresses */ static struct hash_bucket dma_entry_hash[HASH_SIZE]; @@ -85,6 +88,36 @@ static struct dentry *show_num_errors_dent __read_mostly; static struct dentry *num_free_entries_dent __read_mostly; static struct dentry *min_free_entries_dent __read_mostly; +static const char *type2name[4] = { "single", "page", + "scather-gather", "coherent" }; + +static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", + "DMA_FROM_DEVICE", "DMA_NONE" }; + +/* + * The access to some variables in this macro is racy. We can't use atomic_t + * here because all these variables are exported to debugfs. Some of them even + * writeable. This is also the reason why a lock won't help much. But anyway, + * the races are no big deal. Here is why: + * + * error_count: the addition is racy, but the worst thing that can happen is + * that we don't count some errors + * show_num_errors: the subtraction is racy. Also no big deal because in + * worst case this will result in one warning more in the + * system log than the user configured. This variable is + * writeable via debugfs. + */ +#define err_printk(dev, format, arg...) 
do { \ + error_count += 1; \ + if (show_all_errors || show_num_errors > 0) { \ + WARN(1, "%s %s: " format, \ + dev_driver_string(dev), \ + dev_name(dev) , ## arg); \ + } \ + if (!show_all_errors && show_num_errors > 0) \ + show_num_errors -= 1; \ + } while (0); + /* * Hash related functions * @@ -380,3 +413,156 @@ static __init int dma_debug_entries_cmdline(char *str) __setup("dma_debug=", dma_debug_cmdline); __setup("dma_debug_entries=", dma_debug_entries_cmdline); +static void check_unmap(struct dma_debug_entry *ref) +{ + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + + if (dma_mapping_error(ref->dev, ref->dev_addr)) + return; + + bucket = get_hash_bucket(ref, &flags); + entry = hash_bucket_find(bucket, ref); + + if (!entry) { + err_printk(ref->dev, "DMA-API: device driver tries " + "to free DMA memory it has not allocated " + "[device address=0x%016llx] [size=%llu bytes]\n", + ref->dev_addr, ref->size); + goto out; + } + + if (ref->size != entry->size) { + err_printk(ref->dev, "DMA-API: device driver frees " + "DMA memory with different size " + "[device address=0x%016llx] [map size=%llu bytes] " + "[unmap size=%llu bytes]\n", + ref->dev_addr, entry->size, ref->size); + } + + if (ref->type != entry->type) { + err_printk(ref->dev, "DMA-API: device driver frees " + "DMA memory with wrong function " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped as %s] [unmapped as %s]\n", + ref->dev_addr, ref->size, + type2name[entry->type], type2name[ref->type]); + } else if ((entry->type == dma_debug_coherent) && + (ref->paddr != entry->paddr)) { + err_printk(ref->dev, "DMA-API: device driver frees " + "DMA memory with different CPU address " + "[device address=0x%016llx] [size=%llu bytes] " + "[cpu alloc address=%p] [cpu free address=%p]", + ref->dev_addr, ref->size, + (void *)entry->paddr, (void *)ref->paddr); + } + + if (ref->sg_call_ents && ref->type == dma_debug_sg && + ref->sg_call_ents != entry->sg_call_ents) { + err_printk(ref->dev, "DMA-API: device driver frees " + "DMA sg list with different entry count " + "[map count=%d] [unmap count=%d]\n", + entry->sg_call_ents, ref->sg_call_ents); + } + + /* + * This may be no bug in reality - but most implementations of the + * DMA API don't handle this properly, so check for it here + */ + if (ref->direction != entry->direction) { + err_printk(ref->dev, "DMA-API: device driver frees " + "DMA memory with different direction " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [unmapped with %s]\n", + ref->dev_addr, ref->size, + dir2name[entry->direction], + dir2name[ref->direction]); + } + + hash_bucket_del(entry); + dma_entry_free(entry); + +out: + put_hash_bucket(bucket, &flags); +} + +static void check_for_stack(struct device *dev, void *addr) +{ + if (object_is_on_stack(addr)) + err_printk(dev, "DMA-API: device driver maps memory from stack" + " [addr=%p]\n", addr); +} + +static void check_sync(struct device *dev, dma_addr_t addr, + u64 size, u64 offset, int direction, bool to_cpu) +{ + struct dma_debug_entry ref = { + .dev = dev, + .dev_addr = addr, + .size = size, + .direction = direction, + }; + struct dma_debug_entry *entry; + struct hash_bucket *bucket; + unsigned long flags; + + bucket = get_hash_bucket(&ref, &flags); + + entry = hash_bucket_find(bucket, &ref); + + if (!entry) { + err_printk(dev, "DMA-API: device driver tries " + "to sync DMA memory it has not allocated " + "[device address=0x%016llx] [size=%llu bytes]\n", + addr, size); + goto out; + } + + if ((offset + 
size) > entry->size) { + err_printk(dev, "DMA-API: device driver syncs" + " DMA memory outside allocated range " + "[device address=0x%016llx] " + "[allocation size=%llu bytes] [sync offset=%llu] " + "[sync size=%llu]\n", entry->dev_addr, entry->size, + offset, size); + } + + if (direction != entry->direction) { + err_printk(dev, "DMA-API: device driver syncs " + "DMA memory with different direction " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + addr, entry->size, + dir2name[entry->direction], + dir2name[direction]); + } + + if (entry->direction == DMA_BIDIRECTIONAL) + goto out; + + if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && + !(direction == DMA_TO_DEVICE)) + err_printk(dev, "DMA-API: device driver syncs " + "device read-only DMA memory for cpu " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + addr, entry->size, + dir2name[entry->direction], + dir2name[direction]); + + if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && + !(direction == DMA_FROM_DEVICE)) + err_printk(dev, "DMA-API: device driver syncs " + "device write-only DMA memory to device " + "[device address=0x%016llx] [size=%llu bytes] " + "[mapped with %s] [synced with %s]\n", + addr, entry->size, + dir2name[entry->direction], + dir2name[direction]); + +out: + put_hash_bucket(bucket, &flags); + +} + -- cgit v1.1 From f62bc980e6fd26434012c0d5676ecb17179d9ee4 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 14:14:49 +0100 Subject: dma-debug: add checking for map/unmap_page/single Impact: add debug callbacks for dma_{un}map_[page|single] Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index d0cb47a..a2ed2b7 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -566,3 +566,56 @@ out: } +void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, + size_t size, int direction, dma_addr_t dma_addr, + bool map_single) +{ + struct dma_debug_entry *entry; + + if (unlikely(global_disable)) + return; + + if (unlikely(dma_mapping_error(dev, dma_addr))) + return; + + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->dev = dev; + entry->type = dma_debug_page; + entry->paddr = page_to_phys(page) + offset; + entry->dev_addr = dma_addr; + entry->size = size; + entry->direction = direction; + + if (map_single) { + entry->type = dma_debug_single; + check_for_stack(dev, page_address(page) + offset); + } + + add_dma_entry(entry); +} +EXPORT_SYMBOL(debug_dma_map_page); + +void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, int direction, bool map_single) +{ + struct dma_debug_entry ref = { + .type = dma_debug_page, + .dev = dev, + .dev_addr = addr, + .size = size, + .direction = direction, + }; + + if (unlikely(global_disable)) + return; + + if (map_single) + ref.type = dma_debug_single; + + check_unmap(&ref); +} +EXPORT_SYMBOL(debug_dma_unmap_page); + -- cgit v1.1 From 972aa45ceaf65376f33aa75958fcaefc9e752fa4 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 14:19:54 +0100 Subject: dma-debug: add add checking for map/unmap_sg Impact: add debug callbacks for dma_{un}map_sg Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 73 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 73 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index a2ed2b7..26e40e9 
100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -17,6 +17,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include #include @@ -619,3 +620,75 @@ void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, } EXPORT_SYMBOL(debug_dma_unmap_page); +void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, + int nents, int mapped_ents, int direction) +{ + struct dma_debug_entry *entry; + struct scatterlist *s; + int i; + + if (unlikely(global_disable)) + return; + + for_each_sg(sg, s, mapped_ents, i) { + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->type = dma_debug_sg; + entry->dev = dev; + entry->paddr = sg_phys(s); + entry->size = s->length; + entry->dev_addr = s->dma_address; + entry->direction = direction; + entry->sg_call_ents = nents; + entry->sg_mapped_ents = mapped_ents; + + check_for_stack(dev, sg_virt(s)); + + add_dma_entry(entry); + } +} +EXPORT_SYMBOL(debug_dma_map_sg); + +void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, + int nelems, int dir) +{ + struct dma_debug_entry *entry; + struct scatterlist *s; + int mapped_ents = 0, i; + unsigned long flags; + + if (unlikely(global_disable)) + return; + + for_each_sg(sglist, s, nelems, i) { + + struct dma_debug_entry ref = { + .type = dma_debug_sg, + .dev = dev, + .paddr = sg_phys(s), + .dev_addr = s->dma_address, + .size = s->length, + .direction = dir, + .sg_call_ents = 0, + }; + + if (mapped_ents && i >= mapped_ents) + break; + + if (mapped_ents == 0) { + struct hash_bucket *bucket; + ref.sg_call_ents = nelems; + bucket = get_hash_bucket(&ref, &flags); + entry = hash_bucket_find(bucket, &ref); + if (entry) + mapped_ents = entry->sg_mapped_ents; + put_hash_bucket(bucket, &flags); + } + + check_unmap(&ref); + } +} +EXPORT_SYMBOL(debug_dma_unmap_sg); + -- cgit v1.1 From 6bfd4498764d6201399849d2e80fda95db7742c0 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 14:38:50 +0100 Subject: dma-debug: add checking for [alloc|free]_coherent Impact: add debug callbacks for dma_[alloc|free]_coherent Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 26e40e9..44af837 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -692,3 +692,48 @@ void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, } EXPORT_SYMBOL(debug_dma_unmap_sg); +void debug_dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t dma_addr, void *virt) +{ + struct dma_debug_entry *entry; + + if (unlikely(global_disable)) + return; + + if (unlikely(virt == NULL)) + return; + + entry = dma_entry_alloc(); + if (!entry) + return; + + entry->type = dma_debug_coherent; + entry->dev = dev; + entry->paddr = virt_to_phys(virt); + entry->size = size; + entry->dev_addr = dma_addr; + entry->direction = DMA_BIDIRECTIONAL; + + add_dma_entry(entry); +} +EXPORT_SYMBOL(debug_dma_alloc_coherent); + +void debug_dma_free_coherent(struct device *dev, size_t size, + void *virt, dma_addr_t addr) +{ + struct dma_debug_entry ref = { + .type = dma_debug_coherent, + .dev = dev, + .paddr = virt_to_phys(virt), + .dev_addr = addr, + .size = size, + .direction = DMA_BIDIRECTIONAL, + }; + + if (unlikely(global_disable)) + return; + + check_unmap(&ref); +} +EXPORT_SYMBOL(debug_dma_free_coherent); + -- cgit v1.1 From b9d2317e0c4aed02afd20022083b2a485289605d Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 
14:43:04 +0100 Subject: dma-debug: add checks for sync_single_* Impact: add debug callbacks for dma_sync_single_for_* functions Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 44af837..714cfb6 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -737,3 +737,24 @@ void debug_dma_free_coherent(struct device *dev, size_t size, } EXPORT_SYMBOL(debug_dma_free_coherent); +void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, + size_t size, int direction) +{ + if (unlikely(global_disable)) + return; + + check_sync(dev, dma_handle, size, 0, direction, true); +} +EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); + +void debug_dma_sync_single_for_device(struct device *dev, + dma_addr_t dma_handle, size_t size, + int direction) +{ + if (unlikely(global_disable)) + return; + + check_sync(dev, dma_handle, size, 0, direction, false); +} +EXPORT_SYMBOL(debug_dma_sync_single_for_device); + -- cgit v1.1 From 948408ba3e2a67ed0f95e18ed5be1c622c2c5fc3 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 14:55:38 +0100 Subject: dma-debug: add checks for sync_single_range_* Impact: add debug callbacks for dma_sync_single_range_for_* functions Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 714cfb6..d1c0ac1 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -758,3 +758,27 @@ void debug_dma_sync_single_for_device(struct device *dev, } EXPORT_SYMBOL(debug_dma_sync_single_for_device); +void debug_dma_sync_single_range_for_cpu(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, size_t size, + int direction) +{ + if (unlikely(global_disable)) + return; + + check_sync(dev, dma_handle, size, offset, direction, true); +} +EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); + +void debug_dma_sync_single_range_for_device(struct device *dev, + dma_addr_t dma_handle, + unsigned long offset, + size_t size, int direction) +{ + if (unlikely(global_disable)) + return; + + check_sync(dev, dma_handle, size, offset, direction, false); +} +EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); + -- cgit v1.1 From a31fba5d68cebf8f5fefd03e079dab94875e25f5 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Fri, 9 Jan 2009 15:01:12 +0100 Subject: dma-debug: add checks for sync_single_sg_* Impact: add debug callbacks for dma_sync_sg_* functions Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index d1c0ac1..9d11e89 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -782,3 +782,35 @@ void debug_dma_sync_single_range_for_device(struct device *dev, } EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); +void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, int direction) +{ + struct scatterlist *s; + int i; + + if (unlikely(global_disable)) + return; + + for_each_sg(sg, s, nelems, i) { + check_sync(dev, s->dma_address, s->dma_length, 0, + direction, true); + } +} +EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); + +void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, int direction) +{ + struct scatterlist *s; + int i; + + if (unlikely(global_disable)) + return; + + for_each_sg(sg, s, nelems, i) { + check_sync(dev, 
s->dma_address, s->dma_length, 0, + direction, false); + } +} +EXPORT_SYMBOL(debug_dma_sync_sg_for_device); + -- cgit v1.1 From 5ed0cec0ac5f1b3759bdbe4d9df32ee4ff8afb5a Mon Sep 17 00:00:00 2001 From: Lai Jiangshan Date: Fri, 6 Mar 2009 19:40:20 +0800 Subject: sched: TIF_NEED_RESCHED -> need_reshed() cleanup Impact: cleanup Use test_tsk_need_resched(), set_tsk_need_resched(), need_resched() instead of using TIF_NEED_RESCHED. Signed-off-by: Lai Jiangshan Cc: Peter Zijlstra LKML-Reference: <49B10BA4.9070209@cn.fujitsu.com> Signed-off-by: Ingo Molnar --- lib/kernel_lock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c index 01a3c22..39f1029 100644 --- a/lib/kernel_lock.c +++ b/lib/kernel_lock.c @@ -39,7 +39,7 @@ static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kernel_flag); int __lockfunc __reacquire_kernel_lock(void) { while (!_raw_spin_trylock(&kernel_flag)) { - if (test_thread_flag(TIF_NEED_RESCHED)) + if (need_resched()) return -EAGAIN; cpu_relax(); } -- cgit v1.1 From 908002161247e6e68c478052926b62d9a3d72418 Mon Sep 17 00:00:00 2001 From: Herbert Xu Date: Wed, 11 Mar 2009 23:18:32 +0800 Subject: nlattr: Fix build error with NET off We moved the netlink attribute support from net to lib in order for it to be available for general consumption. However, parts of the code (the bits that we don't need :) really depends on NET because the target object is sk_buff. This patch fixes this by wrapping them in CONFIG_NET. Some EXPORTs have been moved to make this work. Tested-by: Geert Uytterhoeven Signed-off-by: Herbert Xu --- lib/nlattr.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) (limited to 'lib') diff --git a/lib/nlattr.c b/lib/nlattr.c index 56c3ce7..80009a2 100644 --- a/lib/nlattr.c +++ b/lib/nlattr.c @@ -281,6 +281,7 @@ int nla_strcmp(const struct nlattr *nla, const char *str) return d; } +#ifdef CONFIG_NET /** * __nla_reserve - reserve room for attribute on the skb * @skb: socket buffer to reserve room on @@ -305,6 +306,7 @@ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) return nla; } +EXPORT_SYMBOL(__nla_reserve); /** * __nla_reserve_nohdr - reserve room for attribute without header @@ -325,6 +327,7 @@ void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen) return start; } +EXPORT_SYMBOL(__nla_reserve_nohdr); /** * nla_reserve - reserve room for attribute on the skb @@ -345,6 +348,7 @@ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) return __nla_reserve(skb, attrtype, attrlen); } +EXPORT_SYMBOL(nla_reserve); /** * nla_reserve_nohdr - reserve room for attribute without header @@ -363,6 +367,7 @@ void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen) return __nla_reserve_nohdr(skb, attrlen); } +EXPORT_SYMBOL(nla_reserve_nohdr); /** * __nla_put - Add a netlink attribute to a socket buffer @@ -382,6 +387,7 @@ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, nla = __nla_reserve(skb, attrtype, attrlen); memcpy(nla_data(nla), data, attrlen); } +EXPORT_SYMBOL(__nla_put); /** * __nla_put_nohdr - Add a netlink attribute without header @@ -399,6 +405,7 @@ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) start = __nla_reserve_nohdr(skb, attrlen); memcpy(start, data, attrlen); } +EXPORT_SYMBOL(__nla_put_nohdr); /** * nla_put - Add a netlink attribute to a socket buffer @@ -418,6 +425,7 @@ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) __nla_put(skb, attrtype, 
attrlen, data); return 0; } +EXPORT_SYMBOL(nla_put); /** * nla_put_nohdr - Add a netlink attribute without header @@ -436,6 +444,7 @@ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) __nla_put_nohdr(skb, attrlen, data); return 0; } +EXPORT_SYMBOL(nla_put_nohdr); /** * nla_append - Add a netlink attribute without header or padding @@ -454,20 +463,13 @@ int nla_append(struct sk_buff *skb, int attrlen, const void *data) memcpy(skb_put(skb, attrlen), data, attrlen); return 0; } +EXPORT_SYMBOL(nla_append); +#endif EXPORT_SYMBOL(nla_validate); EXPORT_SYMBOL(nla_parse); EXPORT_SYMBOL(nla_find); EXPORT_SYMBOL(nla_strlcpy); -EXPORT_SYMBOL(__nla_reserve); -EXPORT_SYMBOL(__nla_reserve_nohdr); -EXPORT_SYMBOL(nla_reserve); -EXPORT_SYMBOL(nla_reserve_nohdr); -EXPORT_SYMBOL(__nla_put); -EXPORT_SYMBOL(__nla_put_nohdr); -EXPORT_SYMBOL(nla_put); -EXPORT_SYMBOL(nla_put_nohdr); EXPORT_SYMBOL(nla_memcpy); EXPORT_SYMBOL(nla_memcmp); EXPORT_SYMBOL(nla_strcmp); -EXPORT_SYMBOL(nla_append); -- cgit v1.1 From aa8e4fc68d8024cd3132035d13c3cefa7baeac8f Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 12 Mar 2009 19:32:51 -0700 Subject: bitmap: fix end condition in bitmap_find_free_region Guennadi Liakhovetski noticed that the end condition for the loop in bitmap_find_free_region() is wrong, and the "return if error" was also using the wrong conditional that would only trigger if the bitmap was an exact multiple of the allocation size, which is not necessarily the case with dma_alloc_from_coherent(). Such a failure would end up in bitmap_find_free_region() accessing beyond the end of the bitmap. Reported-by: Guennadi Liakhovetski Cc: Andrew Morton Signed-off-by: Linus Torvalds --- lib/bitmap.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'lib') diff --git a/lib/bitmap.c b/lib/bitmap.c index 1338469..35a1f7f 100644 --- a/lib/bitmap.c +++ b/lib/bitmap.c @@ -948,15 +948,15 @@ done: */ int bitmap_find_free_region(unsigned long *bitmap, int bits, int order) { - int pos; /* scans bitmap by regions of size order */ + int pos, end; /* scans bitmap by regions of size order */ - for (pos = 0; pos < bits; pos += (1 << order)) - if (__reg_op(bitmap, pos, order, REG_OP_ISFREE)) - break; - if (pos == bits) - return -ENOMEM; - __reg_op(bitmap, pos, order, REG_OP_ALLOC); - return pos; + for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) { + if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) + continue; + __reg_op(bitmap, pos, order, REG_OP_ALLOC); + return pos; + } + return -ENOMEM; } EXPORT_SYMBOL(bitmap_find_free_region); -- cgit v1.1 From ac26c18bd35d982d1ba06020a992b1085fefc3e2 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Thu, 12 Feb 2009 16:19:13 +0100 Subject: dma-debug: add function to dump dma mappings This adds a function to dump the DMA mappings that the debugging code is aware of -- either for a single device, or for _all_ devices. This can be useful for debugging -- sticking a call to it in the DMA page fault handler, for example, to see if the faulting address _should_ be mapped or not, and hence work out whether it's IOMMU bugs we're seeing, or driver bugs. 
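For illustration only (not part of this patch): a hypothetical IOMMU fault handler could use the new helper roughly as below. The handler name and arguments are made up; only debug_dma_dump_mappings() comes from the patch.

    static void example_iommu_fault_handler(struct device *dev,
                                            unsigned long fault_addr)
    {
            dev_err(dev, "unexpected IOMMU fault at 0x%lx\n", fault_addr);

            /* dump what dma-debug believes is mapped for this device ... */
            debug_dma_dump_mappings(dev);

            /* ... or pass NULL to dump the known mappings of all devices */
            debug_dma_dump_mappings(NULL);
    }
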
Signed-off-by: David Woodhouse --- lib/dma-debug.c | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 9d11e89..91ed1df 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -194,6 +194,36 @@ static void hash_bucket_del(struct dma_debug_entry *entry) } /* + * Dump mapping entries for debugging purposes + */ +void debug_dma_dump_mappings(struct device *dev) +{ + int idx; + + for (idx = 0; idx < HASH_SIZE; idx++) { + struct hash_bucket *bucket = &dma_entry_hash[idx]; + struct dma_debug_entry *entry; + unsigned long flags; + + spin_lock_irqsave(&bucket->lock, flags); + + list_for_each_entry(entry, &bucket->list, list) { + if (!dev || dev == entry->dev) { + dev_info(entry->dev, + "%s idx %d P=%Lx D=%Lx L=%Lx %s\n", + type2name[entry->type], idx, + (unsigned long long)entry->paddr, + entry->dev_addr, entry->size, + dir2name[entry->direction]); + } + } + + spin_unlock_irqrestore(&bucket->lock, flags); + } +} +EXPORT_SYMBOL(debug_dma_dump_mappings); + +/* * Wrapper function for adding an entry to the hash. * This function takes care of locking itself. */ -- cgit v1.1 From 6c132d1bcdc716e898b4092bb1abc696505c37e7 Mon Sep 17 00:00:00 2001 From: David Woodhouse Date: Mon, 19 Jan 2009 16:52:39 +0100 Subject: dma-debug: print stacktrace of mapping path on unmap error Impact: saves stacktrace of a dma mapping and prints it if there is an error Signed-off-by: David Woodhouse Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 52 ++++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 14 deletions(-) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 91ed1df..dba02f1 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -19,6 +19,7 @@ #include #include +#include #include #include #include @@ -39,6 +40,8 @@ enum { dma_debug_coherent, }; +#define DMA_DEBUG_STACKTRACE_ENTRIES 5 + struct dma_debug_entry { struct list_head list; struct device *dev; @@ -49,6 +52,10 @@ struct dma_debug_entry { int direction; int sg_call_ents; int sg_mapped_ents; +#ifdef CONFIG_STACKTRACE + struct stack_trace stacktrace; + unsigned long st_entries[DMA_DEBUG_STACKTRACE_ENTRIES]; +#endif }; struct hash_bucket { @@ -108,12 +115,23 @@ static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", * system log than the user configured. This variable is * writeable via debugfs. */ -#define err_printk(dev, format, arg...) do { \ +static inline void dump_entry_trace(struct dma_debug_entry *entry) +{ +#ifdef CONFIG_STACKTRACE + if (entry) { + printk(KERN_WARNING "Mapped at:\n"); + print_stack_trace(&entry->stacktrace, 0); + } +#endif +} + +#define err_printk(dev, entry, format, arg...) 
do { \ error_count += 1; \ if (show_all_errors || show_num_errors > 0) { \ WARN(1, "%s %s: " format, \ dev_driver_string(dev), \ dev_name(dev) , ## arg); \ + dump_entry_trace(entry); \ } \ if (!show_all_errors && show_num_errors > 0) \ show_num_errors -= 1; \ @@ -260,6 +278,12 @@ static struct dma_debug_entry *dma_entry_alloc(void) list_del(&entry->list); memset(entry, 0, sizeof(*entry)); +#ifdef CONFIG_STACKTRACE + entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; + entry->stacktrace.entries = entry->st_entries; + entry->stacktrace.skip = 2; + save_stack_trace(&entry->stacktrace); +#endif num_free_entries -= 1; if (num_free_entries < min_free_entries) min_free_entries = num_free_entries; @@ -457,7 +481,7 @@ static void check_unmap(struct dma_debug_entry *ref) entry = hash_bucket_find(bucket, ref); if (!entry) { - err_printk(ref->dev, "DMA-API: device driver tries " + err_printk(ref->dev, NULL, "DMA-API: device driver tries " "to free DMA memory it has not allocated " "[device address=0x%016llx] [size=%llu bytes]\n", ref->dev_addr, ref->size); @@ -465,7 +489,7 @@ static void check_unmap(struct dma_debug_entry *ref) } if (ref->size != entry->size) { - err_printk(ref->dev, "DMA-API: device driver frees " + err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA memory with different size " "[device address=0x%016llx] [map size=%llu bytes] " "[unmap size=%llu bytes]\n", @@ -473,7 +497,7 @@ static void check_unmap(struct dma_debug_entry *ref) } if (ref->type != entry->type) { - err_printk(ref->dev, "DMA-API: device driver frees " + err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA memory with wrong function " "[device address=0x%016llx] [size=%llu bytes] " "[mapped as %s] [unmapped as %s]\n", @@ -481,7 +505,7 @@ static void check_unmap(struct dma_debug_entry *ref) type2name[entry->type], type2name[ref->type]); } else if ((entry->type == dma_debug_coherent) && (ref->paddr != entry->paddr)) { - err_printk(ref->dev, "DMA-API: device driver frees " + err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA memory with different CPU address " "[device address=0x%016llx] [size=%llu bytes] " "[cpu alloc address=%p] [cpu free address=%p]", @@ -491,7 +515,7 @@ static void check_unmap(struct dma_debug_entry *ref) if (ref->sg_call_ents && ref->type == dma_debug_sg && ref->sg_call_ents != entry->sg_call_ents) { - err_printk(ref->dev, "DMA-API: device driver frees " + err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA sg list with different entry count " "[map count=%d] [unmap count=%d]\n", entry->sg_call_ents, ref->sg_call_ents); @@ -502,7 +526,7 @@ static void check_unmap(struct dma_debug_entry *ref) * DMA API don't handle this properly, so check for it here */ if (ref->direction != entry->direction) { - err_printk(ref->dev, "DMA-API: device driver frees " + err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA memory with different direction " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [unmapped with %s]\n", @@ -521,8 +545,8 @@ out: static void check_for_stack(struct device *dev, void *addr) { if (object_is_on_stack(addr)) - err_printk(dev, "DMA-API: device driver maps memory from stack" - " [addr=%p]\n", addr); + err_printk(dev, NULL, "DMA-API: device driver maps memory from" + "stack [addr=%p]\n", addr); } static void check_sync(struct device *dev, dma_addr_t addr, @@ -543,7 +567,7 @@ static void check_sync(struct device *dev, dma_addr_t addr, entry = hash_bucket_find(bucket, &ref); if (!entry) { - 
err_printk(dev, "DMA-API: device driver tries " + err_printk(dev, NULL, "DMA-API: device driver tries " "to sync DMA memory it has not allocated " "[device address=0x%016llx] [size=%llu bytes]\n", addr, size); @@ -551,7 +575,7 @@ static void check_sync(struct device *dev, dma_addr_t addr, } if ((offset + size) > entry->size) { - err_printk(dev, "DMA-API: device driver syncs" + err_printk(dev, entry, "DMA-API: device driver syncs" " DMA memory outside allocated range " "[device address=0x%016llx] " "[allocation size=%llu bytes] [sync offset=%llu] " @@ -560,7 +584,7 @@ static void check_sync(struct device *dev, dma_addr_t addr, } if (direction != entry->direction) { - err_printk(dev, "DMA-API: device driver syncs " + err_printk(dev, entry, "DMA-API: device driver syncs " "DMA memory with different direction " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", @@ -574,7 +598,7 @@ static void check_sync(struct device *dev, dma_addr_t addr, if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && !(direction == DMA_TO_DEVICE)) - err_printk(dev, "DMA-API: device driver syncs " + err_printk(dev, entry, "DMA-API: device driver syncs " "device read-only DMA memory for cpu " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", @@ -584,7 +608,7 @@ static void check_sync(struct device *dev, dma_addr_t addr, if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && !(direction == DMA_FROM_DEVICE)) - err_printk(dev, "DMA-API: device driver syncs " + err_printk(dev, entry, "DMA-API: device driver syncs " "device write-only DMA memory to device " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", -- cgit v1.1 From 2e34bde18576a02c897ae6b699ea26301d92be1b Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 16 Mar 2009 16:51:55 +0100 Subject: dma-debug: add checks for kernel text and rodata Impact: get notified if a device dma maps illegal areas This patch adds a check to print a warning message when a device driver tries to map a memory area from the kernel text segment or rodata. 
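For illustration only: the sort of driver bug the new check is meant to flag. The device and buffer below are made up; mapping a const object puts the buffer in .rodata, which the dma_map_single() path will now warn about.

    /* assumes <linux/dma-mapping.h>; names are illustrative only */
    static const char example_greeting[] = "hello, device";

    static void example_bad_mapping(struct device *dev)
    {
            dma_addr_t handle;

            /* example_greeting lives in .rodata -> triggers the new warning */
            handle = dma_map_single(dev, (void *)example_greeting,
                                    sizeof(example_greeting), DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return;

            dma_unmap_single(dev, handle, sizeof(example_greeting),
                             DMA_TO_DEVICE);
    }
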
Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index dba02f1..6022eb4 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -29,6 +29,8 @@ #include #include +#include + #define HASH_SIZE 1024ULL #define HASH_FN_SHIFT 13 #define HASH_FN_MASK (HASH_SIZE - 1) @@ -549,6 +551,24 @@ static void check_for_stack(struct device *dev, void *addr) "stack [addr=%p]\n", addr); } +static inline bool overlap(void *addr, u64 size, void *start, void *end) +{ + void *addr2 = (char *)addr + size; + + return ((addr >= start && addr < end) || + (addr2 >= start && addr2 < end) || + ((addr < start) && (addr2 >= end))); +} + +static void check_for_illegal_area(struct device *dev, void *addr, u64 size) +{ + if (overlap(addr, size, _text, _etext) || + overlap(addr, size, __start_rodata, __end_rodata)) + err_printk(dev, NULL, "DMA-API: device driver maps " + "memory from kernel text or rodata " + "[addr=%p] [size=%llu]\n", addr, size); +} + static void check_sync(struct device *dev, dma_addr_t addr, u64 size, u64 offset, int direction, bool to_cpu) { @@ -645,8 +665,11 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, entry->direction = direction; if (map_single) { + void *addr = ((char *)page_address(page)) + offset; + entry->type = dma_debug_single; - check_for_stack(dev, page_address(page) + offset); + check_for_stack(dev, addr); + check_for_illegal_area(dev, addr, size); } add_dma_entry(entry); @@ -699,6 +722,7 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, entry->sg_mapped_ents = mapped_ents; check_for_stack(dev, sg_virt(s)); + check_for_illegal_area(dev, sg_virt(s), s->length); add_dma_entry(entry); } -- cgit v1.1 From 41531c8f5f05aba5ec645d9770557eedbf75b422 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 16 Mar 2009 17:32:14 +0100 Subject: dma-debug: add a check dma memory leaks Impact: allow architectures to monitor busses for dma mem leakage This patch adds checking code to detect if a device has pending DMA operations when it is about to be unbound from its device driver. 
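For illustration only: how architecture or bus setup code might opt a bus in to this unbind-time leak check. Whether and where a particular architecture does so is outside this patch; the initcall below is a made-up sketch.

    /* assumes <linux/pci.h> and <linux/dma-debug.h> */
    static int __init example_dma_debug_register_pci(void)
    {
            /* warn when a PCI driver is unbound with DMA mappings left over */
            dma_debug_add_bus(&pci_bus_type);
            return 0;
    }
    fs_initcall(example_dma_debug_register_pci);
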
Signed-off-by: Joerg Roedel --- lib/dma-debug.c | 55 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 55 insertions(+) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 6022eb4..9a350b4 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -400,6 +400,61 @@ out_err: return -ENOMEM; } +static int device_dma_allocations(struct device *dev) +{ + struct dma_debug_entry *entry; + unsigned long flags; + int count = 0, i; + + for (i = 0; i < HASH_SIZE; ++i) { + spin_lock_irqsave(&dma_entry_hash[i].lock, flags); + list_for_each_entry(entry, &dma_entry_hash[i].list, list) { + if (entry->dev == dev) + count += 1; + } + spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags); + } + + return count; +} + +static int dma_debug_device_change(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct device *dev = data; + int count; + + + switch (action) { + case BUS_NOTIFY_UNBIND_DRIVER: + count = device_dma_allocations(dev); + if (count == 0) + break; + err_printk(dev, NULL, "DMA-API: device driver has pending " + "DMA allocations while released from device " + "[count=%d]\n", count); + break; + default: + break; + } + + return 0; +} + +void dma_debug_add_bus(struct bus_type *bus) +{ + struct notifier_block *nb; + + nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); + if (nb == NULL) { + printk(KERN_ERR "dma_debug_add_bus: out of memory\n"); + return; + } + + nb->notifier_call = dma_debug_device_change; + + bus_register_notifier(bus, nb); +} /* * Let the architectures decide how many entries should be preallocated. -- cgit v1.1 From 35d40952dba7b0689a16bd1463fb7698f8dbe639 Mon Sep 17 00:00:00 2001 From: FUJITA Tomonori Date: Thu, 19 Mar 2009 10:39:31 +0900 Subject: dma-debug: warn of unmapping an invalid dma address Impact: extend DMA-debug checks Calling dma_unmap families against an invalid dma address should be a bug. Signed-off-by: FUJITA Tomonori Cc: Joerg Roedel LKML-Reference: <20090319103743N.fujita.tomonori@lab.ntt.co.jp> Signed-off-by: Ingo Molnar --- lib/dma-debug.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 9a350b4..f9e6d38 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -531,8 +531,11 @@ static void check_unmap(struct dma_debug_entry *ref) struct hash_bucket *bucket; unsigned long flags; - if (dma_mapping_error(ref->dev, ref->dev_addr)) + if (dma_mapping_error(ref->dev, ref->dev_addr)) { + err_printk(ref->dev, NULL, "DMA-API: device driver tries " + "to free an invalid DMA memory address\n"); return; + } bucket = get_hash_bucket(ref, &flags); entry = hash_bucket_find(bucket, ref); -- cgit v1.1 From 9537a48ed4b9e4b738943d6da0a0fd4278adf905 Mon Sep 17 00:00:00 2001 From: Joerg Roedel Date: Mon, 23 Mar 2009 15:35:08 +0100 Subject: dma-debug: make memory range checks more consistent Impact: extend on-kernel-stack DMA debug checks to all !highmem pages We only checked dma_map_single() - extend it to dma_map_page() and dma_map_sg() as well. Also, fix dma_map_single() corner case bug: make sure we dont stack-check highmem (not mapped) pages. 
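For illustration only, a sketch of why the PageHighMem() guard matters: a highmem page has no permanent kernel mapping, so page_address() can return NULL and any stack/rodata check on that address would be meaningless. The function below is made up and only demonstrates the property.

    static void example_show_highmem_property(void)
    {
            struct page *page = alloc_page(GFP_HIGHUSER);   /* may be highmem */

            if (!page)
                    return;

            if (PageHighMem(page))
                    pr_info("highmem page: no kernel virtual address to check\n");
            else
                    pr_info("lowmem page mapped at %p\n", page_address(page));

            __free_page(page);
    }
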
Reported-by: FUJITA Tomonori Signed-off-by: Joerg Roedel Cc: iommu@lists.linux-foundation.org LKML-Reference: <1237818908-26516-1-git-send-email-joerg.roedel@amd.com> Signed-off-by: Ingo Molnar --- lib/dma-debug.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index f9e6d38..1a99208 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -722,10 +722,11 @@ void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, entry->size = size; entry->direction = direction; - if (map_single) { - void *addr = ((char *)page_address(page)) + offset; - + if (map_single) entry->type = dma_debug_single; + + if (!PageHighMem(page)) { + void *addr = ((char *)page_address(page)) + offset; check_for_stack(dev, addr); check_for_illegal_area(dev, addr, size); } @@ -779,8 +780,10 @@ void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, entry->sg_call_ents = nents; entry->sg_mapped_ents = mapped_ents; - check_for_stack(dev, sg_virt(s)); - check_for_illegal_area(dev, sg_virt(s), s->length); + if (!PageHighMem(sg_page(s))) { + check_for_stack(dev, sg_virt(s)); + check_for_illegal_area(dev, sg_virt(s), s->length); + } add_dma_entry(entry); } -- cgit v1.1 From 1fa5ae857bb14f6046205171d98506d8112dd74e Mon Sep 17 00:00:00 2001 From: Kay Sievers Date: Sun, 25 Jan 2009 15:17:37 +0100 Subject: driver core: get rid of struct device's bus_id string array Now that all users of bus_id is gone, we can remove it from struct device. Signed-off-by: Kay Sievers Signed-off-by: Greg Kroah-Hartman --- lib/kobject.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kobject.c b/lib/kobject.c index 0487d1f..a6dec32 100644 --- a/lib/kobject.c +++ b/lib/kobject.c @@ -212,7 +212,7 @@ static int kobject_add_internal(struct kobject *kobj) * @fmt: format string used to build the name * @vargs: vargs to format the string. */ -static int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, +int kobject_set_name_vargs(struct kobject *kobj, const char *fmt, va_list vargs) { const char *old_name = kobj->name; -- cgit v1.1 From f67f129e519fa87f8ebd236b6336fe43f31ee141 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Sun, 1 Mar 2009 21:10:49 +0800 Subject: Driver core: implement uevent suppress in kobject This patch implements uevent suppress in kobject and removes it from struct device, based on the following ideas: 1,Uevent sending should be one attribute of kobject, so suppressing it in kobject layer is more natural than in device layer. By this way, we can do it for other objects embedded with kobject. 2,It may save several bytes for each instance of struct device.(On my omap3(32bit ARM) based box, can save 8bytes per device object) This patch also introduces dev_set|get_uevent_suppress() helpers to set and query uevent_suppress attribute in case to help kobject as private part of struct device in future. [This version is against the latest driver-core patch set of Greg,please ignore the last version.] 
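For illustration only: the pattern the new dev_set_uevent_suppress() helper is meant to support, registering a device quietly and announcing it to userspace once setup is complete. The function below is a made-up sketch, not code from this patch.

    static int example_register_quietly(struct device *dev)
    {
            int ret;

            dev_set_uevent_suppress(dev, 1);        /* hold back KOBJ_ADD */
            ret = device_register(dev);
            if (ret)
                    return ret;

            /* ... create attributes, symlinks, etc. ... */

            dev_set_uevent_suppress(dev, 0);
            kobject_uevent(&dev->kobj, KOBJ_ADD);   /* announce it now */
            return 0;
    }
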
Signed-off-by: Ming Lei Signed-off-by: Greg Kroah-Hartman --- lib/kobject_uevent.c | 7 +++++++ 1 file changed, 7 insertions(+) (limited to 'lib') diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index 318328d..b2181cc 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -118,6 +118,13 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, kset = top_kobj->kset; uevent_ops = kset->uevent_ops; + /* skip the event, if uevent_suppress is set*/ + if (kobj->uevent_suppress) { + pr_debug("kobject: '%s' (%p): %s: uevent_suppress " + "caused the event to drop!\n", + kobject_name(kobj), kobj, __func__); + return 0; + } /* skip the event, if the filter returns zero. */ if (uevent_ops && uevent_ops->filter) if (!uevent_ops->filter(kset, kobj)) { -- cgit v1.1 From f520360d93cdc37de5d972dac4bf3bdef6a7f6a7 Mon Sep 17 00:00:00 2001 From: Arjan van de Ven Date: Thu, 19 Mar 2009 09:09:05 -0700 Subject: kobject: don't block for each kobject_uevent Right now, the kobject_uevent code blocks for each uevent that's being generated, due to using (for hystoric reasons) UHM_WAIT_EXEC as flag to call_usermode_helper(). Specifically, the effect is that each uevent that is being sent causes the code to wake up keventd, then block until keventd has processed the work. Needless to say, this happens many times during the system boot. This patches changes that to UHN_NO_WAIT (brilliant name for a constant btw) so that we only schedule the work to fire the uevent message, but do not wait for keventd to process the work. This removes one of the bottlenecks during boot; each one of them is only a small effect, but the sum of them does add up. [Note, distros that need this are broken, they should be setting CONFIG_UEVENT_HELPER_PATH to "", that way this code path will never be excuted at all -- gregkh] Signed-off-by: Arjan van de Ven Signed-off-by: Greg Kroah-Hartman --- lib/kobject_uevent.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'lib') diff --git a/lib/kobject_uevent.c b/lib/kobject_uevent.c index b2181cc..e68e743 100644 --- a/lib/kobject_uevent.c +++ b/lib/kobject_uevent.c @@ -255,7 +255,7 @@ int kobject_uevent_env(struct kobject *kobj, enum kobject_action action, goto exit; retval = call_usermodehelper(argv[0], argv, - env->envp, UMH_WAIT_EXEC); + env->envp, UMH_NO_WAIT); } exit: -- cgit v1.1 From e9d376f0fa66bd630fe27403669c6ae6c22a868f Mon Sep 17 00:00:00 2001 From: Jason Baron Date: Thu, 5 Feb 2009 11:51:38 -0500 Subject: dynamic debug: combine dprintk and dynamic printk This patch combines Greg Bank's dprintk() work with the existing dynamic printk patchset, we are now calling it 'dynamic debug'. The new feature of this patchset is a richer /debugfs control file interface, (an example output from my system is at the bottom), which allows fined grained control over the the debug output. The output can be controlled by function, file, module, format string, and line number. for example, enabled all debug messages in module 'nf_conntrack': echo -n 'module nf_conntrack +p' > /mnt/debugfs/dynamic_debug/control to disable them: echo -n 'module nf_conntrack -p' > /mnt/debugfs/dynamic_debug/control A further explanation can be found in the documentation patch. 
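For illustration only: a made-up pr_debug() call site and, in its comment, the kind of finer-grained query the new control file accepts (per file and function rather than only per module). The file name and path are examples.

    static void example_probe(void)
    {
            /*
             * Enable just this call site at runtime with e.g.:
             *   echo -n 'file example.c func example_probe +p' \
             *      > /mnt/debugfs/dynamic_debug/control
             * and turn it off again with '-p'.
             */
            pr_debug("example device: probing\n");
    }
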
Signed-off-by: Greg Banks Signed-off-by: Jason Baron Signed-off-by: Greg Kroah-Hartman --- lib/Kconfig.debug | 2 +- lib/Makefile | 2 +- lib/dynamic_debug.c | 756 +++++++++++++++++++++++++++++++++++++++++++++++++++ lib/dynamic_printk.c | 414 ---------------------------- 4 files changed, 758 insertions(+), 416 deletions(-) create mode 100644 lib/dynamic_debug.c delete mode 100644 lib/dynamic_printk.c (limited to 'lib') diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1bcf9cd..0dd1c04 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -847,7 +847,7 @@ config BUILD_DOCSRC Say N if you are unsure. -config DYNAMIC_PRINTK_DEBUG +config DYNAMIC_DEBUG bool "Enable dynamic printk() call support" default n depends on PRINTK diff --git a/lib/Makefile b/lib/Makefile index 32b0e64..8633d6b 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -82,7 +82,7 @@ obj-$(CONFIG_HAVE_LMB) += lmb.o obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o -obj-$(CONFIG_DYNAMIC_PRINTK_DEBUG) += dynamic_printk.o +obj-$(CONFIG_DYNAMIC_DEBUG) += dynamic_debug.o hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c new file mode 100644 index 0000000..9e123ae --- /dev/null +++ b/lib/dynamic_debug.c @@ -0,0 +1,756 @@ +/* + * lib/dynamic_debug.c + * + * make pr_debug()/dev_dbg() calls runtime configurable based upon their + * source module. + * + * Copyright (C) 2008 Jason Baron + * By Greg Banks + * Copyright (c) 2008 Silicon Graphics Inc. All Rights Reserved. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +extern struct _ddebug __start___verbose[]; +extern struct _ddebug __stop___verbose[]; + +/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which + * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They + * use independent hash functions, to reduce the chance of false positives. + */ +long long dynamic_debug_enabled; +EXPORT_SYMBOL_GPL(dynamic_debug_enabled); +long long dynamic_debug_enabled2; +EXPORT_SYMBOL_GPL(dynamic_debug_enabled2); + +struct ddebug_table { + struct list_head link; + char *mod_name; + unsigned int num_ddebugs; + unsigned int num_enabled; + struct _ddebug *ddebugs; +}; + +struct ddebug_query { + const char *filename; + const char *module; + const char *function; + const char *format; + unsigned int first_lineno, last_lineno; +}; + +struct ddebug_iter { + struct ddebug_table *table; + unsigned int idx; +}; + +static DEFINE_MUTEX(ddebug_lock); +static LIST_HEAD(ddebug_tables); +static int verbose = 0; + +/* Return the last part of a pathname */ +static inline const char *basename(const char *path) +{ + const char *tail = strrchr(path, '/'); + return tail ? 
tail+1 : path; +} + +/* format a string into buf[] which describes the _ddebug's flags */ +static char *ddebug_describe_flags(struct _ddebug *dp, char *buf, + size_t maxlen) +{ + char *p = buf; + + BUG_ON(maxlen < 4); + if (dp->flags & _DPRINTK_FLAGS_PRINT) + *p++ = 'p'; + if (p == buf) + *p++ = '-'; + *p = '\0'; + + return buf; +} + +/* + * must be called with ddebug_lock held + */ + +static int disabled_hash(char hash, bool first_table) +{ + struct ddebug_table *dt; + char table_hash_value; + + list_for_each_entry(dt, &ddebug_tables, link) { + if (first_table) + table_hash_value = dt->ddebugs->primary_hash; + else + table_hash_value = dt->ddebugs->secondary_hash; + if (dt->num_enabled && (hash == table_hash_value)) + return 0; + } + return 1; +} + +/* + * Search the tables for _ddebug's which match the given + * `query' and apply the `flags' and `mask' to them. Tells + * the user which ddebug's were changed, or whether none + * were matched. + */ +static void ddebug_change(const struct ddebug_query *query, + unsigned int flags, unsigned int mask) +{ + int i; + struct ddebug_table *dt; + unsigned int newflags; + unsigned int nfound = 0; + char flagbuf[8]; + + /* search for matching ddebugs */ + mutex_lock(&ddebug_lock); + list_for_each_entry(dt, &ddebug_tables, link) { + + /* match against the module name */ + if (query->module != NULL && + strcmp(query->module, dt->mod_name)) + continue; + + for (i = 0 ; i < dt->num_ddebugs ; i++) { + struct _ddebug *dp = &dt->ddebugs[i]; + + /* match against the source filename */ + if (query->filename != NULL && + strcmp(query->filename, dp->filename) && + strcmp(query->filename, basename(dp->filename))) + continue; + + /* match against the function */ + if (query->function != NULL && + strcmp(query->function, dp->function)) + continue; + + /* match against the format */ + if (query->format != NULL && + strstr(dp->format, query->format) == NULL) + continue; + + /* match against the line number range */ + if (query->first_lineno && + dp->lineno < query->first_lineno) + continue; + if (query->last_lineno && + dp->lineno > query->last_lineno) + continue; + + nfound++; + + newflags = (dp->flags & mask) | flags; + if (newflags == dp->flags) + continue; + + if (!newflags) + dt->num_enabled--; + else if (!dp-flags) + dt->num_enabled++; + dp->flags = newflags; + if (newflags) { + dynamic_debug_enabled |= + (1LL << dp->primary_hash); + dynamic_debug_enabled2 |= + (1LL << dp->secondary_hash); + } else { + if (disabled_hash(dp->primary_hash, true)) + dynamic_debug_enabled &= + ~(1LL << dp->primary_hash); + if (disabled_hash(dp->secondary_hash, false)) + dynamic_debug_enabled2 &= + ~(1LL << dp->secondary_hash); + } + if (verbose) + printk(KERN_INFO + "ddebug: changed %s:%d [%s]%s %s\n", + dp->filename, dp->lineno, + dt->mod_name, dp->function, + ddebug_describe_flags(dp, flagbuf, + sizeof(flagbuf))); + } + } + mutex_unlock(&ddebug_lock); + + if (!nfound && verbose) + printk(KERN_INFO "ddebug: no matches for query\n"); +} + +/* + * Wrapper around strsep() to collapse the multiple empty tokens + * that it returns when fed sequences of separator characters. + * Now, if we had strtok_r()... + */ +static inline char *nearly_strtok_r(char **p, const char *sep) +{ + char *r; + + while ((r = strsep(p, sep)) != NULL && *r == '\0') + ; + return r; +} + +/* + * Split the buffer `buf' into space-separated words. + * Return the number of such words or <0 on error. 
+ */ +static int ddebug_tokenize(char *buf, char *words[], int maxwords) +{ + int nwords = 0; + + while (nwords < maxwords && + (words[nwords] = nearly_strtok_r(&buf, " \t\r\n")) != NULL) + nwords++; + if (buf) + return -EINVAL; /* ran out of words[] before bytes */ + + if (verbose) { + int i; + printk(KERN_INFO "%s: split into words:", __func__); + for (i = 0 ; i < nwords ; i++) + printk(" \"%s\"", words[i]); + printk("\n"); + } + + return nwords; +} + +/* + * Parse a single line number. Note that the empty string "" + * is treated as a special case and converted to zero, which + * is later treated as a "don't care" value. + */ +static inline int parse_lineno(const char *str, unsigned int *val) +{ + char *end = NULL; + BUG_ON(str == NULL); + if (*str == '\0') { + *val = 0; + return 0; + } + *val = simple_strtoul(str, &end, 10); + return end == NULL || end == str || *end != '\0' ? -EINVAL : 0; +} + +/* + * Undo octal escaping in a string, inplace. This is useful to + * allow the user to express a query which matches a format + * containing embedded spaces. + */ +#define isodigit(c) ((c) >= '0' && (c) <= '7') +static char *unescape(char *str) +{ + char *in = str; + char *out = str; + + while (*in) { + if (*in == '\\') { + if (in[1] == '\\') { + *out++ = '\\'; + in += 2; + continue; + } else if (in[1] == 't') { + *out++ = '\t'; + in += 2; + continue; + } else if (in[1] == 'n') { + *out++ = '\n'; + in += 2; + continue; + } else if (isodigit(in[1]) && + isodigit(in[2]) && + isodigit(in[3])) { + *out++ = ((in[1] - '0')<<6) | + ((in[2] - '0')<<3) | + (in[3] - '0'); + in += 4; + continue; + } + } + *out++ = *in++; + } + *out = '\0'; + + return str; +} + +/* + * Parse words[] as a ddebug query specification, which is a series + * of (keyword, value) pairs chosen from these possibilities: + * + * func + * file + * file + * module + * format + * line + * line - // where either may be empty + */ +static int ddebug_parse_query(char *words[], int nwords, + struct ddebug_query *query) +{ + unsigned int i; + + /* check we have an even number of words */ + if (nwords % 2 != 0) + return -EINVAL; + memset(query, 0, sizeof(*query)); + + for (i = 0 ; i < nwords ; i += 2) { + if (!strcmp(words[i], "func")) + query->function = words[i+1]; + else if (!strcmp(words[i], "file")) + query->filename = words[i+1]; + else if (!strcmp(words[i], "module")) + query->module = words[i+1]; + else if (!strcmp(words[i], "format")) + query->format = unescape(words[i+1]); + else if (!strcmp(words[i], "line")) { + char *first = words[i+1]; + char *last = strchr(first, '-'); + if (last) + *last++ = '\0'; + if (parse_lineno(first, &query->first_lineno) < 0) + return -EINVAL; + if (last != NULL) { + /* range - */ + if (parse_lineno(last, &query->last_lineno) < 0) + return -EINVAL; + } else { + query->last_lineno = query->first_lineno; + } + } else { + if (verbose) + printk(KERN_ERR "%s: unknown keyword \"%s\"\n", + __func__, words[i]); + return -EINVAL; + } + } + + if (verbose) + printk(KERN_INFO "%s: q->function=\"%s\" q->filename=\"%s\" " + "q->module=\"%s\" q->format=\"%s\" q->lineno=%u-%u\n", + __func__, query->function, query->filename, + query->module, query->format, query->first_lineno, + query->last_lineno); + + return 0; +} + +/* + * Parse `str' as a flags specification, format [-+=][p]+. + * Sets up *maskp and *flagsp to be used when changing the + * flags fields of matched _ddebug's. Returns 0 on success + * or <0 on error. 
+ */ +static int ddebug_parse_flags(const char *str, unsigned int *flagsp, + unsigned int *maskp) +{ + unsigned flags = 0; + int op = '='; + + switch (*str) { + case '+': + case '-': + case '=': + op = *str++; + break; + default: + return -EINVAL; + } + if (verbose) + printk(KERN_INFO "%s: op='%c'\n", __func__, op); + + for ( ; *str ; ++str) { + switch (*str) { + case 'p': + flags |= _DPRINTK_FLAGS_PRINT; + break; + default: + return -EINVAL; + } + } + if (flags == 0) + return -EINVAL; + if (verbose) + printk(KERN_INFO "%s: flags=0x%x\n", __func__, flags); + + /* calculate final *flagsp, *maskp according to mask and op */ + switch (op) { + case '=': + *maskp = 0; + *flagsp = flags; + break; + case '+': + *maskp = ~0U; + *flagsp = flags; + break; + case '-': + *maskp = ~flags; + *flagsp = 0; + break; + } + if (verbose) + printk(KERN_INFO "%s: *flagsp=0x%x *maskp=0x%x\n", + __func__, *flagsp, *maskp); + return 0; +} + +/* + * File_ops->write method for /dynamic_debug/conrol. Gathers the + * command text from userspace, parses and executes it. + */ +static ssize_t ddebug_proc_write(struct file *file, const char __user *ubuf, + size_t len, loff_t *offp) +{ + unsigned int flags = 0, mask = 0; + struct ddebug_query query; +#define MAXWORDS 9 + int nwords; + char *words[MAXWORDS]; + char tmpbuf[256]; + + if (len == 0) + return 0; + /* we don't check *offp -- multiple writes() are allowed */ + if (len > sizeof(tmpbuf)-1) + return -E2BIG; + if (copy_from_user(tmpbuf, ubuf, len)) + return -EFAULT; + tmpbuf[len] = '\0'; + if (verbose) + printk(KERN_INFO "%s: read %d bytes from userspace\n", + __func__, (int)len); + + nwords = ddebug_tokenize(tmpbuf, words, MAXWORDS); + if (nwords < 0) + return -EINVAL; + if (ddebug_parse_query(words, nwords-1, &query)) + return -EINVAL; + if (ddebug_parse_flags(words[nwords-1], &flags, &mask)) + return -EINVAL; + + /* actually go and implement the change */ + ddebug_change(&query, flags, mask); + + *offp += len; + return len; +} + +/* + * Set the iterator to point to the first _ddebug object + * and return a pointer to that first object. Returns + * NULL if there are no _ddebugs at all. + */ +static struct _ddebug *ddebug_iter_first(struct ddebug_iter *iter) +{ + if (list_empty(&ddebug_tables)) { + iter->table = NULL; + iter->idx = 0; + return NULL; + } + iter->table = list_entry(ddebug_tables.next, + struct ddebug_table, link); + iter->idx = 0; + return &iter->table->ddebugs[iter->idx]; +} + +/* + * Advance the iterator to point to the next _ddebug + * object from the one the iterator currently points at, + * and returns a pointer to the new _ddebug. Returns + * NULL if the iterator has seen all the _ddebugs. + */ +static struct _ddebug *ddebug_iter_next(struct ddebug_iter *iter) +{ + if (iter->table == NULL) + return NULL; + if (++iter->idx == iter->table->num_ddebugs) { + /* iterate to next table */ + iter->idx = 0; + if (list_is_last(&iter->table->link, &ddebug_tables)) { + iter->table = NULL; + return NULL; + } + iter->table = list_entry(iter->table->link.next, + struct ddebug_table, link); + } + return &iter->table->ddebugs[iter->idx]; +} + +/* + * Seq_ops start method. Called at the start of every + * read() call from userspace. Takes the ddebug_lock and + * seeks the seq_file's iterator to the given position. 
+ */ +static void *ddebug_proc_start(struct seq_file *m, loff_t *pos) +{ + struct ddebug_iter *iter = m->private; + struct _ddebug *dp; + int n = *pos; + + if (verbose) + printk(KERN_INFO "%s: called m=%p *pos=%lld\n", + __func__, m, (unsigned long long)*pos); + + mutex_lock(&ddebug_lock); + + if (!n) + return SEQ_START_TOKEN; + if (n < 0) + return NULL; + dp = ddebug_iter_first(iter); + while (dp != NULL && --n > 0) + dp = ddebug_iter_next(iter); + return dp; +} + +/* + * Seq_ops next method. Called several times within a read() + * call from userspace, with ddebug_lock held. Walks to the + * next _ddebug object with a special case for the header line. + */ +static void *ddebug_proc_next(struct seq_file *m, void *p, loff_t *pos) +{ + struct ddebug_iter *iter = m->private; + struct _ddebug *dp; + + if (verbose) + printk(KERN_INFO "%s: called m=%p p=%p *pos=%lld\n", + __func__, m, p, (unsigned long long)*pos); + + if (p == SEQ_START_TOKEN) + dp = ddebug_iter_first(iter); + else + dp = ddebug_iter_next(iter); + ++*pos; + return dp; +} + +/* + * Seq_ops show method. Called several times within a read() + * call from userspace, with ddebug_lock held. Formats the + * current _ddebug as a single human-readable line, with a + * special case for the header line. + */ +static int ddebug_proc_show(struct seq_file *m, void *p) +{ + struct ddebug_iter *iter = m->private; + struct _ddebug *dp = p; + char flagsbuf[8]; + + if (verbose) + printk(KERN_INFO "%s: called m=%p p=%p\n", + __func__, m, p); + + if (p == SEQ_START_TOKEN) { + seq_puts(m, + "# filename:lineno [module]function flags format\n"); + return 0; + } + + seq_printf(m, "%s:%u [%s]%s %s \"", + dp->filename, dp->lineno, + iter->table->mod_name, dp->function, + ddebug_describe_flags(dp, flagsbuf, sizeof(flagsbuf))); + seq_escape(m, dp->format, "\t\r\n\""); + seq_puts(m, "\"\n"); + + return 0; +} + +/* + * Seq_ops stop method. Called at the end of each read() + * call from userspace. Drops ddebug_lock. + */ +static void ddebug_proc_stop(struct seq_file *m, void *p) +{ + if (verbose) + printk(KERN_INFO "%s: called m=%p p=%p\n", + __func__, m, p); + mutex_unlock(&ddebug_lock); +} + +static const struct seq_operations ddebug_proc_seqops = { + .start = ddebug_proc_start, + .next = ddebug_proc_next, + .show = ddebug_proc_show, + .stop = ddebug_proc_stop +}; + +/* + * File_ops->open method for /dynamic_debug/control. Does the seq_file + * setup dance, and also creates an iterator to walk the _ddebugs. + * Note that we create a seq_file always, even for O_WRONLY files + * where it's not needed, as doing so simplifies the ->release method. + */ +static int ddebug_proc_open(struct inode *inode, struct file *file) +{ + struct ddebug_iter *iter; + int err; + + if (verbose) + printk(KERN_INFO "%s: called\n", __func__); + + iter = kzalloc(sizeof(*iter), GFP_KERNEL); + if (iter == NULL) + return -ENOMEM; + + err = seq_open(file, &ddebug_proc_seqops); + if (err) { + kfree(iter); + return err; + } + ((struct seq_file *) file->private_data)->private = iter; + return 0; +} + +static const struct file_operations ddebug_proc_fops = { + .owner = THIS_MODULE, + .open = ddebug_proc_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, + .write = ddebug_proc_write +}; + +/* + * Allocate a new ddebug_table for the given module + * and add it to the global list. 
+ */ +int ddebug_add_module(struct _ddebug *tab, unsigned int n, + const char *name) +{ + struct ddebug_table *dt; + char *new_name; + + dt = kzalloc(sizeof(*dt), GFP_KERNEL); + if (dt == NULL) + return -ENOMEM; + new_name = kstrdup(name, GFP_KERNEL); + if (new_name == NULL) { + kfree(dt); + return -ENOMEM; + } + dt->mod_name = new_name; + dt->num_ddebugs = n; + dt->num_enabled = 0; + dt->ddebugs = tab; + + mutex_lock(&ddebug_lock); + list_add_tail(&dt->link, &ddebug_tables); + mutex_unlock(&ddebug_lock); + + if (verbose) + printk(KERN_INFO "%u debug prints in module %s\n", + n, dt->mod_name); + return 0; +} +EXPORT_SYMBOL_GPL(ddebug_add_module); + +static void ddebug_table_free(struct ddebug_table *dt) +{ + list_del_init(&dt->link); + kfree(dt->mod_name); + kfree(dt); +} + +/* + * Called in response to a module being unloaded. Removes + * any ddebug_table's which point at the module. + */ +int ddebug_remove_module(char *mod_name) +{ + struct ddebug_table *dt, *nextdt; + int ret = -ENOENT; + + if (verbose) + printk(KERN_INFO "%s: removing module \"%s\"\n", + __func__, mod_name); + + mutex_lock(&ddebug_lock); + list_for_each_entry_safe(dt, nextdt, &ddebug_tables, link) { + if (!strcmp(dt->mod_name, mod_name)) { + ddebug_table_free(dt); + ret = 0; + } + } + mutex_unlock(&ddebug_lock); + return ret; +} +EXPORT_SYMBOL_GPL(ddebug_remove_module); + +static void ddebug_remove_all_tables(void) +{ + mutex_lock(&ddebug_lock); + while (!list_empty(&ddebug_tables)) { + struct ddebug_table *dt = list_entry(ddebug_tables.next, + struct ddebug_table, + link); + ddebug_table_free(dt); + } + mutex_unlock(&ddebug_lock); +} + +static int __init dynamic_debug_init(void) +{ + struct dentry *dir, *file; + struct _ddebug *iter, *iter_start; + const char *modname = NULL; + int ret = 0; + int n = 0; + + dir = debugfs_create_dir("dynamic_debug", NULL); + if (!dir) + return -ENOMEM; + file = debugfs_create_file("control", 0644, dir, NULL, + &ddebug_proc_fops); + if (!file) { + debugfs_remove(dir); + return -ENOMEM; + } + if (__start___verbose != __stop___verbose) { + iter = __start___verbose; + modname = iter->modname; + iter_start = iter; + for (; iter < __stop___verbose; iter++) { + if (strcmp(modname, iter->modname)) { + ret = ddebug_add_module(iter_start, n, modname); + if (ret) + goto out_free; + n = 0; + modname = iter->modname; + iter_start = iter; + } + n++; + } + ret = ddebug_add_module(iter_start, n, modname); + } +out_free: + if (ret) { + ddebug_remove_all_tables(); + debugfs_remove(dir); + debugfs_remove(file); + } + return 0; +} +module_init(dynamic_debug_init); diff --git a/lib/dynamic_printk.c b/lib/dynamic_printk.c deleted file mode 100644 index 165a197..0000000 --- a/lib/dynamic_printk.c +++ /dev/null @@ -1,414 +0,0 @@ -/* - * lib/dynamic_printk.c - * - * make pr_debug()/dev_dbg() calls runtime configurable based upon their - * their source module. - * - * Copyright (C) 2008 Red Hat, Inc., Jason Baron - */ - -#include -#include -#include -#include -#include -#include - -extern struct mod_debug __start___verbose[]; -extern struct mod_debug __stop___verbose[]; - -struct debug_name { - struct hlist_node hlist; - struct hlist_node hlist2; - int hash1; - int hash2; - char *name; - int enable; - int type; -}; - -static int nr_entries; -static int num_enabled; -int dynamic_enabled = DYNAMIC_ENABLED_NONE; -static struct hlist_head module_table[DEBUG_HASH_TABLE_SIZE] = - { [0 ... DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT }; -static struct hlist_head module_table2[DEBUG_HASH_TABLE_SIZE] = - { [0 ... 
DEBUG_HASH_TABLE_SIZE-1] = HLIST_HEAD_INIT }; -static DECLARE_MUTEX(debug_list_mutex); - -/* dynamic_printk_enabled, and dynamic_printk_enabled2 are bitmasks in which - * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They - * use independent hash functions, to reduce the chance of false positives. - */ -long long dynamic_printk_enabled; -EXPORT_SYMBOL_GPL(dynamic_printk_enabled); -long long dynamic_printk_enabled2; -EXPORT_SYMBOL_GPL(dynamic_printk_enabled2); - -/* returns the debug module pointer. */ -static struct debug_name *find_debug_module(char *module_name) -{ - int i; - struct hlist_head *head; - struct hlist_node *node; - struct debug_name *element; - - element = NULL; - for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) { - head = &module_table[i]; - hlist_for_each_entry_rcu(element, node, head, hlist) - if (!strcmp(element->name, module_name)) - return element; - } - return NULL; -} - -/* returns the debug module pointer. */ -static struct debug_name *find_debug_module_hash(char *module_name, int hash) -{ - struct hlist_head *head; - struct hlist_node *node; - struct debug_name *element; - - element = NULL; - head = &module_table[hash]; - hlist_for_each_entry_rcu(element, node, head, hlist) - if (!strcmp(element->name, module_name)) - return element; - return NULL; -} - -/* caller must hold mutex*/ -static int __add_debug_module(char *mod_name, int hash, int hash2) -{ - struct debug_name *new; - char *module_name; - int ret = 0; - - if (find_debug_module(mod_name)) { - ret = -EINVAL; - goto out; - } - module_name = kmalloc(strlen(mod_name) + 1, GFP_KERNEL); - if (!module_name) { - ret = -ENOMEM; - goto out; - } - module_name = strcpy(module_name, mod_name); - module_name[strlen(mod_name)] = '\0'; - new = kzalloc(sizeof(struct debug_name), GFP_KERNEL); - if (!new) { - kfree(module_name); - ret = -ENOMEM; - goto out; - } - INIT_HLIST_NODE(&new->hlist); - INIT_HLIST_NODE(&new->hlist2); - new->name = module_name; - new->hash1 = hash; - new->hash2 = hash2; - hlist_add_head_rcu(&new->hlist, &module_table[hash]); - hlist_add_head_rcu(&new->hlist2, &module_table2[hash2]); - nr_entries++; -out: - return ret; -} - -int unregister_dynamic_debug_module(char *mod_name) -{ - struct debug_name *element; - int ret = 0; - - down(&debug_list_mutex); - element = find_debug_module(mod_name); - if (!element) { - ret = -EINVAL; - goto out; - } - hlist_del_rcu(&element->hlist); - hlist_del_rcu(&element->hlist2); - synchronize_rcu(); - kfree(element->name); - if (element->enable) - num_enabled--; - kfree(element); - nr_entries--; -out: - up(&debug_list_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(unregister_dynamic_debug_module); - -int register_dynamic_debug_module(char *mod_name, int type, char *share_name, - char *flags, int hash, int hash2) -{ - struct debug_name *elem; - int ret = 0; - - down(&debug_list_mutex); - elem = find_debug_module(mod_name); - if (!elem) { - if (__add_debug_module(mod_name, hash, hash2)) - goto out; - elem = find_debug_module(mod_name); - if (dynamic_enabled == DYNAMIC_ENABLED_ALL && - !strcmp(mod_name, share_name)) { - elem->enable = true; - num_enabled++; - } - } - elem->type |= type; -out: - up(&debug_list_mutex); - return ret; -} -EXPORT_SYMBOL_GPL(register_dynamic_debug_module); - -int __dynamic_dbg_enabled_helper(char *mod_name, int type, int value, int hash) -{ - struct debug_name *elem; - int ret = 0; - - if (dynamic_enabled == DYNAMIC_ENABLED_ALL) - return 1; - rcu_read_lock(); - elem = find_debug_module_hash(mod_name, hash); - if (elem && 
elem->enable) - ret = 1; - rcu_read_unlock(); - return ret; -} -EXPORT_SYMBOL_GPL(__dynamic_dbg_enabled_helper); - -static void set_all(bool enable) -{ - struct debug_name *e; - struct hlist_node *node; - int i; - long long enable_mask; - - for (i = 0; i < DEBUG_HASH_TABLE_SIZE; i++) { - if (module_table[i].first != NULL) { - hlist_for_each_entry(e, node, &module_table[i], hlist) { - e->enable = enable; - } - } - } - if (enable) - enable_mask = ULLONG_MAX; - else - enable_mask = 0; - dynamic_printk_enabled = enable_mask; - dynamic_printk_enabled2 = enable_mask; -} - -static int disabled_hash(int i, bool first_table) -{ - struct debug_name *e; - struct hlist_node *node; - - if (first_table) { - hlist_for_each_entry(e, node, &module_table[i], hlist) { - if (e->enable) - return 0; - } - } else { - hlist_for_each_entry(e, node, &module_table2[i], hlist2) { - if (e->enable) - return 0; - } - } - return 1; -} - -static ssize_t pr_debug_write(struct file *file, const char __user *buf, - size_t length, loff_t *ppos) -{ - char *buffer, *s, *value_str, *setting_str; - int err, value; - struct debug_name *elem = NULL; - int all = 0; - - if (length > PAGE_SIZE || length < 0) - return -EINVAL; - - buffer = (char *)__get_free_page(GFP_KERNEL); - if (!buffer) - return -ENOMEM; - - err = -EFAULT; - if (copy_from_user(buffer, buf, length)) - goto out; - - err = -EINVAL; - if (length < PAGE_SIZE) - buffer[length] = '\0'; - else if (buffer[PAGE_SIZE-1]) - goto out; - - err = -EINVAL; - down(&debug_list_mutex); - - if (strncmp("set", buffer, 3)) - goto out_up; - s = buffer + 3; - setting_str = strsep(&s, "="); - if (s == NULL) - goto out_up; - setting_str = strstrip(setting_str); - value_str = strsep(&s, " "); - if (s == NULL) - goto out_up; - s = strstrip(s); - if (!strncmp(s, "all", 3)) - all = 1; - else - elem = find_debug_module(s); - if (!strncmp(setting_str, "enable", 6)) { - value = !!simple_strtol(value_str, NULL, 10); - if (all) { - if (value) { - set_all(true); - num_enabled = nr_entries; - dynamic_enabled = DYNAMIC_ENABLED_ALL; - } else { - set_all(false); - num_enabled = 0; - dynamic_enabled = DYNAMIC_ENABLED_NONE; - } - err = 0; - } else if (elem) { - if (value && (elem->enable == 0)) { - dynamic_printk_enabled |= (1LL << elem->hash1); - dynamic_printk_enabled2 |= (1LL << elem->hash2); - elem->enable = 1; - num_enabled++; - dynamic_enabled = DYNAMIC_ENABLED_SOME; - err = 0; - printk(KERN_DEBUG - "debugging enabled for module %s\n", - elem->name); - } else if (!value && (elem->enable == 1)) { - elem->enable = 0; - num_enabled--; - if (disabled_hash(elem->hash1, true)) - dynamic_printk_enabled &= - ~(1LL << elem->hash1); - if (disabled_hash(elem->hash2, false)) - dynamic_printk_enabled2 &= - ~(1LL << elem->hash2); - if (num_enabled) - dynamic_enabled = DYNAMIC_ENABLED_SOME; - else - dynamic_enabled = DYNAMIC_ENABLED_NONE; - err = 0; - printk(KERN_DEBUG - "debugging disabled for module %s\n", - elem->name); - } - } - } - if (!err) - err = length; -out_up: - up(&debug_list_mutex); -out: - free_page((unsigned long)buffer); - return err; -} - -static void *pr_debug_seq_start(struct seq_file *f, loff_t *pos) -{ - return (*pos < DEBUG_HASH_TABLE_SIZE) ? 
		pos : NULL;
-}
-
-static void *pr_debug_seq_next(struct seq_file *s, void *v, loff_t *pos)
-{
-	(*pos)++;
-	if (*pos >= DEBUG_HASH_TABLE_SIZE)
-		return NULL;
-	return pos;
-}
-
-static void pr_debug_seq_stop(struct seq_file *s, void *v)
-{
-	/* Nothing to do */
-}
-
-static int pr_debug_seq_show(struct seq_file *s, void *v)
-{
-	struct hlist_head *head;
-	struct hlist_node *node;
-	struct debug_name *elem;
-	unsigned int i = *(loff_t *) v;
-
-	rcu_read_lock();
-	head = &module_table[i];
-	hlist_for_each_entry_rcu(elem, node, head, hlist) {
-		seq_printf(s, "%s enabled=%d", elem->name, elem->enable);
-		seq_printf(s, "\n");
-	}
-	rcu_read_unlock();
-	return 0;
-}
-
-static struct seq_operations pr_debug_seq_ops = {
-	.start = pr_debug_seq_start,
-	.next = pr_debug_seq_next,
-	.stop = pr_debug_seq_stop,
-	.show = pr_debug_seq_show
-};
-
-static int pr_debug_open(struct inode *inode, struct file *filp)
-{
-	return seq_open(filp, &pr_debug_seq_ops);
-}
-
-static const struct file_operations pr_debug_operations = {
-	.open = pr_debug_open,
-	.read = seq_read,
-	.write = pr_debug_write,
-	.llseek = seq_lseek,
-	.release = seq_release,
-};
-
-static int __init dynamic_printk_init(void)
-{
-	struct dentry *dir, *file;
-	struct mod_debug *iter;
-	unsigned long value;
-
-	dir = debugfs_create_dir("dynamic_printk", NULL);
-	if (!dir)
-		return -ENOMEM;
-	file = debugfs_create_file("modules", 0644, dir, NULL,
-					&pr_debug_operations);
-	if (!file) {
-		debugfs_remove(dir);
-		return -ENOMEM;
-	}
-	for (value = (unsigned long)__start___verbose;
-		value < (unsigned long)__stop___verbose;
-		value += sizeof(struct mod_debug)) {
-		iter = (struct mod_debug *)value;
-		register_dynamic_debug_module(iter->modname,
-					iter->type,
-					iter->logical_modname,
-					iter->flag_names, iter->hash, iter->hash2);
-	}
-	if (dynamic_enabled == DYNAMIC_ENABLED_ALL)
-		set_all(true);
-	return 0;
-}
-module_init(dynamic_printk_init);
-/* may want to move this earlier so we can get traces as early as possible */
-
-static int __init dynamic_printk_setup(char *str)
-{
-	if (str)
-		return -ENOENT;
-	dynamic_enabled = DYNAMIC_ENABLED_ALL;
-	return 0;
-}
-/* Use early_param(), so we can get debug output as early as possible */
-early_param("dynamic_printk", dynamic_printk_setup);
-- cgit v1.1

From 86151fdf38b3795f292b39defbff39d2684b9c8c Mon Sep 17 00:00:00 2001
From: Jason Baron
Date: Thu, 5 Feb 2009 11:53:15 -0500
Subject: dynamic debug: update docs

Updates the documentation for the 'dynamic debug' feature.

Signed-off-by: Greg Banks
Signed-off-by: Jason Baron
Signed-off-by: Greg Kroah-Hartman
---
 lib/Kconfig.debug | 72 +++++++++++++++++++++++++++++++------------------------
 1 file changed, 41 insertions(+), 31 deletions(-)

(limited to 'lib')

diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 0dd1c04..8fee0a1 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -848,59 +848,69 @@ config BUILD_DOCSRC
 	  Say N if you are unsure.
 
 config DYNAMIC_DEBUG
-	bool "Enable dynamic printk() call support"
+	bool "Enable dynamic printk() support"
 	default n
 	depends on PRINTK
+	depends on DEBUG_FS
 	select PRINTK_DEBUG
 	help
 
 	  Compiles debug level messages into the kernel, which would not
 	  otherwise be available at runtime. These messages can then be
-	  enabled/disabled on a per module basis. This mechanism implicitly
-	  enables all pr_debug() and dev_dbg() calls. The impact of this
-	  compile option is a larger kernel text size of about 2%.
+	  enabled/disabled based on various levels of scope - per source file,
+	  function, module, format string, and line number. This mechanism
+	  implicitly enables all pr_debug() and dev_dbg() calls. The impact of
+	  this compile option is a larger kernel text size of about 2%.
 
 	  Usage:
 
-	  Dynamic debugging is controlled by the debugfs file,
-	  dynamic_printk/modules. This file contains a list of the modules that
-	  can be enabled. The format of the file is the module name, followed
-	  by a set of flags that can be enabled. The first flag is always the
-	  'enabled' flag. For example:
+	  Dynamic debugging is controlled via the 'dynamic_debug/ddebug' file,
+	  which is contained in the 'debugfs' filesystem. Thus, the debugfs
+	  filesystem must first be mounted before making use of this feature.
+	  We refer to the control file as: <debugfs>/dynamic_debug/ddebug. This
+	  file contains a list of the debug statements that can be enabled. The
+	  format for each line of the file is:
 
-		<module_name> <enabled=0/1>
-			.
-			.
-			.
+	  filename:lineno [module]function flags format
 
-	  <module_name> : Name of the module in which the debug call resides
-	  <enabled=0/1> : whether the messages are enabled or not
+	  filename : source file of the debug statement
+	  lineno : line number of the debug statement
+	  module : module that contains the debug statement
+	  function : function that contains the debug statement
+	  flags : 'p' means the line is turned 'on' for printing
+	  format : the format used for the debug statement
 
 	  From a live system:
 
-		snd_hda_intel enabled=0
-		fixup enabled=0
-		driver enabled=0
+	  nullarbor:~ # cat <debugfs>/dynamic_debug/ddebug
+	  # filename:lineno [module]function flags format
+	  fs/aio.c:222 [aio]__put_ioctx - "__put_ioctx:\040freeing\040%p\012"
+	  fs/aio.c:248 [aio]ioctx_alloc - "ENOMEM:\040nr_events\040too\040high\012"
+	  fs/aio.c:1770 [aio]sys_io_cancel - "calling\040cancel\012"
 
-	  Enable a module:
+	  Example usage:
 
-		$echo "set enabled=1 <module_name>" > dynamic_printk/modules
+	  // enable the message at line 1603 of file svcsock.c
+	  nullarbor:~ # echo -n 'file svcsock.c line 1603 +p' >
+					<debugfs>/dynamic_debug/ddebug
 
-	  Disable a module:
+	  // enable all the messages in file svcsock.c
+	  nullarbor:~ # echo -n 'file svcsock.c +p' >
+					<debugfs>/dynamic_debug/ddebug
 
-		$echo "set enabled=0 <module_name>" > dynamic_printk/modules
+	  // enable all the messages in the NFS server module
+	  nullarbor:~ # echo -n 'module nfsd +p' >
+					<debugfs>/dynamic_debug/ddebug
 
-	  Enable all modules:
+	  // enable all 12 messages in the function svc_process()
+	  nullarbor:~ # echo -n 'func svc_process +p' >
+					<debugfs>/dynamic_debug/ddebug
 
-		$echo "set enabled=1 all" > dynamic_printk/modules
+	  // disable all 12 messages in the function svc_process()
+	  nullarbor:~ # echo -n 'func svc_process -p' >
+					<debugfs>/dynamic_debug/ddebug
 
-	  Disable all modules:
-
-		$echo "set enabled=0 all" > dynamic_printk/modules
-
-	  Finally, passing "dynamic_printk" at the command line enables
-	  debugging for all modules. This mode can be turned off via the above
-	  disable command.
+	  See Documentation/dynamic-debug-howto.txt for additional information.
 
 source "samples/Kconfig"
-- cgit v1.1

From 9898abb3d23311fa227a7f46bf4e40fd2954057f Mon Sep 17 00:00:00 2001
From: Greg Banks
Date: Fri, 6 Feb 2009 12:54:26 +1100
Subject: Dynamic debug: allow simple quoting of words

Allow simple quoting of words in the dynamic debug control language.
This allows more natural specification when using the control language
to match against printk formats, e.g.

#echo -n 'format "Setting node for non-present cpu" +p' >
				/mnt/debugfs/dynamic_debug/control

instead of

#echo -n 'format Setting\040node\040for\040non-present\040cpu +p' >
				/mnt/debugfs/dynamic_debug/control

Adjust the dynamic debug documentation to describe that and provide a
new example.
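[Editor's note: as a quick illustration of the quoting behaviour this patch describes (implemented by the ddebug_tokenize() change further down), here is a stand-alone, user-space sketch of the same word-splitting rules: whitespace-separated words, optional '...' or "..." quoting, no nesting or escapes. This is not the kernel code; the MAXWORDS limit, the sample input, and the main() driver are invented for the example.]

	/*
	 * Editor's sketch, not kernel code: approximate the control-language
	 * tokenizer described above in plain user-space C.
	 */
	#include <ctype.h>
	#include <stdio.h>

	#define MAXWORDS 16

	static int tokenize(char *buf, char *words[], int maxwords)
	{
		int nwords = 0;

		while (*buf) {
			char *end;

			/* skip leading whitespace */
			while (*buf && isspace((unsigned char)*buf))
				buf++;
			if (!*buf)
				break;		/* trailing whitespace only */

			if (*buf == '"' || *buf == '\'') {
				/* quoted word: runs to the matching quote */
				int quote = *buf++;
				for (end = buf; *end && *end != quote; end++)
					;
				if (!*end)
					return -1;	/* unterminated quote */
			} else {
				/* plain word: runs to the next whitespace */
				for (end = buf; *end && !isspace((unsigned char)*end); end++)
					;
			}

			if (nwords == maxwords)
				return -1;		/* too many words */
			if (*end)
				*end++ = '\0';		/* terminate the word in place */
			words[nwords++] = buf;
			buf = end;
		}
		return nwords;
	}

	int main(void)
	{
		char line[] = "format \"Setting node for non-present cpu\" +p";
		char *words[MAXWORDS];
		int i, n = tokenize(line, words, MAXWORDS);

		for (i = 0; i < n; i++)
			printf("word %d: <%s>\n", i, words[i]);
		return 0;
	}

[Run on the sample line above, this prints three words: "format", the unquoted format string, and "+p" -- which is exactly the parse the new control-language syntax relies on.]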
Adjust the existing examples in the documentation to reflect the current whitespace escaping behaviour when reading the control file. Fix some minor documentation trailing whitespace. Signed-off-by: Greg Banks Acked-by: Jason Baron Signed-off-by: Greg Kroah-Hartman --- lib/dynamic_debug.c | 53 +++++++++++++++++++++++++++++++++-------------------- 1 file changed, 33 insertions(+), 20 deletions(-) (limited to 'lib') diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index 9e123ae..833139c 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -196,32 +196,45 @@ static void ddebug_change(const struct ddebug_query *query, } /* - * Wrapper around strsep() to collapse the multiple empty tokens - * that it returns when fed sequences of separator characters. - * Now, if we had strtok_r()... - */ -static inline char *nearly_strtok_r(char **p, const char *sep) -{ - char *r; - - while ((r = strsep(p, sep)) != NULL && *r == '\0') - ; - return r; -} - -/* * Split the buffer `buf' into space-separated words. - * Return the number of such words or <0 on error. + * Handles simple " and ' quoting, i.e. without nested, + * embedded or escaped \". Return the number of words + * or <0 on error. */ static int ddebug_tokenize(char *buf, char *words[], int maxwords) { int nwords = 0; - while (nwords < maxwords && - (words[nwords] = nearly_strtok_r(&buf, " \t\r\n")) != NULL) - nwords++; - if (buf) - return -EINVAL; /* ran out of words[] before bytes */ + while (*buf) { + char *end; + + /* Skip leading whitespace */ + while (*buf && isspace(*buf)) + buf++; + if (!*buf) + break; /* oh, it was trailing whitespace */ + + /* Run `end' over a word, either whitespace separated or quoted */ + if (*buf == '"' || *buf == '\'') { + int quote = *buf++; + for (end = buf ; *end && *end != quote ; end++) + ; + if (!*end) + return -EINVAL; /* unclosed quote */ + } else { + for (end = buf ; *end && !isspace(*end) ; end++) + ; + BUG_ON(end == buf); + } + /* Here `buf' is the start of the word, `end' is one past the end */ + + if (nwords == maxwords) + return -EINVAL; /* ran out of words[] before bytes */ + if (*end) + *end++ = '\0'; /* terminate the word */ + words[nwords++] = buf; + buf = end; + } if (verbose) { int i; -- cgit v1.1 From 93c36ed8348934b462044d2d60ab345055318933 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Mon, 30 Mar 2009 14:08:44 -0700 Subject: dma-debug: fix printk formats (i386) Fix printk format warnings in dma-debug: lib/dma-debug.c:645: warning: format '%016llx' expects type 'long long unsigned int', but argument 6 has type 'dma_addr_t' lib/dma-debug.c:662: warning: format '%016llx' expects type 'long long unsigned int', but argument 6 has type 'dma_addr_t' lib/dma-debug.c:676: warning: format '%016llx' expects type 'long long unsigned int', but argument 6 has type 'dma_addr_t' lib/dma-debug.c:686: warning: format '%016llx' expects type 'long long unsigned int', but argument 6 has type 'dma_addr_t' Signed-off-by: Randy Dunlap Signed-off-by: Linus Torvalds --- lib/dma-debug.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'lib') diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 1a99208..d3da7ed 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -648,7 +648,7 @@ static void check_sync(struct device *dev, dma_addr_t addr, err_printk(dev, NULL, "DMA-API: device driver tries " "to sync DMA memory it has not allocated " "[device address=0x%016llx] [size=%llu bytes]\n", - addr, size); + (unsigned long long)addr, size); goto out; } @@ -666,7 +666,7 @@ static void 
check_sync(struct device *dev, dma_addr_t addr,
 				"DMA memory with different direction "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				addr, entry->size,
+				(unsigned long long)addr, entry->size,
 				dir2name[entry->direction],
 				dir2name[direction]);
 	}
 
@@ -680,7 +680,7 @@ static void check_sync(struct device *dev, dma_addr_t addr,
 				"device read-only DMA memory for cpu "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				addr, entry->size,
+				(unsigned long long)addr, entry->size,
 				dir2name[entry->direction],
 				dir2name[direction]);
 
@@ -690,7 +690,7 @@ static void check_sync(struct device *dev, dma_addr_t addr,
 				"device write-only DMA memory to device "
 				"[device address=0x%016llx] [size=%llu bytes] "
 				"[mapped with %s] [synced with %s]\n",
-				addr, entry->size,
+				(unsigned long long)addr, entry->size,
 				dir2name[entry->direction],
 				dir2name[direction]);
-- cgit v1.1
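[Editor's note: the fix above is purely a printk-format portability issue. dma_addr_t is 32 bits wide on i386 without PAE and 64 bits wide on other configurations, while "%016llx" always expects an unsigned long long, so the value must be cast explicitly at each call site. Below is a minimal user-space sketch of the same idiom; the local dma_addr_t typedef and the sample address are assumptions made only so the example builds outside the kernel.]

	/*
	 * Editor's sketch, not from the patch: demonstrate why the cast to
	 * unsigned long long is needed when printing a possibly-32-bit
	 * dma_addr_t with a "%016llx" conversion.
	 */
	#include <stdio.h>
	#include <stdint.h>

	/* assumption: a 32-bit dma_addr_t, as on plain i386 */
	typedef uint32_t dma_addr_t;

	int main(void)
	{
		dma_addr_t addr = 0x12345678u;

		/*
		 * Without the cast, a 32-bit argument would be passed where a
		 * 64-bit one is expected: undefined behaviour, and exactly the
		 * compiler warnings quoted in the commit message above.
		 */
		printf("[device address=0x%016llx]\n", (unsigned long long)addr);
		return 0;
	}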