How does NSData's implementation of the hash method work?

When calling hash on the built-in NSData class in the Foundation framework, what implementation is used to return the hash value? (CRC32, something else?)

Something else. It's an implementation detail, so the algorithm is not guaranteed to stay the same across versions.
You can check the implementation in the open-source version of Core Foundation. Note that NSData is toll-free bridged to CFDataRef. From http://opensource.apple.com/source/CF/CF-635.21/CFData.c:
static CFHashCode __CFDataHash(CFTypeRef cf) {
    CFDataRef data = (CFDataRef)cf;
    return CFHashBytes((uint8_t *)CFDataGetBytePtr(data), __CFMin(__CFDataLength(data), 80));
}
we see that at most the first 80 bytes are used to compute the hash. The function CFHashBytes is implemented using the ELF hash algorithm:
#define ELF_STEP(B) T1 = (H << 4) + B; T2 = T1 & 0xF0000000; if (T2) T1 ^= (T2 >> 24); T1 &= (~T2); H = T1;

CFHashCode CFHashBytes(uint8_t *bytes, CFIndex length) {
    /* The ELF hash algorithm, used in the ELF object file format */
    UInt32 H = 0, T1, T2;
    SInt32 rem = length;
    while (3 < rem) {
        ELF_STEP(bytes[length - rem]);
        ELF_STEP(bytes[length - rem + 1]);
        ELF_STEP(bytes[length - rem + 2]);
        ELF_STEP(bytes[length - rem + 3]);
        rem -= 4;
    }
    switch (rem) {
    case 3: ELF_STEP(bytes[length - 3]);
    case 2: ELF_STEP(bytes[length - 2]);
    case 1: ELF_STEP(bytes[length - 1]);
    case 0: ;
    }
    return H;
}
#undef ELF_STEP
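A practical consequence of that 80-byte cap is that two NSData objects whose contents differ only after byte 80 report the same hash, which is legal, since equal hashes never imply equal objects. Here is a minimal standalone sketch that demonstrates this; it uses a plain reimplementation of the ELF hash above, not the real Core Foundation internals:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Standalone re-creation of CFHashBytes, capped at the first 80 bytes the
// way __CFDataHash caps it. Illustrative only, not the actual CF code.
static uint32_t ELFHash80(const uint8_t *bytes, size_t length) {
    uint32_t h = 0;
    size_t n = std::min<size_t>(length, 80);
    for (size_t i = 0; i < n; i++) {
        h = (h << 4) + bytes[i];
        uint32_t top = h & 0xF0000000;
        if (top) h ^= top >> 24;
        h &= ~top;
    }
    return h;
}

int main() {
    uint8_t a[100], b[100];
    std::memset(a, 'x', sizeof a);
    std::memset(b, 'x', sizeof b);
    b[90] = 'y'; // the buffers differ only beyond byte 80
    std::printf("%u %u\n", ELFHash80(a, sizeof a), ELFHash80(b, sizeof b)); // prints the same value twice
    return 0;
}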


Convert C++ code to Python with ctypes and mmap

I am trying to convert C++ code into Python.
int fd_vdma = open("/dev/mem", O_RDWR|O_SYNC); // open uiox device for vdma access
if (fd_vdma < 1) {
    printf("Invalid mem device file\n");
}
// mmap the vdma device for vdma access
unsigned int *ptr_vdma;
ptr_vdma = (unsigned int*)mmap(NULL, VDMA_MAP_SIZE, PROT_READ|PROT_WRITE,
                               MAP_SHARED, fd_vdma, VDMA_ADDR);
printf("DMA 1 virtual address: 0x%08x \n", ptr_vdma);
*(ptr_vdma+5) = FRBUF_ADDR_0;
*(ptr_vdma+7) = 2; // use internal fifos to trigger xfer
*(ptr_vdma+8) = 20480;
*(ptr_vdma+6) = (75 << 16) + (HORIZ_PIXELS_SMALL+75);
*(ptr_vdma+0x0D) = 200; // no. FIFO threshold .. max.. 240
Here is what I have done so far:
import ctypes
import mmap

# vdm memory check
try:
    fd_vdm_path = "/dev/mem"
    mode = "rb+"
    fd_vdm = open(fd_vdm_path, mode)
    # print(fd_vdm.fileno())
    print("[INFO] " + fd_vdm_path + " checked")
except Exception as error:
    print("{}".format(error))

# mmap the VDMA device for VDM access
vdma_buf = mmap.mmap(fd_vdm.fileno(), int(VDMA_MAP_SIZE),
                     mmap.MAP_SHARED, mmap.PROT_READ|mmap.PROT_WRITE, 0)
ptr_vdm = ctypes.c_uint.from_buffer(vdma_buf)
print(type(ctypes.addressof(ptr_vdm)))
print("[INFO] " + fd_vdm_path + " has allocated virtual address : " +
      hex(ctypes.addressof(ptr_vdm)))
but I am stuck here; I do not know how to perform the operations below in Python.
*(ptr_vdma+5) = FRBUF_ADDR_0;
*(ptr_vdma+7) = 2; // use internal fifos to trigger xfer
*(ptr_vdma+8) = 20480;
*(ptr_vdma+6) = (75 << 16) + (HORIZ_PIXELS_SMALL+75);
*(ptr_vdma+0x0D) = 200; // no. FIFO threshold .. max.. 240
I have tried vdma_buf.write(FRBUF_ADDR_0), but it is not working, and I could not find anything on the internet (possibly I did not search well).
Can you please help with this problem, or suggest some links or tutorials?
You can use memoryview and struct.pack:
...
mm = memoryview(vdma_buf)
mm[5*4:6*4] = struct.pack("I", FRBUF_ADDR_0)
mm[7*4:8*4] = struct.pack("I", 2)
mm[8*4:9*4] = struct.pack("I", 20480)
mm[6*4:7*4] = struct.pack("I", (75 << 16) + (HORIZ_PIXELS_SMALL+75))
mm[0x0D*4:0x0E*4] = struct.pack("I", 200)
memoryview represents a sequence of bytes, and if you want to write some other type to memory, you must convert it to bytes using struct.pack.
So, let's say you want to write an unsigned int at index N.
First, you call struct.pack("I", ...) to get the int's byte representation. Then you must calculate the destination offset. Because an unsigned int is 4 bytes, the offset is N*4.
memoryview supports slicing so you can just write:
mm[StartAddress:EndAddress] = BytesYouWantToWrite
where StartAddress is N*4 and EndAddress is (N+1)*4.

Multiple markers at this line (C++) regarding a function

I'm getting the above error in my code.
File-scope prototype:
static void pressure_val_update(void);
The above prototype is used in the function below:
void ui_vcr_menu_update(const MENU_CONTROL_T *p, UINT8 HAL)
{
    pressure_val_update();
    ratio_val_update();
    pressure_unit_update();
}
Below is the function definition:
static void pressure_val_update(void)
{
    UINT8 fl_pressure_value_U8;
    /* Get the Pressure value from RTE */
    Rte_Read_rpVCRDisplayValue_Pressure(&fl_pressure_value_U8);
    /* Case 1 : Step 0 - Step 16 */
    if(fl_pressure_value_U8 < 17U)
    {
        l_pressure_value_S16 = (-270 + (fl_pressure_value_U8 * 5.4));
        rbmp_U8 = DISABLE
        l_pressure_lbbmp_U8 = IMAGE_ENABLE;
        l_pressure_ltbmp_U8 = DISABLE
    }
    /* Case 2 : Step 17 - Step 33 */
    else if((fl_pressure_value_U8 >= 17U) && (fl_pressure_value_U8 < 34U))
    {
        l_pressure_value_S16 = (-270 + (fl_pressure_value_U8 * 5.4));
        rbmp_U8 = DISABLE
        l_pressure_lbbmp_U8 = IMAGE_ENABLE;
        l_pressure_ltbmp_U8 = IMAGE_ENABLE;
    }
    /* Case 3 : Step 34 - Step 50 */
    else if((fl_pressure_value_U8 >= 34U) && (fl_pressure_value_U8 < 51U))
    {
        l_pressure_value_S16 = (-270 + (fl_pressure_value_U8 * 5.4));
        rbmp_U8 = IMAGE_ENABLE;
        l_pressure_lbbmp_U8 = IMAGE_ENABLE;
        l_pressure_ltbmp_U8 = IMAGE_ENABLE;
    }
    else
    {
        l_pressure_value_S16 = -270;
        rbmp_U8 = DISABLE
        l_pressure_lbbmp_U8 = DISABLE
        l_pressure_ltbmp_U8 = DISABLE
    }
}
/*
How do I solve this? Why do I get multiple markers at this line (static void pressure_val_update(void))?
I am working on application projects and facing this issue.
I have gone through other solutions on the internet, but those are not related to my case.
Please let me know the exact reason why we get this warning (I got it in a Coverity analysis).

Error C2668: 'boost::bind' : ambiguous call to overloaded function

I am trying to build QuantLib on VS2013 in Release x64 mode.
I added the Boost libraries using the Property Manager, then went to Solution Explorer and clicked Build.
The final output was: Build: 18 succeeded, 1 failed. 0 up-to-date, 0 skipped.
When I double-clicked on the error, this file opened up (convolvedstudentt.cpp):
/*
 Copyright (C) 2014 Jose Aparicio

 This file is part of QuantLib, a free-software/open-source library
 for financial quantitative analysts and developers - http://quantlib.org/

 QuantLib is free software: you can redistribute it and/or modify it
 under the terms of the QuantLib license. You should have received a
 copy of the license along with this program; if not, please email
 <quantlib-dev@lists.sf.net>. The license is also available online at
 <http://quantlib.org/license.shtml>.

 This program is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the license for more details.
*/
#include <ql/experimental/math/convolvedstudentt.hpp>
#include <ql/errors.hpp>
#include <ql/math/factorial.hpp>
#include <ql/math/distributions/normaldistribution.hpp>
#include <ql/math/solvers1d/brent.hpp>
#include <boost/function.hpp>
#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4))
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-local-typedefs"
#endif
#include <boost/bind.hpp>
#include <boost/math/distributions/students_t.hpp>
#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 8)) || (__GNUC__ > 4))
#pragma GCC diagnostic pop
#endif
namespace QuantLib {
CumulativeBehrensFisher::CumulativeBehrensFisher(
const std::vector<Integer>& degreesFreedom,
const std::vector<Real>& factors
)
: degreesFreedom_(degreesFreedom), factors_(factors),
polyConvolved_(std::vector<Real>(1, 1.)), // value to start convolution
a_(0.)
{
QL_REQUIRE(degreesFreedom.size() == factors.size(),
"Incompatible sizes in convolution.");
for(Size i=0; i<degreesFreedom.size(); i++) {
QL_REQUIRE(degreesFreedom[i]%2 != 0,
"Even degree of freedom not allowed");
QL_REQUIRE(degreesFreedom[i] >= 0,
"Negative degree of freedom not allowed");
}
for(Size i=0; i<degreesFreedom_.size(); i++)
polynCharFnc_.push_back(polynCharactT((degreesFreedom[i]-1)/2));
// adjust the polynomial coefficients by the factors in the linear
// combination:
for(Size i=0; i<degreesFreedom_.size(); i++) {
Real multiplier = 1.;
for(Size k=1; k<polynCharFnc_[i].size(); k++) {
multiplier *= std::abs(factors_[i]);
polynCharFnc_[i][k] *= multiplier;
}
}
//convolution, here it is a product of polynomials and exponentials
for(Size i=0; i<polynCharFnc_.size(); i++)
polyConvolved_ =
convolveVectorPolynomials(polyConvolved_, polynCharFnc_[i]);
// trim possible zeros that might have arised:
std::vector<Real>::reverse_iterator it = polyConvolved_.rbegin();
while(it != polyConvolved_.rend()) {
if(*it == 0.) {
polyConvolved_.pop_back();
it = polyConvolved_.rbegin();
}else{
break;
}
}
// cache 'a' value (the exponent)
for(Size i=0; i<degreesFreedom_.size(); i++)
a_ += std::sqrt(static_cast<Real>(degreesFreedom_[i]))
* std::abs(factors_[i]);
a2_ = a_ * a_;
}
Disposable<std::vector<Real> >
CumulativeBehrensFisher::polynCharactT(Natural n) const {
Natural nu = 2 * n +1;
std::vector<Real> low(1,1.), high(1,1.);
high.push_back(std::sqrt(static_cast<Real>(nu)));
if(n==0) return low;
if(n==1) return high;
for(Size k=1; k<n; k++) {
std::vector<Real> recursionFactor(1,0.); // 0 coef
recursionFactor.push_back(0.); // 1 coef
recursionFactor.push_back(nu/((2.*k+1.)*(2.*k-1.))); // 2 coef
std::vector<Real> lowUp =
convolveVectorPolynomials(recursionFactor, low);
//add them up:
for(Size i=0; i<high.size(); i++)
lowUp[i] += high[i];
low = high;
high = lowUp;
}
return high;
}
Disposable<std::vector<Real> >
CumulativeBehrensFisher::convolveVectorPolynomials(
const std::vector<Real>& v1,
const std::vector<Real>& v2) const {
#if defined(QL_EXTRA_SAFETY_CHECKS)
QL_REQUIRE(!v1.empty() && !v2.empty(),
"Incorrect vectors in polynomial.");
#endif
const std::vector<Real>& shorter = v1.size() < v2.size() ? v1 : v2;
const std::vector<Real>& longer = (v1 == shorter) ? v2 : v1;
Size newDegree = v1.size()+v2.size()-2;
std::vector<Real> resultB(newDegree+1, 0.);
for(Size polyOrdr=0; polyOrdr<resultB.size(); polyOrdr++) {
for(Size i=std::max<Integer>(0, polyOrdr-longer.size()+1);
i<=std::min(polyOrdr, shorter.size()-1); i++)
resultB[polyOrdr] += shorter[i]*longer[polyOrdr-i];
}
return resultB;
}
Probability CumulativeBehrensFisher::operator()(const Real x) const {
// 1st & 0th terms with the table integration
Real integral = polyConvolved_[0] * std::atan(x/a_);
Real squared = a2_ + x*x;
Real rootsqr = std::sqrt(squared);
Real atan2xa = std::atan2(-x,a_);
if(polyConvolved_.size()>1)
integral += polyConvolved_[1] * x/squared;
for(Size exponent = 2; exponent <polyConvolved_.size(); exponent++) {
integral -= polyConvolved_[exponent] *
Factorial::get(exponent-1) * std::sin((exponent)*atan2xa)
/std::pow(rootsqr, static_cast<Real>(exponent));
}
return .5 + integral / M_PI;
}
Probability
CumulativeBehrensFisher::density(const Real x) const {
Real squared = a2_ + x*x;
Real integral = polyConvolved_[0] * a_ / squared;
Real rootsqr = std::sqrt(squared);
Real atan2xa = std::atan2(-x,a_);
for(Size exponent=1; exponent <polyConvolved_.size(); exponent++) {
integral += polyConvolved_[exponent] *
Factorial::get(exponent) * std::cos((exponent+1)*atan2xa)
/std::pow(rootsqr, static_cast<Real>(exponent+1) );
}
return integral / M_PI;
}
InverseCumulativeBehrensFisher::InverseCumulativeBehrensFisher(
const std::vector<Integer>& degreesFreedom,
const std::vector<Real>& factors,
Real accuracy)
: normSqr_(std::inner_product(factors.begin(), factors.end(),
factors.begin(), 0.)),
accuracy_(accuracy), distrib_(degreesFreedom, factors) { }
Real InverseCumulativeBehrensFisher::operator()(const Probability q) const {
Probability effectiveq;
Real sign;
// since the distrib is symmetric solve only on the right side:
if(q==0.5) {
return 0.;
}else if(q < 0.5) {
sign = -1.;
effectiveq = 1.-q;
}else{
sign = 1.;
effectiveq = q;
}
Real xMin =
InverseCumulativeNormal::standard_value(effectiveq) * normSqr_;
// inversion will fail at the Brent's bounds-check if this is not enough
// (q is very close to 1.), in a bad combination fails around 1.-1.e-7
Real xMax = 1.e6;
return sign *
Brent().solve(boost::bind(std::bind2nd(std::minus<Real>(),
effectiveq), boost::bind<Real>(
&CumulativeBehrensFisher::operator (),
distrib_, _1)), accuracy_, (xMin+xMax)/2., xMin, xMax);
}
}
The error seems to be in the third line from the bottom. That's the one that's highlighted.
effectiveq), boost::bind<Real>(
&CumulativeBehrensFisher::operator (),
distrib_, _1)), accuracy_, (xMin+xMax)/2., xMin, xMax);
When I hover the mouse over it, it says:
Error: more than one instance of overloaded function "boost::bind" matches the argument list: function template "boost::_bi::bind_t" etc. Please see the attached screenshot.
How can I fix this? Please help.
This has come up quite a few times lately on the QuantLib mailing list. In short, the code worked with Boost 1.57 (the latest version at the time of the QuantLib 1.5 release) but broke with Boost 1.58.
There's a fix for this in the QuantLib master branch on GitHub, but it hasn't made it into a release yet. If you want to (or have to) use Boost 1.58, you can check out the latest code from there. If you want to use a released QuantLib version instead, the workaround is to downgrade to Boost 1.57.
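If you'd rather patch the call site than switch branches, one way to sidestep the ambiguity is to drop the nested boost::bind/std::bind2nd combination in favour of a C++11 lambda, which VS2013 supports. This is a sketch of the idea, not necessarily the exact change made in QuantLib master:

// In InverseCumulativeBehrensFisher::operator(), pass a lambda computing
// F(x) - q to the solver instead of the boost::bind expression:
return sign *
    Brent().solve(
        [&](Real x) { return distrib_(x) - effectiveq; },
        accuracy_, (xMin + xMax) / 2., xMin, xMax);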

GIF LZW decompression

I am trying to implement a simple GIF reader in C++.
I am currently stuck on decompressing the image data.
If an image includes a Clear Code, my decompression algorithm fails.
After the Clear Code I rebuild the code table and reset the code size to MinimumLzwCodeSize + 1.
Then I read the next code and add it to the index stream. The problem is that after clearing, the next codes include values greater than the size of the current code table.
For example, the sample file from Wikipedia, rotating-earth.gif, has a code value of 262, but the global color table has only 256 entries. How do I handle this?
I implemented the LZW decompression according to the GIF spec.
Here is the main part of the decompression code:
int prevCode = GetCode(ptr, offset, codeSize);
codeStream.push_back(prevCode);
while (true)
{
    auto code = GetCode(ptr, offset, codeSize);
    //
    // Clear code
    //
    if (code == IndexClearCode)
    {
        // reset codesize
        codeSize = blockA.LZWMinimumCodeSize + 1;
        currentNodeValue = pow(2, codeSize) - 1;
        // reset codeTable
        codeTable.resize(colorTable.size() + 2);
        // read next code
        prevCode = GetCode(ptr, offset, codeSize);
        codeStream.push_back(prevCode);
        continue;
    }
    else if (code == IndexEndOfInformationCode)
        break;
    // exists in dictionary
    if (codeTable.size() > code)
    {
        if (prevCode >= codeTable.size())
        {
            prevCode = code;
            continue;
        }
        for (auto c : codeTable[code])
            codeStream.push_back(c);
        newEntry = codeTable[prevCode];
        newEntry.push_back(codeTable[code][0]);
        codeTable.push_back(newEntry);
        prevCode = code;
        if (codeTable.size() - 1 == currentNodeValue)
        {
            codeSize++;
            currentNodeValue = pow(2, codeSize) - 1;
        }
    }
    else
    {
        if (prevCode >= codeTable.size())
        {
            prevCode = code;
            continue;
        }
        newEntry = codeTable[prevCode];
        newEntry.push_back(codeTable[prevCode][0]);
        for (auto c : newEntry)
            codeStream.push_back(c);
        codeTable.push_back(newEntry);
        prevCode = codeTable.size() - 1;
        if (codeTable.size() - 1 == currentNodeValue)
        {
            codeSize++;
            currentNodeValue = pow(2, codeSize) - 1;
        }
    }
}
Found the solution.
It is called a deferred clear code. So when I check whether codeSize needs to be incremented, I also need to check whether codeSize is already at the maximum (12), as it is possible to get codes that are of the maximum code size. See spec-gif89a.txt.
if (codeTable.size() - 1 == currentNodeValue && codeSize < 12)
{
    codeSize++;
    currentNodeValue = (1 << codeSize) - 1;
}

How to decode Huffman codes quickly?

I have implemented a simple compressor using pure Huffman coding under Windows, but I do not know much about how to decode the compressed file quickly. My bad algorithm is:
Enumerate all the Huffman codes in the code table, then compare each with the bits in the compressed file. It gives a horrible result: decompressing a 3 MB file would need 6 hours.
Could you provide a much more efficient algorithm? Should I use a hash or something?
Update:
I have implemented the decoder with a state table, based on my friend Lin's advice. I think this method is better than traversing the Huffman tree: 3 MB within 6 s.
Thanks.
One way to optimise the binary-tree approach is to use a lookup table. You arrange the table so that you can look up a particular encoded bit-pattern directly, allowing for the maximum possible bit-width of any code.
Since most codes don't use the full maximum width, they are included at multiple locations in the table - one location for each combination of the unused bits. The table indicates how many bits to discard from the input as well as the decoded output.
If the longest code is too long, so the table is impractical, a compromise is to use a tree of smaller fixed-width-subscript lookups. For example, you can use a 256-item table to handle a byte. If the input code is more than 8 bits, the table entry indicates that decoding is incomplete and directs you to a table that handles the next up to 8 bits. Larger tables trade memory for speed - 256 items is probably too small.
I believe this general approach is called "prefix tables", and is what BobMcGee's quoted code is doing. A likely difference is that some compression algorithms require the prefix table to be updated during decompression - this is not needed for simple Huffman. IIRC, I first saw it in a book about bitmapped graphics file formats which included GIF, some time before the patent panic.
It should be easy to precalculate either a full lookup table, a hashtable equivalent, or a tree-of-small-tables from a binary tree model. The binary tree is still the key representation (mental model) of how the code works - this lookup table is just an optimised way to implement it.
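As a rough illustration of the single-table idea (hypothetical names throughout: a BitReader with peek(n)/skip(n) is assumed, maxBits is the longest code length, and codes are taken most-significant-bit first), the per-symbol work collapses to one array index:

#include <cstddef>
#include <cstdint>
#include <vector>

struct TableEntry {
    uint8_t symbol; // decoded byte
    uint8_t length; // how many input bits this code actually consumes
};

// 2^maxBits entries: every possible window of maxBits input bits maps to the
// code that starts it. Codes shorter than maxBits are stored once for each
// combination of the unused low bits.
std::vector<TableEntry> buildTable(const std::vector<uint32_t>& codes,
                                   const std::vector<uint8_t>& lengths,
                                   const std::vector<uint8_t>& symbols,
                                   unsigned maxBits) {
    std::vector<TableEntry> table(std::size_t(1) << maxBits);
    for (std::size_t i = 0; i < codes.size(); i++) {
        unsigned pad = maxBits - lengths[i];
        uint32_t base = codes[i] << pad; // left-align the code in the window
        for (uint32_t fill = 0; fill < (uint32_t(1) << pad); fill++)
            table[base | fill] = { symbols[i], lengths[i] };
    }
    return table;
}

// One lookup per symbol: peek maxBits bits, emit the symbol, then discard
// only the bits that code really used.
template <class BitReader>
void decode(BitReader& in, const std::vector<TableEntry>& table,
            unsigned maxBits, std::vector<uint8_t>& out, std::size_t nSymbols) {
    while (nSymbols--) {
        const TableEntry& e = table[in.peek(maxBits)];
        out.push_back(e.symbol);
        in.skip(e.length);
    }
}

The table has 2^maxBits entries, which is exactly why the tree-of-smaller-tables compromise above becomes attractive when the longest code is large.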
Why not take a look at how the GZIP source does it, specifically the Huffman decompression code in unpack.c? It's doing exactly what you are, except it's doing it much, much faster.
From what I can tell, it's using a lookup array and shift/mask operations on whole words to run faster. Pretty dense code, though.
EDIT: here is the complete source
/* unpack.c -- decompress files in pack format.
* Copyright (C) 1992-1993 Jean-loup Gailly
* This is free software; you can redistribute it and/or modify it under the
* terms of the GNU General Public License, see the file COPYING.
*/
#ifdef RCSID
static char rcsid[] = "$Id: unpack.c,v 1.4 1993/06/11 19:25:36 jloup Exp $";
#endif
#include "tailor.h"
#include "gzip.h"
#include "crypt.h"
#define MIN(a,b) ((a) <= (b) ? (a) : (b))
/* The arguments must not have side effects. */
#define MAX_BITLEN 25
/* Maximum length of Huffman codes. (Minor modifications to the code
* would be needed to support 32 bits codes, but pack never generates
* more than 24 bits anyway.)
*/
#define LITERALS 256
/* Number of literals, excluding the End of Block (EOB) code */
#define MAX_PEEK 12
/* Maximum number of 'peek' bits used to optimize traversal of the
* Huffman tree.
*/
local ulg orig_len; /* original uncompressed length */
local int max_len; /* maximum bit length of Huffman codes */
local uch literal[LITERALS];
/* The literal bytes present in the Huffman tree. The EOB code is not
* represented.
*/
local int lit_base[MAX_BITLEN+1];
/* All literals of a given bit length are contiguous in literal[] and
* have contiguous codes. literal[code+lit_base[len]] is the literal
* for a code of len bits.
*/
local int leaves [MAX_BITLEN+1]; /* Number of leaves for each bit length */
local int parents[MAX_BITLEN+1]; /* Number of parents for each bit length */
local int peek_bits; /* Number of peek bits currently used */
/* local uch prefix_len[1 << MAX_PEEK]; */
#define prefix_len outbuf
/* For each bit pattern b of peek_bits bits, prefix_len[b] is the length
* of the Huffman code starting with a prefix of b (upper bits), or 0
* if all codes of prefix b have more than peek_bits bits. It is not
* necessary to have a huge table (large MAX_PEEK) because most of the
* codes encountered in the input stream are short codes (by construction).
* So for most codes a single lookup will be necessary.
*/
#if (1<<MAX_PEEK) > OUTBUFSIZ
error cannot overlay prefix_len and outbuf
#endif
local ulg bitbuf;
/* Bits are added on the low part of bitbuf and read from the high part. */
local int valid; /* number of valid bits in bitbuf */
/* all bits above the last valid bit are always zero */
/* Set code to the next 'bits' input bits without skipping them. code
* must be the name of a simple variable and bits must not have side effects.
* IN assertions: bits <= 25 (so that we still have room for an extra byte
* when valid is only 24), and mask = (1<<bits)-1.
*/
#define look_bits(code,bits,mask) \
{ \
while (valid < (bits)) bitbuf = (bitbuf<<8) | (ulg)get_byte(), valid += 8; \
code = (bitbuf >> (valid-(bits))) & (mask); \
}
/* Skip the given number of bits (after having peeked at them): */
#define skip_bits(bits) (valid -= (bits))
#define clear_bitbuf() (valid = 0, bitbuf = 0)
/* Local functions */
local void read_tree OF((void));
local void build_tree OF((void));
/* ===========================================================================
* Read the Huffman tree.
*/
local void read_tree()
{
int len; /* bit length */
int base; /* base offset for a sequence of leaves */
int n;
/* Read the original input size, MSB first */
orig_len = 0;
for (n = 1; n <= 4; n++) orig_len = (orig_len << 8) | (ulg)get_byte();
max_len = (int)get_byte(); /* maximum bit length of Huffman codes */
if (max_len > MAX_BITLEN) {
error("invalid compressed data -- Huffman code > 32 bits");
}
/* Get the number of leaves at each bit length */
n = 0;
for (len = 1; len <= max_len; len++) {
leaves[len] = (int)get_byte();
n += leaves[len];
}
if (n > LITERALS) {
error("too many leaves in Huffman tree");
}
Trace((stderr, "orig_len %ld, max_len %d, leaves %d\n",
orig_len, max_len, n));
/* There are at least 2 and at most 256 leaves of length max_len.
* (Pack arbitrarily rejects empty files and files consisting of
* a single byte even repeated.) To fit the last leaf count in a
* byte, it is offset by 2. However, the last literal is the EOB
* code, and is not transmitted explicitly in the tree, so we must
* adjust here by one only.
*/
leaves[max_len]++;
/* Now read the leaves themselves */
base = 0;
for (len = 1; len <= max_len; len++) {
/* Remember where the literals of this length start in literal[] : */
lit_base[len] = base;
/* And read the literals: */
for (n = leaves[len]; n > 0; n--) {
literal[base++] = (uch)get_byte();
}
}
leaves[max_len]++; /* Now include the EOB code in the Huffman tree */
}
/* ===========================================================================
* Build the Huffman tree and the prefix table.
*/
local void build_tree()
{
int nodes = 0; /* number of nodes (parents+leaves) at current bit length */
int len; /* current bit length */
uch *prefixp; /* pointer in prefix_len */
for (len = max_len; len >= 1; len--) {
/* The number of parent nodes at this level is half the total
* number of nodes at parent level:
*/
nodes >>= 1;
parents[len] = nodes;
/* Update lit_base by the appropriate bias to skip the parent nodes
* (which are not represented in the literal array):
*/
lit_base[len] -= nodes;
/* Restore nodes to be parents+leaves: */
nodes += leaves[len];
}
/* Construct the prefix table, from shortest leaves to longest ones.
* The shortest code is all ones, so we start at the end of the table.
*/
peek_bits = MIN(max_len, MAX_PEEK);
prefixp = &prefix_len[1<<peek_bits];
for (len = 1; len <= peek_bits; len++) {
int prefixes = leaves[len] << (peek_bits-len); /* may be 0 */
while (prefixes--) *--prefixp = (uch)len;
}
/* The length of all other codes is unknown: */
while (prefixp > prefix_len) *--prefixp = 0;
}
/* ===========================================================================
* Unpack in to out. This routine does not support the old pack format
* with magic header \037\037.
*
* IN assertions: the buffer inbuf contains already the beginning of
* the compressed data, from offsets inptr to insize-1 included.
* The magic header has already been checked. The output buffer is cleared.
*/
int unpack(in, out)
int in, out; /* input and output file descriptors */
{
int len; /* Bit length of current code */
unsigned eob; /* End Of Block code */
register unsigned peek; /* lookahead bits */
unsigned peek_mask; /* Mask for peek_bits bits */
ifd = in;
ofd = out;
read_tree(); /* Read the Huffman tree */
build_tree(); /* Build the prefix table */
clear_bitbuf(); /* Initialize bit input */
peek_mask = (1<<peek_bits)-1;
/* The eob code is the largest code among all leaves of maximal length: */
eob = leaves[max_len]-1;
Trace((stderr, "eob %d %x\n", max_len, eob));
/* Decode the input data: */
for (;;) {
/* Since eob is the longest code and not shorter than max_len,
* we can peek at max_len bits without having the risk of reading
* beyond the end of file.
*/
look_bits(peek, peek_bits, peek_mask);
len = prefix_len[peek];
if (len > 0) {
peek >>= peek_bits - len; /* discard the extra bits */
} else {
/* Code of more than peek_bits bits, we must traverse the tree */
ulg mask = peek_mask;
len = peek_bits;
do {
len++, mask = (mask<<1)+1;
look_bits(peek, len, mask);
} while (peek < (unsigned)parents[len]);
/* loop as long as peek is a parent node */
}
/* At this point, peek is the next complete code, of len bits */
if (peek == eob && len == max_len) break; /* end of file? */
put_ubyte(literal[peek+lit_base[len]]);
Tracev((stderr,"%02d %04x %c\n", len, peek,
literal[peek+lit_base[len]]));
skip_bits(len);
} /* for (;;) */
flush_window();
Trace((stderr, "bytes_out %ld\n", bytes_out));
if (orig_len != (ulg)bytes_out) {
error("invalid compressed data--length error");
}
return OK;
}
The typical way to decompress a Huffman code is using a binary tree. You insert your codes in the tree, so that each bit in a code represents a branch either to the left (0) or right (1), with decoded bytes (or whatever values you have) in the leaves.
Decoding is then just a case of reading bits from the coded content, walking the tree for each bit. When you reach a leaf, emit that decoded value, and keep reading until the input is exhausted.
Update: this page describes the technique, and has fancy graphics.
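A minimal sketch of that tree walk, with a hypothetical Node layout (children indexed by the bit value, leaves carrying the decoded byte) and an assumed BitReader with get_bit():

#include <cstddef>
#include <cstdint>
#include <vector>

struct Node {
    int child[2] = { -1, -1 }; // indices of the 0/1 branches, -1 if absent
    int symbol = -1;           // decoded byte at a leaf, -1 for internal nodes
};

// Walk the tree one input bit at a time; every time a leaf is reached,
// emit its symbol and restart at the root.
template <class BitReader>
std::vector<uint8_t> decodeTree(BitReader& in, const std::vector<Node>& tree,
                                std::size_t nSymbols) {
    std::vector<uint8_t> out;
    int node = 0; // root
    while (out.size() < nSymbols) {
        node = tree[node].child[in.get_bit()];
        if (tree[node].symbol >= 0) { // reached a leaf
            out.push_back(static_cast<uint8_t>(tree[node].symbol));
            node = 0;
        }
    }
    return out;
}

This is simple but costs one branch per input bit, which is what the table-based answers above avoid.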
You can perform a kind of batch lookup on the usual Huffman tree lookup:
Choosing a bit depth (call it depth n); this is a trade-off between speed, memory, and time investment to construct tables;
Build a lookup table for all 2^n bit strings of length n. Each entry may encode several complete tokens; there will commonly also be some bits left over that are only a prefix of Huffman codes: for each of these, make a link to a further lookup table for that code;
Build the further lookup tables. The total number of tables is at most one less than the number of entries coded in the Huffman tree.
Choosing a depth that is a multiple of four, e.g., depth 8, is a good fit for bit shifting operations.
Postscript: This differs from the idea in potatoswatter's comment on unwind's answer and from Steve314's answer in using multiple tables: this means that all of the n-bit lookup is put to use, so it should be faster, but it makes table construction and lookup significantly trickier, and it will consume much more space for a given depth.
Why not use the decompression algorithm in the same source module? It appears to be a decent algorithm.
The other answers are right, but here is some code in Rust I wrote recently to make the ideas concrete. This is the key routine:
fn decode( &self, input: &mut InpBitStream ) -> usize
{
    let mut sym = self.lookup[ input.peek( self.peekbits ) ];
    if sym >= self.ncode
    {
        sym = self.lookup[ sym - self.ncode + ( input.peek( self.maxbits ) >> self.peekbits ) ];
    }
    input.advance( self.nbits[ sym ] as usize );
    sym
}
The tricky bit is setting up the lookup table; see BitDecoder::setup_code in this complete RFC 1951 decoder in Rust:
// RFC 1951 inflate ( de-compress ).
pub fn inflate( data: &[u8] ) -> Vec<u8>
{
let mut inp = InpBitStream::new( &data );
let mut out = Vec::new();
let _chk = inp.get_bits( 16 ); // Checksum
loop
{
let last = inp.get_bit();
let btype = inp.get_bits( 2 );
match btype
{
2 => { do_dyn( &mut inp, &mut out ); }
1 => { do_fixed( &mut inp, &mut out ); }
0 => { do_copy( &mut inp, &mut out ); }
_ => { }
}
if last != 0 { break; }
}
out
}
fn do_dyn( inp: &mut InpBitStream, out: &mut Vec<u8> )
{
let n_lit_code = 257 + inp.get_bits( 5 );
let n_dist_code = 1 + inp.get_bits( 5 );
let n_len_code = 4 + inp.get_bits( 4 );
let mut len = LenDecoder::new( inp, n_len_code );
let mut lit = BitDecoder::new( n_lit_code );
len.get_lengths( inp, &mut lit.nbits );
lit.init();
let mut dist = BitDecoder::new( n_dist_code );
len.get_lengths( inp, &mut dist.nbits );
dist.init();
loop
{
let x = lit.decode( inp );
match x
{
0..=255 => { out.push( x as u8 ); }
256 => { break; }
_ =>
{
let mc = x - 257;
let length = MATCH_OFF[ mc ] + inp.get_bits( MATCH_EXTRA[ mc ] as usize );
let dc = dist.decode( inp );
let distance = DIST_OFF[ dc ] + inp.get_bits( DIST_EXTRA[ dc ] as usize );
copy( out, distance, length );
}
}
}
} // end do_dyn
fn copy( out: &mut Vec<u8>, distance: usize, mut length: usize )
{
let mut i = out.len() - distance;
while length > 0
{
out.push( out[ i ] );
i += 1;
length -= 1;
}
}
/// Decode length-limited Huffman codes.
struct BitDecoder
{
ncode: usize,
nbits: Vec<u8>,
maxbits: usize,
peekbits: usize,
lookup: Vec<usize>
}
impl BitDecoder
{
fn new( ncode: usize ) -> BitDecoder
{
BitDecoder
{
ncode,
nbits: vec![0; ncode],
maxbits: 0,
peekbits: 0,
lookup: Vec::new()
}
}
/// The key routine, will be called many times.
fn decode( &self, input: &mut InpBitStream ) -> usize
{
let mut sym = self.lookup[ input.peek( self.peekbits ) ];
if sym >= self.ncode
{
sym = self.lookup[ sym - self.ncode + ( input.peek( self.maxbits ) >> self.peekbits ) ];
}
input.advance( self.nbits[ sym ] as usize );
sym
}
fn init( &mut self )
{
let ncode = self.ncode;
let mut max_bits : usize = 0;
for bp in &self.nbits
{
let bits = *bp as usize;
if bits > max_bits { max_bits = bits; }
}
self.maxbits = max_bits;
self.peekbits = if max_bits > 8 { 8 } else { max_bits };
self.lookup.resize( 1 << self.peekbits, 0 );
// Code below is from rfc1951 page 7
let mut bl_count : Vec<usize> = vec![ 0; max_bits + 1 ]; // the number of codes of length N, N >= 1.
for i in 0..ncode { bl_count[ self.nbits[i] as usize ] += 1; }
let mut next_code : Vec<usize> = vec![ 0; max_bits + 1 ];
let mut code = 0;
bl_count[0] = 0;
for i in 0..max_bits
{
code = ( code + bl_count[i] ) << 1;
next_code[ i + 1 ] = code;
}
for i in 0..ncode
{
let len = self.nbits[ i ] as usize;
if len != 0
{
self.setup_code( i, len, next_code[ len ] );
next_code[ len ] += 1;
}
}
}
// Decoding is done using self.lookup ( see decode ). To keep the lookup table small,
// codes longer than 8 bits are looked up in two peeks.
fn setup_code( &mut self, sym: usize, len: usize, mut code: usize )
{
if len <= self.peekbits
{
let diff = self.peekbits - len;
for i in code << diff .. (code << diff) + (1 << diff)
{
// bits are reversed to match InpBitStream::peek
let r = reverse( i, self.peekbits );
self.lookup[ r ] = sym;
}
} else {
// Secondary lookup required.
let peekbits2 = self.maxbits - self.peekbits;
// Split code into peekbits portion ( key ) and remainder ( code).
let diff1 = len - self.peekbits;
let key = code >> diff1;
code &= ( 1 << diff1 ) - 1;
// Get the secondary lookup.
let kr = reverse( key, self.peekbits );
let mut base = self.lookup[ kr ];
if base == 0 // Secondary lookup not yet allocated for this key.
{
base = self.lookup.len();
self.lookup.resize( base + ( 1 << peekbits2 ), 0 );
self.lookup[ kr ] = self.ncode + base;
} else {
base -= self.ncode;
}
// Set the secondary lookup values.
let diff = self.maxbits - len;
for i in code << diff .. (code << diff) + (1<<diff)
{
let r = reverse( i, peekbits2 );
self.lookup[ base + r ] = sym;
}
}
}
} // end impl BitDecoder
struct InpBitStream<'a>
{
data: &'a [u8],
pos: usize,
buf: usize,
got: usize, // Number of bits in buffer.
}
impl <'a> InpBitStream<'a>
{
fn new( data: &'a [u8] ) -> InpBitStream
{
InpBitStream { data, pos: 0, buf: 1, got: 0 }
}
fn peek( &mut self, n: usize ) -> usize
{
while self.got < n
{
if self.pos < self.data.len()
{
self.buf |= ( self.data[ self.pos ] as usize ) << self.got;
}
self.pos += 1;
self.got += 8;
}
self.buf & ( ( 1 << n ) - 1 )
}
fn advance( &mut self, n:usize )
{
self.buf >>= n;
self.got -= n;
}
fn get_bit( &mut self ) -> usize
{
if self.got == 0 { self.peek( 1 ); }
let result = self.buf & 1;
self.advance( 1 );
result
}
fn get_bits( &mut self, n: usize ) -> usize
{
let result = self.peek( n );
self.advance( n );
result
}
fn get_huff( &mut self, mut n: usize ) -> usize
{
let mut result = 0;
while n > 0
{
result = ( result << 1 ) + self.get_bit();
n -= 1;
}
result
}
fn clear_bits( &mut self )
{
self.got = 0;
}
} // end impl InpBitStream
/// Decode code lengths.
struct LenDecoder
{
plenc: u8, // previous length code ( which can be repeated )
rep: usize, // repeat
bd: BitDecoder,
}
/// Decodes an array of lengths. There are special codes for repeats, and repeats of zeros.
impl LenDecoder
{
fn new( inp: &mut InpBitStream, n_len_code: usize ) -> LenDecoder
{
let mut result = LenDecoder { plenc: 0, rep:0, bd: BitDecoder::new( 19 ) };
// Read the array of 3-bit code lengths from input.
for i in 0..n_len_code
{
result.bd.nbits[ CLEN_ALPHABET[i] as usize ] = inp.get_bits(3) as u8;
}
result.bd.init();
result
}
// Per RFC1931 page 13, get array of code lengths.
fn get_lengths( &mut self, inp: &mut InpBitStream, result: &mut Vec<u8> )
{
let n = result.len();
let mut i = 0;
while self.rep > 0 { result[i] = self.plenc; i += 1; self.rep -= 1; }
while i < n
{
let lenc = self.bd.decode( inp ) as u8;
if lenc < 16
{
result[i] = lenc;
i += 1;
self.plenc = lenc;
} else {
if lenc == 16 { self.rep = 3 + inp.get_bits(2); }
else if lenc == 17 { self.rep = 3 + inp.get_bits(3); self.plenc=0; }
else if lenc == 18 { self.rep = 11 + inp.get_bits(7); self.plenc=0; }
while i < n && self.rep > 0 { result[i] = self.plenc; i += 1; self.rep -= 1; }
}
}
} // end get_lengths
} // end impl LenDecoder
/// Reverse a string of bits.
pub fn reverse( mut x:usize, mut bits: usize ) -> usize
{
let mut result: usize = 0;
while bits > 0
{
result = ( result << 1 ) | ( x & 1 );
x >>= 1;
bits -= 1;
}
result
}
fn do_copy( inp: &mut InpBitStream, out: &mut Vec<u8> )
{
inp.clear_bits(); // Discard any bits in the input buffer
let mut n = inp.get_bits( 16 );
let _n1 = inp.get_bits( 16 );
while n > 0 { out.push( inp.data[ inp.pos ] ); n -= 1; inp.pos += 1; }
}
fn do_fixed( inp: &mut InpBitStream, out: &mut Vec<u8> ) // RFC1951 page 12.
{
loop
{
// 0 to 23 ( 7 bits ) => 256 - 279; 48 - 191 ( 8 bits ) => 0 - 143;
// 192 - 199 ( 8 bits ) => 280 - 287; 400..511 ( 9 bits ) => 144 - 255
let mut x = inp.get_huff( 7 );
if x <= 23
{
x += 256;
} else {
x = ( x << 1 ) + inp.get_bit();
if x <= 191 { x -= 48; }
else if x <= 199 { x += 88; }
else { x = ( x << 1 ) + inp.get_bit() - 256; }
}
match x
{
0..=255 => { out.push( x as u8 ); }
256 => { break; }
_ => // 257 <= x && x <= 285
{
x -= 257;
let length = MATCH_OFF[x] + inp.get_bits( MATCH_EXTRA[ x ] as usize );
let dcode = inp.get_huff( 5 );
let distance = DIST_OFF[dcode] + inp.get_bits( DIST_EXTRA[dcode] as usize );
copy( out, distance, length );
}
}
}
} // end do_fixed
// RFC 1951 constants.
pub static CLEN_ALPHABET : [u8; 19] = [ 16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15 ];
pub static MATCH_OFF : [usize; 30] = [ 3,4,5,6, 7,8,9,10, 11,13,15,17, 19,23,27,31, 35,43,51,59,
67,83,99,115, 131,163,195,227, 258, 0xffff ];
pub static MATCH_EXTRA : [u8; 29] = [ 0,0,0,0, 0,0,0,0, 1,1,1,1, 2,2,2,2, 3,3,3,3, 4,4,4,4, 5,5,5,5, 0 ];
pub static DIST_OFF : [usize; 30] = [ 1,2,3,4, 5,7,9,13, 17,25,33,49, 65,97,129,193, 257,385,513,769,
1025,1537,2049,3073, 4097,6145,8193,12289, 16385,24577 ];
pub static DIST_EXTRA : [u8; 30] = [ 0,0,0,0, 1,1,2,2, 3,3,4,4, 5,5,6,6, 7,7,8,8, 9,9,10,10, 11,11,12,12, 13,13 ];
Github repository here