shithub: pokecrystal

Download patch

ref: e3bc378492992f0d7f8724c374d99b629f671219
parent: 4259e7051e2cc621267fc62d6ea11620ab995122
author: Rangi <remy.oukaour+rangi42@gmail.com>
date: Mon Dec 31 12:17:55 EST 2018

Keep gfx.py with other tools, and remove its dependency on the 'extras' submodule
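
An illustrative usage sketch (not part of the patch; paths are hypothetical): the relocated script keeps its command-line interface, defined by get_args() and the `methods` dict at the bottom of tools/gfx.py, and the bundled pokemontools package added below replaces the old 'extras' submodule.

    # Under Python 2, from the repository root:
    #   python tools/gfx.py png gfx/pokemon/pikachu/front.animated.2bpp.lz
    #   python tools/gfx.py 2bpp gfx/tilesets/johto.png
    # or, with tools/ on sys.path so `from pokemontools import gfx, lz` resolves:
    import gfx                                            # tools/gfx.py
    gfx.main('png', ['gfx/pokemon/pikachu/front.2bpp'])   # hypothetical path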

--- a/gfx.py
+++ /dev/null
@@ -1,264 +1,0 @@
-"""Supplementary scripts for graphics conversion."""
-
-import os
-import argparse
-
-from extras.pokemontools import gfx, lz
-
-
-# Graphics with inverted tilemaps that aren't covered by filepath_rules.
-pics = [
-    'gfx/shrink1',
-    'gfx/shrink2',
-]
-
-def recursive_read(filename):
-    def recurse(filename_):
-        lines = []
-        for line in open(filename_):
-            if 'include "' in line.lower():
-                lines += recurse(line.split('"')[1])
-            else:
-                lines += [line]
-        return lines
-    lines = recurse(filename)
-    return ''.join(lines)
-
-base_stats = None
-def get_base_stats():
-    global base_stats
-    if not base_stats:
-        base_stats = recursive_read('data/base_stats.asm')
-    return base_stats
-
-def get_pokemon_dimensions(path):
-    try:
-        byte = bytearray(open(path, 'rb').read())[0]
-        width = byte & 0xf
-        height = (byte >> 4) & 0xf
-        return width, height
-    except:
-        return None
-
-
-def get_animation_frames(path=None, w=7, h=7, bitmask_path=None, frame_path=None):
-    """Retrieve animation frame tilemaps from generated frame/bitmask data."""
-    if not path:
-        path = bitmask_path
-    if not path:
-        path = frame_path
-    if not path:
-        raise Exception("need at least one of path, bitmask_path or frame_path")
-
-    if not bitmask_path:
-        bitmask_path = os.path.join(os.path.split(path)[0], 'bitmask.asm')
-    if not frame_path:
-        frame_path = os.path.join(os.path.split(path)[0], 'frames.asm')
-    bitmask_lines = open(bitmask_path).readlines()
-    frame_lines = open(frame_path).readlines()
-
-    bitmask_length = w * h
-
-    bitmasks = []
-    bitmask = []
-    for line in bitmask_lines:
-        if '\tdb ' in line:
-            value = line.split('\tdb ')[1].strip().replace('%', '0b')
-            value = int(value, 0)
-            #print line.strip(), value, len(bitmasks), len(bitmask)
-            for bit in xrange(8):
-                bitmask += [(value >> bit) & 1]
-                if len(bitmask) >= bitmask_length:
-                    bitmasks += [bitmask]
-                    bitmask = []
-                    break
-    if bitmask:
-        bitmasks += [bitmask]
-
-    frames = []
-    frame_labels = []
-    i = 0
-    for line in frame_lines:
-        if '\tdw ' in line:
-            frame_labels += [line.split('\tdw ')[1].strip()]
-        else:
-            for part in line.split():
-                part = part.strip()
-                if part in frame_labels:
-                    frames += [(part, i)]
-        i += 1
-
-    results = []
-
-    for label, i in frames:
-        result = []
-
-        # get the bitmask and tile ids for each frame
-        # don't care if we read past bounds, so just read the rest of the file
-        values = []
-        for line in frame_lines[i:]:
-            if '\tdb ' in line:
-                values += line.split('\tdb ')[1].split(';')[0].split(',')
-
-        #print bitmasks
-        #print values[0]
-        #print int(values[0].replace('$', '0x'), 0)
-        bitmask = bitmasks[int(values[0].replace('$', '0x'), 0)]
-        tiles = values[1:]
-        k = 0
-        j = 0
-        for bit in bitmask:
-            if bit:
-                result += [int(tiles[k].replace('$', '0x'), 0)]
-                k += 1
-            else:
-                result += [j]
-            j += 1
-
-        results += [result]
-
-    return results
-
-def get_animated_graphics(path, w=7, h=7, bitmask_path=None, frame_path=None):
-    frames = get_animation_frames(path, w, h, bitmask_path, frame_path)
-    new_path = path.replace('.animated.2bpp', '.2bpp')
-    tiles = gfx.get_tiles(bytearray(open(path, 'rb').read()))
-    new_tiles = tiles[:w * h]
-    for frame in frames:
-        for tile in frame:
-            new_tiles += [tiles[tile]]
-    new_graphic = gfx.connect(new_tiles)
-    print new_path, list(new_graphic)
-    open(new_path, 'wb').write(bytearray(new_graphic))
-    return new_path
-
-def filepath_rules(filepath):
-    """Infer attributes of certain graphics by their location in the filesystem."""
-    args = {}
-
-    filedir, filename = os.path.split(filepath)
-    if filedir.startswith('./'):
-        filedir = filedir[2:]
-
-    name, ext = os.path.splitext(filename)
-    if ext == '.lz':
-        name, ext = os.path.splitext(name)
-
-    pokemon_name = ''
-
-    if 'gfx/pics/' in filedir:
-        pokemon_name = filedir.split('/')[-1]
-        if pokemon_name.startswith('unown_'):
-            index = filedir.find(pokemon_name)
-            if index != -1:
-                filedir = filedir[:index + len('unown')] + filedir[index + len('unown_a'):]
-        if name == 'front' or name == 'front.animated':
-            args['pal_file'] = os.path.join(filedir, 'normal.pal')
-            args['pic'] = True
-            args['animate'] = True
-        elif name == 'back':
-            args['pal_file'] = os.path.join(filedir, 'normal.pal')
-            args['pic'] = True
-
-    elif 'gfx/trainers' in filedir:
-        args['pic'] = True
-
-    elif os.path.join(filedir, name) in pics:
-        args['pic'] = True
-
-    elif filedir == 'gfx/tilesets':
-        args['tileset'] = True
-
-    if args.get('pal_file'):
-        if os.path.exists(args['pal_file']):
-            args['palout'] = args['pal_file']
-        else:
-            del args['pal_file']
-
-    if args.get('pic'):
-        if ext == '.png':
-            w, h = gfx.png.Reader(filepath).asRGBA8()[:2]
-            w = min(w/8, h/8)
-            args['pic_dimensions'] = w, w
-        elif ext == '.2bpp':
-            if pokemon_name and name == 'front' or name == 'front.animated':
-                w, h = get_pokemon_dimensions(filepath.replace(ext, '.dimensions')) or (7, 7)
-                args['pic_dimensions'] = w, w
-            elif pokemon_name and name == 'back':
-                args['pic_dimensions'] = 6, 6
-            else:
-                args['pic_dimensions'] = 7, 7
-
-    if args.get('tileset'):
-        args['width'] = 128
-    return args
-
-
-def to_1bpp(filename, **kwargs):
-    name, ext = os.path.splitext(filename)
-    if   ext == '.1bpp': pass
-    elif ext == '.2bpp': gfx.export_2bpp_to_1bpp(filename, **kwargs)
-    elif ext == '.png':  gfx.export_png_to_1bpp(filename, **kwargs)
-    elif ext == '.lz':
-        decompress(filename, **kwargs)
-        to_1bpp(name, **kwargs)
-
-def to_2bpp(filename, **kwargs):
-    name, ext = os.path.splitext(filename)
-    if   ext == '.1bpp': gfx.export_1bpp_to_2bpp(filename, **kwargs)
-    elif ext == '.2bpp': pass
-    elif ext == '.png':  gfx.export_png_to_2bpp(filename, **kwargs)
-    elif ext == '.lz':
-        decompress(filename, **kwargs)
-        to_2bpp(name, **kwargs)
-
-def to_png(filename, **kwargs):
-    name, ext = os.path.splitext(filename)
-    if   ext == '.1bpp': gfx.export_1bpp_to_png(filename, **kwargs)
-    elif ext == '.2bpp' and name.endswith('.animated'):
-        w, h = kwargs.get('pic_dimensions') or (7, 7)
-        new_path = get_animated_graphics(filename, w=w, h=h)
-        return to_png(new_path, **kwargs)
-    elif ext == '.2bpp': gfx.export_2bpp_to_png(filename, **kwargs)
-    elif ext == '.png':  pass
-    elif ext == '.lz':
-        decompress(filename, **kwargs)
-        to_png(name, **kwargs)
-
-def compress(filename, **kwargs):
-    data = open(filename, 'rb').read()
-    lz_data = lz.Compressed(data).output
-    open(filename + '.lz', 'wb').write(bytearray(lz_data))
-
-def decompress(filename, **kwargs):
-    lz_data = open(filename, 'rb').read()
-    data = lz.Decompressed(lz_data).output
-    name, ext = os.path.splitext(filename)
-    open(name, 'wb').write(bytearray(data))
-
-
-methods = {
-    '2bpp': to_2bpp,
-    '1bpp': to_1bpp,
-    'png':  to_png,
-    'lz':   compress,
-    'unlz': decompress,
-}
-
-def main(method_name, filenames=None):
-    if filenames is None: filenames = []
-    for filename in filenames:
-        args = filepath_rules(filename)
-        method = methods.get(method_name)
-        if method:
-            method(filename, **args)
-
-def get_args():
-    ap = argparse.ArgumentParser()
-    ap.add_argument('method_name')
-    ap.add_argument('filenames', nargs='*')
-    args = ap.parse_args()
-    return args
-
-if __name__ == '__main__':
-    main(**get_args().__dict__)
--- /dev/null
+++ b/tools/gfx.py
@@ -1,0 +1,264 @@
+"""Supplementary scripts for graphics conversion."""
+
+import os
+import argparse
+
+from pokemontools import gfx, lz
+
+
+# Graphics with inverted tilemaps that aren't covered by filepath_rules.
+pics = [
+    'gfx/shrink1',
+    'gfx/shrink2',
+]
+
+def recursive_read(filename):
+    def recurse(filename_):
+        lines = []
+        for line in open(filename_):
+            if 'include "' in line.lower():
+                lines += recurse(line.split('"')[1])
+            else:
+                lines += [line]
+        return lines
+    lines = recurse(filename)
+    return ''.join(lines)
+
+base_stats = None
+def get_base_stats():
+    global base_stats
+    if not base_stats:
+        base_stats = recursive_read('data/base_stats.asm')
+    return base_stats
+
+def get_pokemon_dimensions(path):
+    try:
+        byte = bytearray(open(path, 'rb').read())[0]
+        width = byte & 0xf
+        height = (byte >> 4) & 0xf
+        return width, height
+    except:
+        return None
+
+
+def get_animation_frames(path=None, w=7, h=7, bitmask_path=None, frame_path=None):
+    """Retrieve animation frame tilemaps from generated frame/bitmask data."""
+    if not path:
+        path = bitmask_path
+    if not path:
+        path = frame_path
+    if not path:
+        raise Exception("need at least one of path, bitmask_path or frame_path")
+
+    if not bitmask_path:
+        bitmask_path = os.path.join(os.path.split(path)[0], 'bitmask.asm')
+    if not frame_path:
+        frame_path = os.path.join(os.path.split(path)[0], 'frames.asm')
+    bitmask_lines = open(bitmask_path).readlines()
+    frame_lines = open(frame_path).readlines()
+
+    bitmask_length = w * h
+
+    bitmasks = []
+    bitmask = []
+    for line in bitmask_lines:
+        if '\tdb ' in line:
+            value = line.split('\tdb ')[1].strip().replace('%', '0b')
+            value = int(value, 0)
+            #print line.strip(), value, len(bitmasks), len(bitmask)
+            for bit in xrange(8):
+                bitmask += [(value >> bit) & 1]
+                if len(bitmask) >= bitmask_length:
+                    bitmasks += [bitmask]
+                    bitmask = []
+                    break
+    if bitmask:
+        bitmasks += [bitmask]
+
+    frames = []
+    frame_labels = []
+    i = 0
+    for line in frame_lines:
+        if '\tdw ' in line:
+            frame_labels += [line.split('\tdw ')[1].strip()]
+        else:
+            for part in line.split():
+                part = part.strip()
+                if part in frame_labels:
+                    frames += [(part, i)]
+        i += 1
+
+    results = []
+
+    for label, i in frames:
+        result = []
+
+        # get the bitmask and tile ids for each frame
+        # don't care if we read past bounds, so just read the rest of the file
+        values = []
+        for line in frame_lines[i:]:
+            if '\tdb ' in line:
+                values += line.split('\tdb ')[1].split(';')[0].split(',')
+
+        #print bitmasks
+        #print values[0]
+        #print int(values[0].replace('$', '0x'), 0)
+        bitmask = bitmasks[int(values[0].replace('$', '0x'), 0)]
+        tiles = values[1:]
+        k = 0
+        j = 0
+        for bit in bitmask:
+            if bit:
+                result += [int(tiles[k].replace('$', '0x'), 0)]
+                k += 1
+            else:
+                result += [j]
+            j += 1
+
+        results += [result]
+
+    return results
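
For reference, a sketch of the two generated files parsed above, for a hypothetical 2x2-tile pic with one extra frame; the format matches what get_pic_animation in tools/pokemontools/gfx.py (later in this patch) emits. Larger pics use ceil(w * h / 8) `db` lines per bitmask.

    example_frames_asm = (      # hypothetical frames.asm
        '\tdw .frame1\n'
        '.frame1\n'
        '\tdb $00 ; bitmask\n'
        '\tdb $04\n'
    )
    example_bitmask_asm = (     # hypothetical bitmask.asm; bits are read LSB-first
        '; 0\n'
        '\tdb %00000100\n'
    )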
+
+def get_animated_graphics(path, w=7, h=7, bitmask_path=None, frame_path=None):
+    frames = get_animation_frames(path, w, h, bitmask_path, frame_path)
+    new_path = path.replace('.animated.2bpp', '.2bpp')
+    tiles = gfx.get_tiles(bytearray(open(path, 'rb').read()))
+    new_tiles = tiles[:w * h]
+    for frame in frames:
+        for tile in frame:
+            new_tiles += [tiles[tile]]
+    new_graphic = gfx.connect(new_tiles)
+    print new_path, list(new_graphic)
+    open(new_path, 'wb').write(bytearray(new_graphic))
+    return new_path
+
+def filepath_rules(filepath):
+    """Infer attributes of certain graphics by their location in the filesystem."""
+    args = {}
+
+    filedir, filename = os.path.split(filepath)
+    if filedir.startswith('./'):
+        filedir = filedir[2:]
+
+    name, ext = os.path.splitext(filename)
+    if ext == '.lz':
+        name, ext = os.path.splitext(name)
+
+    pokemon_name = ''
+
+    if 'gfx/pokemon/' in filedir:
+        pokemon_name = filedir.split('/')[-1]
+        if pokemon_name.startswith('unown_'):
+            index = filedir.find(pokemon_name)
+            if index != -1:
+                filedir = filedir[:index + len('unown')] + filedir[index + len('unown_a'):]
+        if name == 'front' or name == 'front.animated':
+            args['pal_file'] = os.path.join(filedir, 'normal.pal')
+            args['pic'] = True
+            args['animate'] = True
+        elif name == 'back':
+            args['pal_file'] = os.path.join(filedir, 'normal.pal')
+            args['pic'] = True
+
+    elif 'gfx/trainers' in filedir:
+        args['pic'] = True
+
+    elif os.path.join(filedir, name) in pics:
+        args['pic'] = True
+
+    elif filedir == 'gfx/tilesets':
+        args['tileset'] = True
+
+    if args.get('pal_file'):
+        if os.path.exists(args['pal_file']):
+            args['palout'] = args['pal_file']
+        else:
+            del args['pal_file']
+
+    if args.get('pic'):
+        if ext == '.png':
+            w, h = gfx.png.Reader(filepath).asRGBA8()[:2]
+            w = min(w/8, h/8)
+            args['pic_dimensions'] = w, w
+        elif ext == '.2bpp':
+            if pokemon_name and name == 'front' or name == 'front.animated':
+                w, h = get_pokemon_dimensions(filepath.replace(ext, '.dimensions')) or (7, 7)
+                args['pic_dimensions'] = w, w
+            elif pokemon_name and name == 'back':
+                args['pic_dimensions'] = 6, 6
+            else:
+                args['pic_dimensions'] = 7, 7
+
+    if args.get('tileset'):
+        args['width'] = 128
+    return args
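
As an illustration (hypothetical path), the rules above classify a front pic roughly as follows, assuming normal.pal and front.dimensions exist next to it:

    args = filepath_rules('gfx/pokemon/pikachu/front.2bpp')
    # args == {
    #     'pal_file': 'gfx/pokemon/pikachu/normal.pal',
    #     'palout':   'gfx/pokemon/pikachu/normal.pal',
    #     'pic': True,
    #     'animate': True,
    #     'pic_dimensions': (w, w),   # w read from front.dimensions, else (7, 7)
    # }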
+
+
+def to_1bpp(filename, **kwargs):
+    name, ext = os.path.splitext(filename)
+    if   ext == '.1bpp': pass
+    elif ext == '.2bpp': gfx.export_2bpp_to_1bpp(filename, **kwargs)
+    elif ext == '.png':  gfx.export_png_to_1bpp(filename, **kwargs)
+    elif ext == '.lz':
+        decompress(filename, **kwargs)
+        to_1bpp(name, **kwargs)
+
+def to_2bpp(filename, **kwargs):
+    name, ext = os.path.splitext(filename)
+    if   ext == '.1bpp': gfx.export_1bpp_to_2bpp(filename, **kwargs)
+    elif ext == '.2bpp': pass
+    elif ext == '.png':  gfx.export_png_to_2bpp(filename, **kwargs)
+    elif ext == '.lz':
+        decompress(filename, **kwargs)
+        to_2bpp(name, **kwargs)
+
+def to_png(filename, **kwargs):
+    name, ext = os.path.splitext(filename)
+    if   ext == '.1bpp': gfx.export_1bpp_to_png(filename, **kwargs)
+    elif ext == '.2bpp' and name.endswith('.animated'):
+        w, h = kwargs.get('pic_dimensions') or (7, 7)
+        new_path = get_animated_graphics(filename, w=w, h=h)
+        return to_png(new_path, **kwargs)
+    elif ext == '.2bpp': gfx.export_2bpp_to_png(filename, **kwargs)
+    elif ext == '.png':  pass
+    elif ext == '.lz':
+        decompress(filename, **kwargs)
+        to_png(name, **kwargs)
+
+def compress(filename, **kwargs):
+    data = open(filename, 'rb').read()
+    lz_data = lz.Compressed(data).output
+    open(filename + '.lz', 'wb').write(bytearray(lz_data))
+
+def decompress(filename, **kwargs):
+    lz_data = open(filename, 'rb').read()
+    data = lz.Decompressed(lz_data).output
+    name, ext = os.path.splitext(filename)
+    open(name, 'wb').write(bytearray(data))
+
+
+methods = {
+    '2bpp': to_2bpp,
+    '1bpp': to_1bpp,
+    'png':  to_png,
+    'lz':   compress,
+    'unlz': decompress,
+}
+
+def main(method_name, filenames=None):
+    if filenames is None: filenames = []
+    for filename in filenames:
+        args = filepath_rules(filename)
+        method = methods.get(method_name)
+        if method:
+            method(filename, **args)
+
+def get_args():
+    ap = argparse.ArgumentParser()
+    ap.add_argument('method_name')
+    ap.add_argument('filenames', nargs='*')
+    args = ap.parse_args()
+    return args
+
+if __name__ == '__main__':
+    main(**get_args().__dict__)
--- /dev/null
+++ b/tools/pokemontools/__init__.py
@@ -1,0 +1,1 @@
+# A subset of https://github.com/pret/pokemon-reverse-engineering-tools
--- /dev/null
+++ b/tools/pokemontools/gfx.py
@@ -1,0 +1,938 @@
+# -*- coding: utf-8 -*-
+
+import os
+import sys
+import png
+from math import sqrt, floor, ceil
+import argparse
+import operator
+
+from lz import Compressed, Decompressed
+
+
+def split(list_, interval):
+    """
+    Split a list by length.
+    """
+    for i in xrange(0, len(list_), interval):
+        j = min(i + interval, len(list_))
+        yield list_[i:j]
+
+
+def hex_dump(data, length=0x10):
+    """
+    just use hexdump -C
+    """
+    margin = len('%x' % len(data))
+    output = []
+    address = 0
+    for line in split(data, length):
+        output += [
+            hex(address)[2:].zfill(margin) +
+            ' | ' +
+            ' '.join('%.2x' % byte for byte in line)
+        ]
+        address += length
+    return '\n'.join(output)
+
+
+def get_tiles(image):
+    """
+    Split a 2bpp image into 8x8 tiles.
+    """
+    return list(split(image, 0x10))
+
+def connect(tiles):
+    """
+    Combine 8x8 tiles into a 2bpp image.
+    """
+    return [byte for tile in tiles for byte in tile]
+
+def transpose(tiles, width=None):
+    """
+    Transpose a tile arrangement along line y=-x.
+
+      00 01 02 03 04 05     00 06 0c 12 18 1e
+      06 07 08 09 0a 0b     01 07 0d 13 19 1f
+      0c 0d 0e 0f 10 11 <-> 02 08 0e 14 1a 20
+      12 13 14 15 16 17     03 09 0f 15 1b 21
+      18 19 1a 1b 1c 1d     04 0a 10 16 1c 22
+      1e 1f 20 21 22 23     05 0b 11 17 1d 23
+
+      00 01 02 03     00 04 08
+      04 05 06 07 <-> 01 05 09
+      08 09 0a 0b     02 06 0a
+                      03 07 0b
+    """
+    if width == None:
+        width = int(sqrt(len(tiles))) # assume square image
+    tiles = sorted(enumerate(tiles), key= lambda (i, tile): i % width)
+    return [tile for i, tile in tiles]
+
+def transpose_tiles(image, width=None):
+    return connect(transpose(get_tiles(image), width))
+
+def interleave(tiles, width):
+    """
+      00 01 02 03 04 05     00 02 04 06 08 0a
+      06 07 08 09 0a 0b     01 03 05 07 09 0b
+      0c 0d 0e 0f 10 11 --> 0c 0e 10 12 14 16
+      12 13 14 15 16 17     0d 0f 11 13 15 17
+      18 19 1a 1b 1c 1d     18 1a 1c 1e 20 22
+      1e 1f 20 21 22 23     19 1b 1d 1f 21 23
+    """
+    interleaved = []
+    left, right = split(tiles[::2], width), split(tiles[1::2], width)
+    for l, r in zip(left, right):
+        interleaved += l + r
+    return interleaved
+
+def deinterleave(tiles, width):
+    """
+      00 02 04 06 08 0a     00 01 02 03 04 05 
+      01 03 05 07 09 0b     06 07 08 09 0a 0b
+      0c 0e 10 12 14 16 --> 0c 0d 0e 0f 10 11
+      0d 0f 11 13 15 17     12 13 14 15 16 17
+      18 1a 1c 1e 20 22     18 19 1a 1b 1c 1d
+      19 1b 1d 1f 21 23     1e 1f 20 21 22 23
+    """
+    deinterleaved = []
+    rows = list(split(tiles, width))
+    for left, right in zip(rows[::2], rows[1::2]):
+        for l, r in zip(left, right):
+            deinterleaved += [l, r]
+    return deinterleaved
+
+def interleave_tiles(image, width):
+    return connect(interleave(get_tiles(image), width))
+
+def deinterleave_tiles(image, width):
+    return connect(deinterleave(get_tiles(image), width))
+
+
+def condense_image_to_map(image, pic=0):
+    """
+    Reduce an image of adjacent frames to an image containing a base frame and any unrepeated tiles.
+    Returns the new image and the corresponding tilemap used to reconstruct the input image.
+
+    If <pic> is 0, ignore the concept of frames. This behavior might be better off as another function.
+    """
+    tiles = get_tiles(image)
+    new_tiles, tilemap = condense_tiles_to_map(tiles, pic)
+    new_image = connect(new_tiles)
+    return new_image, tilemap
+
+def condense_tiles_to_map(tiles, pic=0):
+    """
+    Reduce a sequence of tiles representing adjacent frames to a base frame and any unrepeated tiles.
+    Returns the new tiles and the corresponding tilemap used to reconstruct the input tile sequence.
+
+    If <pic> is 0, ignore the concept of frames. This behavior might be better off as another function.
+    """
+
+    # Leave the first frame intact for pics.
+    new_tiles = tiles[:pic]
+    tilemap   = range(pic)
+
+    for i, tile in enumerate(tiles[pic:]):
+        if tile not in new_tiles:
+            new_tiles.append(tile)
+
+        if pic:
+            # Match the first frame exactly where possible.
+            # This reduces the space needed to replace tiles in pic animations.
+            # For example, if a tile is repeated twice in the first frame,
+            # but at the same relative index as the second tile, use the second index.
+            # When creating a bitmask later, the second index would not require a replacement, but the first index would have.
+            pic_i = i % pic
+            if tile == new_tiles[pic_i]:
+                tilemap.append(pic_i)
+            else:
+                tilemap.append(new_tiles.index(tile))
+        else:
+            tilemap.append(new_tiles.index(tile))
+    return new_tiles, tilemap
+
+def test_condense_tiles_to_map():
+    test = condense_tiles_to_map(list('abcadbae'))
+    if test != (list('abcde'), [0, 1, 2, 0, 3, 1, 0, 4]):
+        raise Exception(test)
+    test = condense_tiles_to_map(list('abcadbae'), 2)
+    if test != (list('abcde'), [0, 1, 2, 0, 3, 1, 0, 4]):
+        raise Exception(test)
+    test = condense_tiles_to_map(list('abcadbae'), 4)
+    if test != (list('abcade'), [0, 1, 2, 3, 4, 1, 0, 5]):
+        raise Exception(test)
+    test = condense_tiles_to_map(list('abcadbea'), 4)
+    if test != (list('abcade'), [0, 1, 2, 3, 4, 1, 5, 3]):
+        raise Exception(test)
+
+
+def to_file(filename, data):
+    """
+    Apparently open(filename, 'wb').write(bytearray(data)) won't work.
+    """
+    file = open(filename, 'wb')
+    for byte in data:
+        file.write('%c' % byte)
+    file.close()
+
+
+def decompress_file(filein, fileout=None):
+    image = bytearray(open(filein).read())
+    de = Decompressed(image)
+
+    if fileout == None:
+        fileout = os.path.splitext(filein)[0]
+    to_file(fileout, de.output)
+
+
+def compress_file(filein, fileout=None):
+    image = bytearray(open(filein).read())
+    lz = Compressed(image)
+
+    if fileout == None:
+        fileout = filein + '.lz'
+    to_file(fileout, lz.output)
+
+
+def bin_to_rgb(word):
+    red   = word & 0b11111
+    word >>= 5
+    green = word & 0b11111
+    word >>= 5
+    blue  = word & 0b11111
+    return (red, green, blue)
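
Illustrative decodings of GBC palette words (5 bits per channel, red in the low bits):

    bin_to_rgb(0x7fff)   # (31, 31, 31)  white
    bin_to_rgb(0x03e0)   # (0, 31, 0)    pure green
    bin_to_rgb(0x001f)   # (31, 0, 0)    pure red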
+
+def convert_binary_pal_to_text_by_filename(filename):
+    pal = bytearray(open(filename).read())
+    return convert_binary_pal_to_text(pal)
+
+def convert_binary_pal_to_text(pal):
+    output = ''
+    words = [hi * 0x100 + lo for lo, hi in zip(pal[::2], pal[1::2])]
+    for word in words:
+        red, green, blue = ['%.2d' % c for c in bin_to_rgb(word)]
+        output += '\tRGB ' + ', '.join((red, green, blue))
+        output += '\n'
+    return output
+
+def read_rgb_macros(lines):
+    colors = []
+    for line in lines:
+        macro = line.split(" ")[0].strip()
+        if macro == 'RGB':
+            params = ' '.join(line.split(" ")[1:]).split(',')
+            red, green, blue = [int(v) for v in params]
+            colors += [[red, green, blue]]
+    return colors
+
+
+def rewrite_binary_pals_to_text(filenames):
+    for filename in filenames:
+        pal_text = convert_binary_pal_to_text_by_filename(filename)
+        with open(filename, 'w') as out:
+            out.write(pal_text)
+
+
+def flatten(planar):
+    """
+    Flatten planar 2bpp image data into a quaternary pixel map.
+    """
+    strips = []
+    for bottom, top in split(planar, 2):
+        bottom = bottom
+        top = top
+        strip = []
+        for i in xrange(7,-1,-1):
+            color = (
+                (bottom >> i & 1) +
+                (top *2 >> i & 2)
+            )
+            strip += [color]
+        strips += strip
+    return strips
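
A small illustration: each tile row is a pair of bytes, and the first byte of the pair supplies the low bit of every pixel, so the pair 0x55, 0x33 flattens to the four color ids in sequence:

    flatten([0x55, 0x33])   # [0, 1, 2, 3, 0, 1, 2, 3]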
+
+def to_lines(image, width):
+    """
+    Convert a tiled quaternary pixel map to lines of quaternary pixels.
+    """
+    tile_width = 8
+    tile_height = 8
+    num_columns = width / tile_width
+    height = len(image) / width
+
+    lines = []
+    for cur_line in xrange(height):
+        tile_row = cur_line / tile_height
+        line = []
+        for column in xrange(num_columns):
+            anchor = (
+                num_columns * tile_row * tile_width * tile_height +
+                column * tile_width * tile_height +
+                cur_line % tile_height * tile_width
+            )
+            line += image[anchor : anchor + tile_width]
+        lines += [line]
+    return lines
+
+
+def dmg2rgb(word):
+    """
+    For PNGs.
+    """
+    def shift(value):
+        while True:
+            yield value & (2**5 - 1)
+            value >>= 5
+    word = shift(word)
+    # distribution is less even w/ << 3
+    red, green, blue = [int(color * 8.25) for color in [word.next() for _ in xrange(3)]]
+    alpha = 255
+    return (red, green, blue, alpha)
+
+
+def rgb_to_dmg(color):
+    """
+    For PNGs.
+    """
+    word =  (color['r'] / 8)
+    word += (color['g'] / 8) << 5
+    word += (color['b'] / 8) << 10
+    return word
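
An illustrative round trip between a 15-bit color word and the png-style channel dict used here (the 8.25 factor maps 31 to 255):

    dmg2rgb(0x7fff)                              # (255, 255, 255, 255)
    rgb_to_dmg({'r': 255, 'g': 255, 'b': 255})   # 0x7fff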
+
+
+def pal_to_png(filename):
+    """
+    Interpret a .pal file as a png palette.
+    """
+    with open(filename) as rgbs:
+        colors = read_rgb_macros(rgbs.readlines())
+    a = 255
+    palette = []
+    for color in colors:
+        # even distribution over 000-255
+        r, g, b = [int(hue * 8.25) for hue in color]
+        palette += [(r, g, b, a)]
+    white = (255,255,255,255)
+    black = (000,000,000,255)
+    if white not in palette and len(palette) < 4:
+        palette = [white] + palette
+    if black not in palette and len(palette) < 4:
+        palette = palette + [black]
+    return palette
+
+
+def png_to_rgb(palette):
+    """
+    Convert a png palette to rgb macros.
+    """
+    output = ''
+    for color in palette:
+        r, g, b = [color[c] / 8 for c in 'rgb']
+        output += '\tRGB ' + ', '.join(['%.2d' % hue for hue in (r, g, b)])
+        output += '\n'
+    return output
+
+
+def read_filename_arguments(filename):
+    """
+    Infer graphics conversion arguments given a filename.
+
+    Arguments are separated with '.'.
+    """
+    parsed_arguments = {}
+
+    int_arguments = {
+        'w': 'width',
+        'h': 'height',
+        't': 'tile_padding',
+    }
+    arguments = os.path.splitext(filename)[0].lstrip('.').split('.')[1:]
+    for argument in arguments:
+
+        # Check for integer arguments first (i.e. "w128").
+        arg   = argument[0]
+        param = argument[1:]
+        if param.isdigit():
+            arg = int_arguments.get(arg, False)
+            if arg:
+                parsed_arguments[arg] = int(param)
+
+        elif argument == 'arrange':
+            parsed_arguments['norepeat'] = True
+            parsed_arguments['tilemap']  = True
+
+        # Pic dimensions (i.e. "6x6").
+        elif 'x' in argument and any(map(str.isdigit, argument)):
+            w, h = argument.split('x')
+            if w.isdigit() and h.isdigit():
+                parsed_arguments['pic_dimensions'] = (int(w), int(h))
+
+        else:
+            parsed_arguments[argument] = True
+
+    return parsed_arguments
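
Illustrative parses (hypothetical filenames); every dot-separated token after the base name becomes a conversion argument:

    read_filename_arguments('gfx/tilesets/foo.w128.png')    # {'width': 128}
    read_filename_arguments('gfx/pics/bar.6x6.2bpp')        # {'pic_dimensions': (6, 6)}
    read_filename_arguments('gfx/font/baz.arrange.png')     # {'norepeat': True, 'tilemap': True}
    read_filename_arguments('gfx/misc/qux.interleave.png')  # {'interleave': True}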
+
+
+def export_2bpp_to_png(filein, fileout=None, pal_file=None, height=0, width=0, tile_padding=0, pic_dimensions=None, **kwargs):
+
+    if fileout == None:
+        fileout = os.path.splitext(filein)[0] + '.png'
+
+    image = open(filein, 'rb').read()
+
+    arguments = {
+        'width': width,
+        'height': height,
+        'pal_file': pal_file,
+        'tile_padding': tile_padding,
+        'pic_dimensions': pic_dimensions,
+    }
+    arguments.update(read_filename_arguments(filein))
+
+    if pal_file == None:
+        if os.path.exists(os.path.splitext(fileout)[0]+'.pal'):
+            arguments['pal_file'] = os.path.splitext(fileout)[0]+'.pal'
+
+    result = convert_2bpp_to_png(image, **arguments)
+    width, height, palette, greyscale, bitdepth, px_map = result
+
+    w = png.Writer(
+        width,
+        height,
+        palette=palette,
+        compression=9,
+        greyscale=greyscale,
+        bitdepth=bitdepth
+    )
+    with open(fileout, 'wb') as f:
+        w.write(f, px_map)
+
+
+def convert_2bpp_to_png(image, **kwargs):
+    """
+    Convert a planar 2bpp graphic to png.
+    """
+
+    image = bytearray(image)
+
+    pad_color = bytearray([0])
+
+    width          = kwargs.get('width', 0)
+    height         = kwargs.get('height', 0)
+    tile_padding   = kwargs.get('tile_padding', 0)
+    pic_dimensions = kwargs.get('pic_dimensions', None)
+    pal_file       = kwargs.get('pal_file', None)
+    interleave     = kwargs.get('interleave', False)
+
+    # Width must be specified to interleave.
+    if interleave and width:
+        image = interleave_tiles(image, width / 8)
+
+    # Pad the image by a given number of tiles if asked.
+    image += pad_color * 0x10 * tile_padding
+
+    # Some images are transposed in blocks.
+    if pic_dimensions:
+        w, h  = pic_dimensions
+        if not width: width = w * 8
+
+        pic_length = w * h * 0x10
+
+        trailing = len(image) % pic_length
+
+        pic = []
+        for i in xrange(0, len(image) - trailing, pic_length):
+            pic += transpose_tiles(image[i:i+pic_length], h)
+        image = bytearray(pic) + image[len(image) - trailing:]
+
+        # Pad out trailing lines.
+        image += pad_color * 0x10 * ((w - (len(image) / 0x10) % h) % w)
+
+    def px_length(img):
+        return len(img) * 4
+    def tile_length(img):
+        return len(img) * 4 / (8*8)
+
+    if width and height:
+        tile_width = width / 8
+        more_tile_padding = (tile_width - (tile_length(image) % tile_width or tile_width))
+        image += pad_color * 0x10 * more_tile_padding
+
+    elif width and not height:
+        tile_width = width / 8
+        more_tile_padding = (tile_width - (tile_length(image) % tile_width or tile_width))
+        image += pad_color * 0x10 * more_tile_padding
+        height = px_length(image) / width
+
+    elif height and not width:
+        tile_height = height / 8
+        more_tile_padding = (tile_height - (tile_length(image) % tile_height or tile_height))
+        image += pad_color * 0x10 * more_tile_padding
+        width = px_length(image) / height
+
+    # at least one dimension should be given
+    if width * height != px_length(image):
+        # look for possible combos of width/height that would form a rectangle
+        matches = []
+        # Height need not be divisible by 8, but width must.
+        # See pokered gfx/minimize_pic.1bpp.
+        for w in range(8, px_length(image) / 2 + 1, 8):
+            h = px_length(image) / w
+            if w * h == px_length(image):
+                matches += [(w, h)]
+        # go for the most square image
+        if len(matches):
+            width, height = sorted(matches, key= lambda (w, h): (h % 8 != 0, w + h))[0] # favor height
+        else:
+            raise Exception, 'Image can\'t be divided into tiles (%d px)!' % (px_length(image))
+
+    # convert tiles to lines
+    lines = to_lines(flatten(image), width)
+
+    if pal_file == None:
+        palette   = None
+        greyscale = True
+        bitdepth  = 2
+        px_map    = [[3 - pixel for pixel in line] for line in lines]
+
+    else: # gbc color
+        palette   = pal_to_png(pal_file)
+        greyscale = False
+        bitdepth  = 8
+        px_map    = [[pixel for pixel in line] for line in lines]
+
+    return width, height, palette, greyscale, bitdepth, px_map
+
+
+def get_pic_animation(tmap, w, h):
+    """
+    Generate pic animation data from a combined tilemap of each frame.
+    """
+    frame_text = ''
+    bitmask_text = ''
+
+    frames = list(split(tmap, w * h))
+    base = frames.pop(0)
+    bitmasks = []
+
+    for i in xrange(len(frames)):
+        frame_text += '\tdw .frame{}\n'.format(i + 1)
+
+    for i, frame in enumerate(frames):
+        bitmask = map(operator.ne, frame, base)
+        if bitmask not in bitmasks:
+            bitmasks.append(bitmask)
+        which_bitmask = bitmasks.index(bitmask)
+
+        mask = iter(bitmask)
+        masked_frame = filter(lambda _: mask.next(), frame)
+
+        frame_text += '.frame{}\n'.format(i + 1)
+        frame_text += '\tdb ${:02x} ; bitmask\n'.format(which_bitmask)
+        if masked_frame:
+            frame_text += '\tdb {}\n'.format(', '.join(
+                map('${:02x}'.format, masked_frame)
+            ))
+
+    for i, bitmask in enumerate(bitmasks):
+        bitmask_text += '; {}\n'.format(i)
+        for byte in split(bitmask, 8):
+            byte = int(''.join(map(int.__repr__, reversed(byte))), 2)
+            bitmask_text += '\tdb %{:08b}\n'.format(byte)
+
+    return frame_text, bitmask_text
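
A worked example (hypothetical tilemap) for a 2x2-tile pic with one extra frame in which only the third tile changes; this is the same shape that get_animation_frames in tools/gfx.py reads back:

    frame_text, bitmask_text = get_pic_animation([0, 1, 2, 3,  0, 1, 4, 3], 2, 2)
    # frame_text   == '\tdw .frame1\n.frame1\n\tdb $00 ; bitmask\n\tdb $04\n'
    # bitmask_text == '; 0\n\tdb %00000100\n'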
+
+
+def export_png_to_2bpp(filein, fileout=None, palout=None, **kwargs):
+
+    arguments = {
+        'tile_padding': 0,
+        'pic_dimensions': None,
+        'animate': False,
+        'stupid_bitmask_hack': [],
+    }
+    arguments.update(kwargs)
+    arguments.update(read_filename_arguments(filein))
+
+    image, arguments = png_to_2bpp(filein, **arguments)
+
+    if fileout == None:
+        fileout = os.path.splitext(filein)[0] + '.2bpp'
+    to_file(fileout, image)
+
+    tmap = arguments.get('tmap')
+
+    if tmap != None and arguments['animate'] and arguments['pic_dimensions']:
+        # Generate pic animation data.
+        frame_text, bitmask_text = get_pic_animation(tmap, *arguments['pic_dimensions'])
+
+        frames_path = os.path.join(os.path.split(fileout)[0], 'frames.asm')
+        with open(frames_path, 'w') as out:
+            out.write(frame_text)
+
+        bitmask_path = os.path.join(os.path.split(fileout)[0], 'bitmask.asm')
+
+        # The following Pokemon have a bitmask dummied out.
+        for exception in arguments['stupid_bitmask_hack']:
+           if exception in bitmask_path:
+                bitmasks = bitmask_text.split(';')
+                bitmasks[-1] = bitmasks[-1].replace('1', '0')
+                bitmask_text = ';'.join(bitmasks)
+
+        with open(bitmask_path, 'w') as out:
+            out.write(bitmask_text)
+
+    elif tmap != None and arguments.get('tilemap', False):
+        tilemap_path = os.path.splitext(fileout)[0] + '.tilemap'
+        to_file(tilemap_path, tmap)
+
+    palette = arguments.get('palette')
+    if palout == None:
+        palout = os.path.splitext(fileout)[0] + '.pal'
+    export_palette(palette, palout)
+
+
+def get_image_padding(width, height, wstep=8, hstep=8):
+
+    padding = {
+        'left':   0,
+        'right':  0,
+        'top':    0,
+        'bottom': 0,
+    }
+
+    if width % wstep and width >= wstep:
+       pad = float(width % wstep) / 2
+       padding['left']   = int(ceil(pad))
+       padding['right']  = int(floor(pad))
+
+    if height % hstep and height >= hstep:
+       pad = float(height % hstep) / 2
+       padding['top']    = int(ceil(pad))
+       padding['bottom'] = int(floor(pad))
+
+    return padding
+
+
+def png_to_2bpp(filein, **kwargs):
+    """
+    Convert a png image to planar 2bpp.
+    """
+
+    arguments = {
+        'tile_padding': 0,
+        'pic_dimensions': False,
+        'interleave': False,
+        'norepeat': False,
+        'tilemap': False,
+    }
+    arguments.update(kwargs)
+
+    if type(filein) is str:
+        filein = open(filein)
+
+    assert type(filein) is file
+
+    width, height, rgba, info = png.Reader(filein).asRGBA8()
+
+    # png.Reader returns flat pixel data. Nested is easier to work with
+    len_px  = len('rgba')
+    image   = []
+    palette = []
+    for line in rgba:
+        newline = []
+        for px in xrange(0, len(line), len_px):
+            color = dict(zip('rgba', line[px:px+len_px]))
+            if color not in palette:
+                if len(palette) < 4:
+                    palette += [color]
+                else:
+                    # TODO Find the nearest match
+                    print 'WARNING: %s: Color %s truncated to' % (filein, color),
+                    color = sorted(palette, key=lambda x: sum(x.values()))[0]
+                    print color
+            newline += [color]
+        image += [newline]
+
+    assert len(palette) <= 4, '%s: palette should be 4 colors, is really %d (%s)' % (filein, len(palette), palette)
+
+    # Pad out smaller palettes with greyscale colors
+    greyscale = {
+        'black': { 'r': 0x00, 'g': 0x00, 'b': 0x00, 'a': 0xff },
+        'grey':  { 'r': 0x55, 'g': 0x55, 'b': 0x55, 'a': 0xff },
+        'gray':  { 'r': 0xaa, 'g': 0xaa, 'b': 0xaa, 'a': 0xff },
+        'white': { 'r': 0xff, 'g': 0xff, 'b': 0xff, 'a': 0xff },
+    }
+    preference = 'white', 'black', 'grey', 'gray'
+    for hue in map(greyscale.get, preference):
+        if len(palette) >= 4:
+            break
+        if hue not in palette:
+            palette += [hue]
+
+    palette.sort(key=lambda x: sum(x.values()))
+
+    # Game Boy palette order
+    palette.reverse()
+
+    # Map pixels to quaternary color ids
+    padding = get_image_padding(width, height)
+    width += padding['left'] + padding['right']
+    height += padding['top'] + padding['bottom']
+    pad = bytearray([0])
+
+    qmap = []
+    qmap += pad * width * padding['top']
+    for line in image:
+        qmap += pad * padding['left']
+        for color in line:
+            qmap += [palette.index(color)]
+        qmap += pad * padding['right']
+    qmap += pad * width * padding['bottom']
+
+    # Graphics are stored in tiles instead of lines
+    tile_width  = 8
+    tile_height = 8
+    num_columns = max(width, tile_width) / tile_width
+    num_rows = max(height, tile_height) / tile_height
+    image = []
+
+    for row in xrange(num_rows):
+        for column in xrange(num_columns):
+
+            # Split it up into strips to convert to planar data
+            for strip in xrange(min(tile_height, height)):
+                anchor = (
+                    row * num_columns * tile_width * tile_height +
+                    column * tile_width +
+                    strip * width
+                )
+                line = qmap[anchor : anchor + tile_width]
+                bottom, top = 0, 0
+                for bit, quad in enumerate(line):
+                    bottom += (quad & 1) << (7 - bit)
+                    top += (quad /2 & 1) << (7 - bit)
+                image += [bottom, top]
+
+    dim = arguments['pic_dimensions']
+    if dim:
+        if type(dim) in (tuple, list):
+            w, h = dim
+        else:
+            # infer dimensions based on width.
+            w = width / tile_width
+            h = height / tile_height
+            if h % w == 0:
+                h = w
+
+        tiles = get_tiles(image)
+        pic_length = w * h
+        tile_width = width / 8
+        trailing = len(tiles) % pic_length
+        new_image = []
+        for block in xrange(len(tiles) / pic_length):
+            offset = (h * tile_width) * ((block * w) / tile_width) + ((block * w) % tile_width)
+            pic = []
+            for row in xrange(h):
+                index = offset + (row * tile_width)
+                pic += tiles[index:index + w]
+            new_image += transpose(pic, w)
+        new_image += tiles[len(tiles) - trailing:]
+        image = connect(new_image)
+
+    # Remove any tile padding used to make the png rectangular.
+    image = image[:len(image) - arguments['tile_padding'] * 0x10]
+
+    tmap = None
+
+    if arguments['interleave']:
+        image = deinterleave_tiles(image, num_columns)
+
+    if arguments['pic_dimensions']:
+        image, tmap = condense_image_to_map(image, w * h)
+    elif arguments['norepeat']:
+        image, tmap = condense_image_to_map(image)
+        if not arguments['tilemap']:
+            tmap = None
+
+    arguments.update({ 'palette': palette, 'tmap': tmap, })
+
+    return image, arguments
+
+
+def export_palette(palette, filename):
+    """
+    Export a palette from png to rgb macros in a .pal file.
+    """
+
+    if os.path.exists(filename):
+
+        # Pic palettes are 2 colors (black/white are added later).
+        with open(filename) as rgbs:
+            colors = read_rgb_macros(rgbs.readlines())
+
+        if len(colors) == 2:
+            palette = palette[1:3]
+
+        text = png_to_rgb(palette)
+        with open(filename, 'w') as out:
+            out.write(text)
+
+
+def png_to_lz(filein):
+
+    name = os.path.splitext(filein)[0]
+
+    export_png_to_2bpp(filein)
+    image = open(name+'.2bpp', 'rb').read()
+    to_file(name+'.2bpp'+'.lz', Compressed(image).output)
+
+
+def convert_2bpp_to_1bpp(data):
+    """
+    Convert planar 2bpp image data to 1bpp. Assume images are two colors.
+    """
+    return data[::2]
+
+def convert_1bpp_to_2bpp(data):
+    """
+    Convert 1bpp image data to planar 2bpp (black/white).
+    """
+    output = []
+    for i in data:
+        output += [i, i]
+    return output
+
+
+def export_2bpp_to_1bpp(filename):
+    name, extension = os.path.splitext(filename)
+    image = open(filename, 'rb').read()
+    image = convert_2bpp_to_1bpp(image)
+    to_file(name + '.1bpp', image)
+
+def export_1bpp_to_2bpp(filename):
+    name, extension = os.path.splitext(filename)
+    image = open(filename, 'rb').read()
+    image = convert_1bpp_to_2bpp(image)
+    to_file(name + '.2bpp', image)
+
+
+def export_1bpp_to_png(filename, fileout=None):
+
+    if fileout == None:
+        fileout = os.path.splitext(filename)[0] + '.png'
+
+    arguments = read_filename_arguments(filename)
+
+    image = open(filename, 'rb').read()
+    image = convert_1bpp_to_2bpp(image)
+
+    result = convert_2bpp_to_png(image, **arguments)
+    width, height, palette, greyscale, bitdepth, px_map = result
+
+    w = png.Writer(width, height, palette=palette, compression=9, greyscale=greyscale, bitdepth=bitdepth)
+    with open(fileout, 'wb') as f:
+        w.write(f, px_map)
+
+
+def export_png_to_1bpp(filename, fileout=None):
+
+    if fileout == None:
+        fileout = os.path.splitext(filename)[0] + '.1bpp'
+
+    arguments = read_filename_arguments(filename)
+    image = png_to_1bpp(filename, **arguments)
+
+    to_file(fileout, image)
+
+def png_to_1bpp(filename, **kwargs):
+    image, kwargs = png_to_2bpp(filename, **kwargs)
+    return convert_2bpp_to_1bpp(image)
+
+
+def convert_to_2bpp(filenames=[]):
+    for filename in filenames:
+        filename, name, extension = try_decompress(filename)
+        if extension == '.1bpp':
+            export_1bpp_to_2bpp(filename)
+        elif extension == '.2bpp':
+            pass
+        elif extension == '.png':
+            export_png_to_2bpp(filename)
+        else:
+            raise Exception, "Don't know how to convert {} to 2bpp!".format(filename)
+
+def convert_to_1bpp(filenames=[]):
+    for filename in filenames:
+        filename, name, extension = try_decompress(filename)
+        if extension == '.1bpp':
+            pass
+        elif extension == '.2bpp':
+            export_2bpp_to_1bpp(filename)
+        elif extension == '.png':
+            export_png_to_1bpp(filename)
+        else:
+            raise Exception, "Don't know how to convert {} to 1bpp!".format(filename)
+
+def convert_to_png(filenames=[]):
+    for filename in filenames:
+        filename, name, extension = try_decompress(filename)
+        if extension == '.1bpp':
+            export_1bpp_to_png(filename)
+        elif extension == '.2bpp':
+            export_2bpp_to_png(filename)
+        elif extension == '.png':
+            pass
+        else:
+            raise Exception, "Don't know how to convert {} to png!".format(filename)
+
+def compress(filenames=[]):
+    for filename in filenames:
+        data = open(filename, 'rb').read()
+        lz_data = Compressed(data).output
+        to_file(filename + '.lz', lz_data)
+
+def decompress(filenames=[]):
+    for filename in filenames:
+        name, extension = os.path.splitext(filename)
+        lz_data = open(filename, 'rb').read()
+        data = Decompressed(lz_data).output
+        to_file(name, data)
+
+def try_decompress(filename):
+    """
+    Try to decompress a graphic when determining the filetype.
+    This skips the manual unlz step when attempting
+    to convert lz-compressed graphics to png.
+    """
+    name, extension = os.path.splitext(filename)
+    if extension == '.lz':
+        decompress([filename])
+        filename = name
+        name, extension = os.path.splitext(filename)
+    return filename, name, extension
+
+
+def main():
+    ap = argparse.ArgumentParser()
+    ap.add_argument('mode')
+    ap.add_argument('filenames', nargs='*')
+    args = ap.parse_args()
+
+    method = {
+        '2bpp': convert_to_2bpp,
+        '1bpp': convert_to_1bpp,
+        'png':  convert_to_png,
+        'lz':   compress,
+        'unlz': decompress,
+    }.get(args.mode, None)
+
+    if method == None:
+        raise Exception, "Unknown conversion method!"
+
+    method(args.filenames)
+
+if __name__ == "__main__":
+    main()
--- /dev/null
+++ b/tools/pokemontools/lz.py
@@ -1,0 +1,580 @@
+# -*- coding: utf-8 -*-
+"""
+Pokemon Crystal data de/compression.
+"""
+
+"""
+A rundown of Pokemon Crystal's compression scheme:
+
+Control commands occupy bits 5-7.
+Bits 0-4 serve as the first parameter <n> for each command.
+"""
+lz_commands = {
+    'literal':   0, # n values for n bytes
+    'iterate':   1, # one value for n bytes
+    'alternate': 2, # alternate two values for n bytes
+    'blank':     3, # zero for n bytes
+}
+
+"""
+Repeater commands repeat any data that was just decompressed.
+They take an additional signed parameter <s> to mark a relative starting point.
+These wrap around (positive from the start, negative from the current position).
+"""
+lz_commands.update({
+    'repeat':    4, # n bytes starting from s
+    'flip':      5, # n bytes in reverse bit order starting from s
+    'reverse':   6, # n bytes backwards starting from s
+})
+
+"""
+The long command is used when 5 bits aren't enough. Bits 2-4 contain a new control code.
+Bits 0-1 are appended to a new byte as 8-9, allowing a 10-bit parameter.
+"""
+lz_commands.update({
+    'long':      7, # n is now 10 bits for a new control code
+})
+max_length = 1 << 10 # can't go higher than 10 bits
+lowmax     = 1 <<  5 # standard 5-bit param
+
+"""
+If 0xff is encountered instead of a command, decompression ends.
+"""
+lz_end = 0xff
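
A worked sketch of the byte layout described above, reusing the names just defined; the stored parameter is the count minus one, matching Compressed.do_cmd below:

    iterate_4_of_0xaa = [(lz_commands['iterate'] << 5) | (4 - 1), 0xaa]      # [0x23, 0xaa]
    blank_300 = [(lz_commands['long'] << 5) | (lz_commands['blank'] << 2) | ((300 - 1) >> 8),
                 (300 - 1) & 0xff]                                           # [0xed, 0x2b]
    stream = iterate_4_of_0xaa + blank_300 + [lz_end]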
+
+
+bit_flipped = [
+    sum(((byte >> i) & 1) << (7 - i) for i in xrange(8))
+    for byte in xrange(0x100)
+]
+
+
+class Compressed:
+
+    """
+    Usage:
+        lz = Compressed(data).output
+    or
+        lz = Compressed().compress(data)
+    or
+        c = Compressed()
+        c.data = data
+        lz = c.compress()
+
+    There are some issues with reproducing the target compressor.
+    Some notes are listed here:
+        - the criteria for detecting a lookback is inconsistent
+            - sometimes lookbacks that are mostly 0s are pruned, sometimes not
+        - target appears to skip ahead if it can use a lookback soon, stopping the current command short or in some cases truncating it with literals.
+            - this has been implemented, but the specifics are unknown
+        - self.min_scores: It's unknown if blank's minimum score should be 1 or 2. Most likely it's 1, with some other hack to account for edge cases.
+            - may be related to the above
+        - target does not appear to compress backwards
+    """
+
+    def __init__(self, *args, **kwargs):
+
+        self.min_scores = {
+            'blank':     1,
+            'iterate':   2,
+            'alternate': 3,
+            'repeat':    3,
+            'reverse':   3,
+            'flip':      3,
+        }
+
+        self.preference = [
+            'repeat',
+            'blank',
+            'flip',
+            'reverse',
+            'iterate',
+            'alternate',
+            #'literal',
+        ]
+
+        self.lookback_methods = 'repeat', 'reverse', 'flip'
+
+        self.__dict__.update({
+            'data': None,
+            'commands': lz_commands,
+            'debug': False,
+            'literal_only': False,
+        })
+
+        self.arg_names = 'data', 'commands', 'debug', 'literal_only'
+
+        self.__dict__.update(kwargs)
+        self.__dict__.update(dict(zip(self.arg_names, args)))
+
+        if self.data is not None:
+            self.compress()
+
+    def compress(self, data=None):
+        if data is not None:
+            self.data = data
+
+        self.data = list(bytearray(self.data))
+
+        self.indexes = {}
+        self.lookbacks = {}
+        for method in self.lookback_methods:
+            self.lookbacks[method] = {}
+
+        self.address = 0
+        self.end     = len(self.data)
+        self.output  = []
+        self.literal = None
+
+        while self.address < self.end:
+
+            if self.score():
+                self.do_literal()
+                self.do_winner()
+
+            else:
+                if self.literal == None:
+                    self.literal = self.address
+                self.address += 1
+
+        self.do_literal()
+
+        self.output += [lz_end]
+        return self.output
+
+    def reset_scores(self):
+        self.scores = {}
+        self.offsets = {}
+        self.helpers = {}
+        for method in self.min_scores.iterkeys():
+            self.scores[method] = 0
+
+    def bit_flip(self, byte):
+        return bit_flipped[byte]
+
+    def do_literal(self):
+        if self.literal != None:
+            length = abs(self.address - self.literal)
+            start  = min(self.literal, self.address + 1)
+            self.helpers['literal'] = self.data[start:start+length]
+            self.do_cmd('literal', length)
+            self.literal = None
+
+    def score(self):
+        self.reset_scores()
+
+        map(self.score_literal, ['iterate', 'alternate', 'blank'])
+
+        for method in self.lookback_methods:
+            self.scores[method], self.offsets[method] = self.find_lookback(method, self.address)
+
+        self.stop_short()
+
+        return any(
+            score
+          > self.min_scores[method] + int(score > lowmax)
+            for method, score in self.scores.iteritems()
+        )
+
+    def stop_short(self):
+        """
+        If a lookback is close, reduce the scores of other commands.
+        """
+        best_method, best_score = max(
+            self.scores.items(),
+            key = lambda x: (
+                x[1],
+                -self.preference.index(x[0])
+            )
+        )
+        for method in self.lookback_methods:
+            min_score = self.min_scores[method]
+            for address in xrange(self.address+1, self.address+best_score):
+                length, index = self.find_lookback(method, address)
+                if length > max(min_score, best_score):
+                    # BUG: lookbacks can reduce themselves. This appears to be a bug in the target also.
+                    for m, score in self.scores.items():
+                        self.scores[m] = min(score, address - self.address)
+
+
+    def read(self, address=None):
+        if address is None:
+            address = self.address
+        if 0 <= address < len(self.data):
+            return self.data[address]
+        return None
+
+    def find_all_lookbacks(self):
+        for method in self.lookback_methods:
+            for address, byte in enumerate(self.data):
+                self.find_lookback(method, address)
+
+    def find_lookback(self, method, address=None):
+        """Temporarily stubbed, because the real function doesn't run in polynomial time."""
+        return 0, None
+
+    def broken_find_lookback(self, method, address=None):
+        if address is None:
+            address = self.address
+
+        existing = self.lookbacks.get(method, {}).get(address)
+        if existing != None:
+            return existing
+
+        lookback = 0, None
+
+        # Better to not carelessly optimize at the moment.
+        """
+        if address < 2:
+            return lookback
+        """
+
+        byte = self.read(address)
+        if byte is None:
+            return lookback
+
+        direction, mutate = {
+            'repeat':  ( 1, int),
+            'reverse': (-1, int),
+            'flip':    ( 1, self.bit_flip),
+        }[method]
+
+        # Doesn't seem to help
+        """
+        if mutate == self.bit_flip:
+            if byte == 0:
+                self.lookbacks[method][address] = lookback
+                return lookback
+        """
+
+        data_len = len(self.data)
+        is_two_byte_index = lambda index: int(index < address - 0x7f)
+
+        for index in self.get_indexes(mutate(byte)):
+
+            if index >= address:
+                break
+
+            old_length, old_index = lookback
+            if direction == 1:
+                if old_length > data_len - index: break
+            else:
+                if old_length > index: continue
+
+            if self.read(index) in [None]: continue
+
+            length = 1 # we know there's at least one match, or we wouldn't be checking this index
+            while 1:
+                this_byte = self.read(address + length)
+                that_byte = self.read(index + length * direction)
+                if that_byte == None or this_byte != mutate(that_byte):
+                    break
+                length += 1
+
+            score = length - is_two_byte_index(index)
+            old_score = old_length - is_two_byte_index(old_index)
+            if score >= old_score or (score == old_score and length > old_length):
+                # XXX maybe avoid two-byte indexes when possible
+                if score >= lookback[0] - is_two_byte_index(lookback[1]):
+                    lookback = length, index
+
+        self.lookbacks[method][address] = lookback
+        return lookback
+
+    def get_indexes(self, byte):
+        if not self.indexes.has_key(byte):
+            self.indexes[byte] = []
+            index = -1
+            while 1:
+                try:
+                    index = self.data.index(byte, index + 1)
+                except ValueError:
+                    break
+                self.indexes[byte].append(index)
+        return self.indexes[byte]
+
+    def score_literal(self, method):
+        address = self.address
+
+        compare = {
+            'blank': [0],
+            'iterate': [self.read(address)],
+            'alternate': [self.read(address), self.read(address + 1)],
+        }[method]
+
+        # XXX may or may not be correct
+        if method == 'alternate' and compare[0] == 0:
+            return
+
+        length = 0
+        while self.read(address + length) == compare[length % len(compare)]:
+            length += 1
+
+        self.scores[method] = length
+        self.helpers[method] = compare
+
+    def do_winner(self):
+        winners = filter(
+            lambda (method, score):
+                score
+              > self.min_scores[method] + int(score > lowmax),
+            self.scores.iteritems()
+        )
+        winners.sort(
+            key = lambda (method, score): (
+                -(score - self.min_scores[method] - int(score > lowmax)),
+                self.preference.index(method)
+            )
+        )
+        winner, score = winners[0]
+
+        length = min(score, max_length)
+        self.do_cmd(winner, length)
+        self.address += length
+
+    def do_cmd(self, cmd, length):
+        start_address = self.address
+
+        cmd_length = length - 1
+
+        output = []
+
+        if length > lowmax:
+            output.append(
+                (self.commands['long'] << 5)
+              + (self.commands[cmd] << 2)
+              + (cmd_length >> 8)
+            )
+            output.append(
+                cmd_length & 0xff
+            )
+        else:
+            output.append(
+                (self.commands[cmd] << 5)
+              + cmd_length
+            )
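+        # Worked example (illustrative): for a short command (length <=
+        # lowmax), a command code of 1 with length 6 encodes as the single
+        # byte (1 << 5) + 5 = 0x25; longer commands use the two-byte 'long'
+        # form, spreading the 10-bit (length - 1) across the low bits of the
+        # first byte and all of the second.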
+
+        self.helpers['blank'] = [] # quick hack
+        output += self.helpers.get(cmd, [])
+
+        if cmd in self.lookback_methods:
+            offset = self.offsets[cmd]
+            # Negative offsets are one byte.
+            # Positive offsets are two.
+            if 0 < start_address - offset - 1 <= 0x7f:
+                offset = (start_address - offset - 1) | 0x80
+                output += [offset]
+            else:
+                output += [offset / 0x100, offset % 0x100] # big endian
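+            # Worked example (illustrative addresses): with start_address at
+            # 0x120, a match at offset 0x100 gives 0x120 - 0x100 - 1 = 0x1f,
+            # which fits in 7 bits and is written as the single byte 0x9f
+            # (0x1f | 0x80); a match at offset 0x20 is too far back and is
+            # written as the absolute big-endian pair 0x00 0x20.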
+
+        if self.debug:
+            print ' '.join(map(str, [
+                  cmd, length, '\t',
+                  ' '.join(map('{:02x}'.format, output)),
+                  self.data[start_address:start_address+length] if cmd in self.lookback_methods else '',
+            ]))
+
+        self.output += output
+
+
+
+class Decompressed:
+    """
+    Interpret and decompress lz-compressed data, usually 2bpp.
+
+    Usage:
+        data = Decompressed(lz).output
+    or
+        data = Decompressed().decompress(lz)
+    or
+        d = Decompressed()
+        d.lz = lz
+        data = d.decompress()
+
+    To decompress from offset 0x80000 in a rom:
+        data = Decompressed(rom, start=0x80000).output
+    """
+
+    lz = None
+    start = 0
+    commands = lz_commands
+    debug = False
+
+    arg_names = 'lz', 'start', 'commands', 'debug'
+
+    def __init__(self, *args, **kwargs):
+        self.__dict__.update(dict(zip(self.arg_names, args)))
+        self.__dict__.update(kwargs)
+
+        self.command_names = dict(map(reversed, self.commands.items()))
+        self.address = self.start
+
+        if self.lz is not None:
+            self.decompress()
+
+        if self.debug: print self.command_list()
+
+
+    def command_list(self):
+        """
+        Print a list of commands that were used. Useful for debugging.
+        """
+
+        text = ''
+
+        output_address = 0
+        for name, attrs in self.used_commands:
+            length     = attrs['length']
+            address    = attrs['address']
+            offset     = attrs['offset']
+            direction  = attrs['direction']
+
+            text += '{2:03x} {0}: {1}'.format(name, length, output_address)
+            text += '\t' + ' '.join(
+                '{:02x}'.format(int(byte))
+                for byte in self.lz[ address : address + attrs['cmd_length'] ]
+            )
+
+            if offset is not None:
+                repeated_data = self.output[ offset : offset + length * direction : direction ]
+                if name == 'flip':
+                    repeated_data = map(bit_flipped.__getitem__, repeated_data)
+                text += ' [' + ' '.join(map('{:02x}'.format, repeated_data)) + ']'
+
+            text += '\n'
+            output_address += length
+
+        return text
+
+
+    def decompress(self, lz=None):
+
+        if lz is not None:
+            self.lz = lz
+
+        self.lz = bytearray(self.lz)
+
+        self.used_commands = []
+        self.output = []
+
+        while 1:
+
+            cmd_address = self.address
+            self.offset = None
+            self.direction = None
+
+            if (self.byte == lz_end):
+                self.next()
+                break
+
+            self.cmd = (self.byte & 0b11100000) >> 5
+
+            if self.cmd_name == 'long':
+                # 10-bit length
+                self.cmd = (self.byte & 0b00011100) >> 2
+                self.length = (self.next() & 0b00000011) * 0x100
+                self.length += self.next() + 1
+            else:
+                # 5-bit length
+                self.length = (self.next() & 0b00011111) + 1
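+            # Worked example (illustrative): a command byte of 0x25
+            # (0b001_00101) is command code 1 with length
+            # (0x25 & 0b00011111) + 1 = 6; the 'long' form instead spreads a
+            # 10-bit length across the low two bits of this byte and the
+            # whole of the next one.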
+
+            self.__class__.__dict__[self.cmd_name](self)
+
+            self.used_commands += [(
+                self.cmd_name,
+                {
+                    'length':     self.length,
+                    'address':    cmd_address,
+                    'offset':     self.offset,
+                    'cmd_length': self.address - cmd_address,
+                    'direction':  self.direction,
+                }
+            )]
+
+        # Keep track of the data we just decompressed.
+        self.compressed_data = self.lz[self.start : self.address]
+
+
+    @property
+    def byte(self):
+        return self.lz[ self.address ]
+
+    def next(self):
+        byte = self.byte
+        self.address += 1
+        return byte
+
+    @property
+    def cmd_name(self):
+        return self.command_names.get(self.cmd)
+
+
+    def get_offset(self):
+
+        if self.byte >= 0x80:
+            # negative
+            offset = self.next() & 0x7f
+            offset = len(self.output) - offset - 1
+        else:
+            # positive
+            offset =  self.next() * 0x100
+            offset += self.next()
+
+        self.offset = offset
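+        # Worked example (illustrative): if 0x2f3 bytes have been output so
+        # far, the byte 0x9f decodes to offset 0x2f3 - 0x1f - 1 = 0x2d3,
+        # while the pair 0x00 0x20 decodes to the absolute offset 0x20.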
+
+
+    def literal(self):
+        """
+        Copy data directly.
+        """
+        self.output  += self.lz[ self.address : self.address + self.length ]
+        self.address += self.length
+
+    def iterate(self):
+        """
+        Write one byte repeatedly.
+        """
+        self.output += [self.next()] * self.length
+
+    def alternate(self):
+        """
+        Write alternating bytes.
+        """
+        alts = [self.next(), self.next()]
+        self.output += [ alts[x & 1] for x in xrange(self.length) ]
+
+    def blank(self):
+        """
+        Write zeros.
+        """
+        self.output += [0] * self.length
+
+    def flip(self):
+        """
+        Repeat flipped bytes from output.
+
+        Example: 11100100 -> 00100111
+        """
+        self._repeat(table=bit_flipped)
+
+    def reverse(self):
+        """
+        Repeat reversed bytes from output.
+        """
+        self._repeat(direction=-1)
+
+    def repeat(self):
+        """
+        Repeat bytes from output.
+        """
+        self._repeat()
+
+    def _repeat(self, direction=1, table=None):
+        self.get_offset()
+        self.direction = direction
+        # Note: appends must be one at a time (this way, repeats can draw from themselves if required)
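+        # For instance, if the output currently ends at index 9 and a repeat
+        # command arrives with offset 9 and length 3, the byte at index 9 is
+        # replicated three times, because each pass reads a byte this loop
+        # has just written.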
+        for i in xrange(self.length):
+            byte = self.output[ self.offset + i * direction ]
+            self.output.append( table[byte] if table else byte )
--- /dev/null
+++ b/tools/pokemontools/png.py
@@ -1,0 +1,2650 @@
+#!/usr/bin/env python
+
+from __future__ import print_function
+
+# png.py - PNG encoder/decoder in pure Python
+#
+# Copyright (C) 2006 Johann C. Rocholl <johann@browsershots.org>
+# Portions Copyright (C) 2009 David Jones <drj@pobox.com>
+# And probably portions Copyright (C) 2006 Nicko van Someren <nicko@nicko.org>
+#
+# Original concept by Johann C. Rocholl.
+#
+# LICENCE (MIT)
+#
+# Permission is hereby granted, free of charge, to any person
+# obtaining a copy of this software and associated documentation files
+# (the "Software"), to deal in the Software without restriction,
+# including without limitation the rights to use, copy, modify, merge,
+# publish, distribute, sublicense, and/or sell copies of the Software,
+# and to permit persons to whom the Software is furnished to do so,
+# subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+# SOFTWARE.
+
+"""
+Pure Python PNG Reader/Writer
+
+This Python module implements support for PNG images (see PNG
+specification at http://www.w3.org/TR/2003/REC-PNG-20031110/ ). It reads
+and writes PNG files with all allowable bit depths
+(1/2/4/8/16/24/32/48/64 bits per pixel) and colour combinations:
+greyscale (1/2/4/8/16 bit); RGB, RGBA, LA (greyscale with alpha) with
+8/16 bits per channel; colour mapped images (1/2/4/8 bit).
+Adam7 interlacing is supported for reading and
+writing.  A number of optional chunks can be specified (when writing)
+and understood (when reading): ``tRNS``, ``bKGD``, ``gAMA``.
+
+For help, type ``import png; help(png)`` in your python interpreter.
+
+A good place to start is the :class:`Reader` and :class:`Writer`
+classes.
+
+Requires Python 2.3.  Limited support is available for Python 2.2, but
+not everything works.  Best with Python 2.4 and higher.  Installation is
+trivial, but see the ``README.txt`` file (with the source distribution)
+for details.
+
+This file can also be used as a command-line utility to convert
+`Netpbm <http://netpbm.sourceforge.net/>`_ PNM files to PNG, and the
+reverse conversion from PNG to PNM. The interface is similar to that
+of the ``pnmtopng`` program from Netpbm.  Type ``python png.py --help``
+at the shell prompt for usage and a list of options.
+
+A note on spelling and terminology
+----------------------------------
+
+Generally British English spelling is used in the documentation.  So
+that's "greyscale" and "colour".  This not only matches the author's
+native language, it's also used by the PNG specification.
+
+The major colour models supported by PNG (and hence by PyPNG) are:
+greyscale, RGB, greyscale--alpha, RGB--alpha.  These are sometimes
+referred to using the abbreviations: L, RGB, LA, RGBA.  In this case
+each letter abbreviates a single channel: *L* is for Luminance or Luma
+or Lightness which is the channel used in greyscale images; *R*, *G*,
+*B* stand for Red, Green, Blue, the components of a colour image; *A*
+stands for Alpha, the opacity channel (used for transparency effects,
+but higher values are more opaque, so it makes sense to call it 
+opacity).
+
+A note on formats
+-----------------
+
+When getting pixel data out of this module (reading) and presenting
+data to this module (writing) there are a number of ways the data could
+be represented as a Python value.  Generally this module uses one of
+three formats called "flat row flat pixel", "boxed row flat pixel", and
+"boxed row boxed pixel".  Basically the concern is whether each pixel
+and each row comes in its own little tuple (box), or not.
+
+Consider an image that is 3 pixels wide by 2 pixels high, and each pixel
+has RGB components:
+
+Boxed row flat pixel::
+
+  list([R,G,B, R,G,B, R,G,B],
+       [R,G,B, R,G,B, R,G,B])
+
+Each row appears as its own list, but the pixels are flattened so
+that three values for one pixel simply follow the three values for
+the previous pixel.  This is the most common format used, because it
+provides a good compromise between space and convenience.  PyPNG regards
+itself as at liberty to replace any sequence type with any sufficiently
+compatible other sequence type; in practice each row is an array (from
+the array module), and the outer list is sometimes an iterator rather
+than an explicit list (so that streaming is possible).
+
+Flat row flat pixel::
+
+  [R,G,B, R,G,B, R,G,B,
+   R,G,B, R,G,B, R,G,B]
+
+The entire image is one single giant sequence of colour values.
+Generally an array will be used (to save space), not a list.
+
+Boxed row boxed pixel::
+
+  list([ (R,G,B), (R,G,B), (R,G,B) ],
+       [ (R,G,B), (R,G,B), (R,G,B) ])
+
+Each row appears in its own list, but each pixel also appears in its own
+tuple.  A serious memory burn in Python.
+
+In all cases the top row comes first, and for each row the pixels are
+ordered from left-to-right.  Within a pixel the values appear in the
+order, R-G-B-A (or L-A for greyscale--alpha).
+
+There is a fourth format, mentioned because it is used internally,
+is close to what lies inside a PNG file itself, and has some support
+from the public API.  This format is called packed.  When packed,
+each row is a sequence of bytes (integers from 0 to 255), just as
+it is before PNG scanline filtering is applied.  When the bit depth
+is 8 this is essentially the same as boxed row flat pixel; when the
+bit depth is less than 8, several pixels are packed into each byte;
+when the bit depth is 16 (the only value more than 8 that is supported
+by the PNG image format) each pixel value is decomposed into 2 bytes
+(and `packed` is a misnomer).  This format is used by the
+:meth:`Writer.write_packed` method.  It isn't usually a convenient
+format, but may be just right if the source data for the PNG image
+comes from something that uses a similar format (for example, 1-bit
+BMPs, or another PNG file).
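+
+As a small illustration: at a bit depth of 2 the four pixel values
+3, 0, 2, 1 pack into the single byte 0b11001001 (0xc9), most significant
+bits first.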
+
+And now, my famous members
+--------------------------
+"""
+
+__version__ = "0.0.18"
+
+import itertools
+import math
+# http://www.python.org/doc/2.4.4/lib/module-operator.html
+import operator
+import struct
+import sys
+# http://www.python.org/doc/2.4.4/lib/module-warnings.html
+import warnings
+import zlib
+
+from array import array
+from functools import reduce
+
+try:
+    # `cpngfilters` is a Cython module: it must be compiled by
+    # Cython for this import to work.
+    # If this import does work, then it overrides pure-python
+    # filtering functions defined later in this file (see `class
+    # pngfilters`).
+    import cpngfilters as pngfilters
+except ImportError:
+    pass
+
+
+__all__ = ['Image', 'Reader', 'Writer', 'write_chunks', 'from_array']
+
+
+# The PNG signature.
+# http://www.w3.org/TR/PNG/#5PNG-file-signature
+_signature = struct.pack('8B', 137, 80, 78, 71, 13, 10, 26, 10)
+
+_adam7 = ((0, 0, 8, 8),
+          (4, 0, 8, 8),
+          (0, 4, 4, 8),
+          (2, 0, 4, 4),
+          (0, 2, 2, 4),
+          (1, 0, 2, 2),
+          (0, 1, 1, 2))
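+# Each _adam7 entry is an (xstart, ystart, xstep, ystep) tuple describing
+# one interlace pass, as unpacked wherever _adam7 is iterated below.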
+
+def group(s, n):
+    # See http://www.python.org/doc/2.6/library/functions.html#zip
+    return list(zip(*[iter(s)]*n))
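+# For example, group([1, 2, 3, 4, 5, 6], 3) gives [(1, 2, 3), (4, 5, 6)];
+# trailing values that do not fill a whole group are dropped.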
+
+def isarray(x):
+    return isinstance(x, array)
+
+def tostring(row):
+    return row.tostring()
+
+def interleave_planes(ipixels, apixels, ipsize, apsize):
+    """
+    Interleave (colour) planes, e.g. RGB + A = RGBA.
+
+    Return an array of pixels consisting of the `ipsize` elements of
+    data from each pixel in `ipixels` followed by the `apsize` elements
+    of data from each pixel in `apixels`.  Conventionally `ipixels`
+    and `apixels` are byte arrays so the sizes are bytes, but it
+    actually works with any arrays of the same type.  The returned
+    array is the same type as the input arrays which should be the
+    same type as each other.
+    """
+
+    itotal = len(ipixels)
+    atotal = len(apixels)
+    newtotal = itotal + atotal
+    newpsize = ipsize + apsize
+    # Set up the output buffer
+    # See http://www.python.org/doc/2.4.4/lib/module-array.html#l2h-1356
+    out = array(ipixels.typecode)
+    # It's annoying that there is no cheap way to set the array size :-(
+    out.extend(ipixels)
+    out.extend(apixels)
+    # Interleave in the pixel data
+    for i in range(ipsize):
+        out[i:newtotal:newpsize] = ipixels[i:itotal:ipsize]
+    for i in range(apsize):
+        out[i+ipsize:newtotal:newpsize] = apixels[i:atotal:apsize]
+    return out
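+# Illustrative call: interleave_planes(array('B', [1,2,3, 4,5,6]),
+# array('B', [9, 9]), 3, 1) returns array('B', [1,2,3,9, 4,5,6,9]).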
+
+def check_palette(palette):
+    """Check a palette argument (to the :class:`Writer` class)
+    for validity.  Returns the palette as a list if okay; raises an
+    exception otherwise.
+    """
+
+    # None is the default and is allowed.
+    if palette is None:
+        return None
+
+    p = list(palette)
+    if not (0 < len(p) <= 256):
+        raise ValueError("a palette must have between 1 and 256 entries")
+    seen_triple = False
+    for i,t in enumerate(p):
+        if len(t) not in (3,4):
+            raise ValueError(
+              "palette entry %d: entries must be 3- or 4-tuples." % i)
+        if len(t) == 3:
+            seen_triple = True
+        if seen_triple and len(t) == 4:
+            raise ValueError(
+              "palette entry %d: all 4-tuples must precede all 3-tuples" % i)
+        for x in t:
+            if int(x) != x or not(0 <= x <= 255):
+                raise ValueError(
+                  "palette entry %d: values must be integer: 0 <= x <= 255" % i)
+    return p
+
+def check_sizes(size, width, height):
+    """Check that these arguments, if supplied, are consistent.
+    Return a (width, height) pair.
+    """
+
+    if not size:
+        return width, height
+
+    if len(size) != 2:
+        raise ValueError(
+          "size argument should be a pair (width, height)")
+    if width is not None and width != size[0]:
+        raise ValueError(
+          "size[0] (%r) and width (%r) should match when both are used."
+            % (size[0], width))
+    if height is not None and height != size[1]:
+        raise ValueError(
+          "size[1] (%r) and height (%r) should match when both are used."
+            % (size[1], height))
+    return size
+
+def check_color(c, greyscale, which):
+    """Checks that a colour argument for the transparent or
+    background options is in the right form.  Returns the colour
+    (which, if it's a bare integer, is "corrected" to a 1-tuple).
+    """
+
+    if c is None:
+        return c
+    if greyscale:
+        try:
+            len(c)
+        except TypeError:
+            c = (c,)
+        if len(c) != 1:
+            raise ValueError("%s for greyscale must be 1-tuple" %
+                which)
+        if not isinteger(c[0]):
+            raise ValueError(
+                "%s colour for greyscale must be integer" % which)
+    else:
+        if not (len(c) == 3 and
+                isinteger(c[0]) and
+                isinteger(c[1]) and
+                isinteger(c[2])):
+            raise ValueError(
+                "%s colour must be a triple of integers" % which)
+    return c
+
+class Error(Exception):
+    def __str__(self):
+        return self.__class__.__name__ + ': ' + ' '.join(self.args)
+
+class FormatError(Error):
+    """Problem with input file format.  In other words, the PNG file does
+    not conform to the specification in some way and is invalid.
+    """
+
+class ChunkError(FormatError):
+    pass
+
+
+class Writer:
+    """
+    PNG encoder in pure Python.
+    """
+
+    def __init__(self, width=None, height=None,
+                 size=None,
+                 greyscale=False,
+                 alpha=False,
+                 bitdepth=8,
+                 palette=None,
+                 transparent=None,
+                 background=None,
+                 gamma=None,
+                 compression=None,
+                 interlace=False,
+                 bytes_per_sample=None, # deprecated
+                 planes=None,
+                 colormap=None,
+                 maxval=None,
+                 chunk_limit=2**20,
+                 x_pixels_per_unit = None,
+                 y_pixels_per_unit = None,
+                 unit_is_meter = False):
+        """
+        Create a PNG encoder object.
+
+        Arguments:
+
+        width, height
+          Image size in pixels, as two separate arguments.
+        size
+          Image size (w,h) in pixels, as single argument.
+        greyscale
+          Input data is greyscale, not RGB.
+        alpha
+          Input data has alpha channel (RGBA or LA).
+        bitdepth
+          Bit depth: from 1 to 16.
+        palette
+          Create a palette for a colour mapped image (colour type 3).
+        transparent
+          Specify a transparent colour (create a ``tRNS`` chunk).
+        background
+          Specify a default background colour (create a ``bKGD`` chunk).
+        gamma
+          Specify a gamma value (create a ``gAMA`` chunk).
+        compression
+          zlib compression level: 0 (none) to 9 (more compressed);
+          default: -1 or None.
+        interlace
+          Create an interlaced image.
+        chunk_limit
+          Write multiple ``IDAT`` chunks to save memory.
+        x_pixels_per_unit
+          Number of pixels per unit along the x axis (write a
+          `pHYs` chunk).
+        y_pixels_per_unit
+          Number of pixels per unit along the y axis (write a
+          `pHYs` chunk). Along with `x_pixels_per_unit`, this gives
+          the pixel size ratio.
+        unit_is_meter
+          `True` to indicate that the unit (for the `pHYs`
+          chunk) is metre.
+
+        The image size (in pixels) can be specified either by using the
+        `width` and `height` arguments, or with the single `size`
+        argument.  If `size` is used it should be a pair (*width*,
+        *height*).
+
+        `greyscale` and `alpha` are booleans that specify whether
+        an image is greyscale (or colour), and whether it has an
+        alpha channel (or not).
+
+        `bitdepth` specifies the bit depth of the source pixel values.
+        Each source pixel value must be an integer between 0 and
+        ``2**bitdepth-1``.  For example, 8-bit images have values
+        between 0 and 255.  PNG only stores images with bit depths of
+        1,2,4,8, or 16.  When `bitdepth` is not one of these values,
+        the next highest valid bit depth is selected, and an ``sBIT``
+        (significant bits) chunk is generated that specifies the
+        original precision of the source image.  In this case the
+        supplied pixel values will be rescaled to fit the range of
+        the selected bit depth.
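+        For example, a greyscale source with bit depth 5 is written as
+        an 8-bit PNG with an ``sBIT`` chunk recording 5, each value
+        being rescaled by a factor of 255/31.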
+
+        The details of which bit depth / colour model combinations the
+        PNG file format supports directly, are somewhat arcane
+        (refer to the PNG specification for full details).  Briefly:
+        "small" bit depths (1,2,4) are only allowed with greyscale and
+        colour mapped images; colour mapped images cannot have bit depth
+        16.
+
+        For colour mapped images (in other words, when the `palette`
+        argument is specified) the `bitdepth` argument must match one of
+        the valid PNG bit depths: 1, 2, 4, or 8.  (It is valid to have a
+        PNG image with a palette and an ``sBIT`` chunk, but the meaning
+        is slightly different; it would be awkward to press the
+        `bitdepth` argument into service for this.)
+
+        The `palette` option, when specified, causes a colour
+        mapped image to be created: the PNG colour type is set to 3;
+        `greyscale` must not be set; `alpha` must not be set;
+        `transparent` must not be set; the bit depth must be 1,2,4,
+        or 8.  When a colour mapped image is created, the pixel values
+        are palette indexes and the `bitdepth` argument specifies the
+        size of these indexes (not the size of the colour values in
+        the palette).
+
+        The palette argument value should be a sequence of 3- or
+        4-tuples.  3-tuples specify RGB palette entries; 4-tuples
+        specify RGBA palette entries.  If both 4-tuples and 3-tuples
+        appear in the sequence then all the 4-tuples must come
+        before all the 3-tuples.  A ``PLTE`` chunk is created; if there
+        are 4-tuples then a ``tRNS`` chunk is created as well.  The
+        ``PLTE`` chunk will contain all the RGB triples in the same
+        sequence; the ``tRNS`` chunk will contain the alpha channel for
+        all the 4-tuples, in the same sequence.  Palette entries
+        are always 8-bit.
+
+        If specified, the `transparent` and `background` parameters must
+        be a tuple with three integer values for red, green, blue, or
+        a simple integer (or singleton tuple) for a greyscale image.
+
+        If specified, the `gamma` parameter must be a positive number
+        (generally, a `float`).  A ``gAMA`` chunk will be created.
+        Note that this will not change the values of the pixels as
+        they appear in the PNG file, they are assumed to have already
+        been converted appropriately for the gamma specified.
+
+        The `compression` argument specifies the compression level to
+        be used by the ``zlib`` module.  Values from 1 to 9 specify
+        compression, with 9 being "more compressed" (usually smaller
+        and slower, but it doesn't always work out that way).  0 means
+        no compression.  -1 and ``None`` both mean that the default
+        level of compression will be picked by the ``zlib`` module
+        (which is generally acceptable).
+
+        If `interlace` is true then an interlaced image is created
+        (using PNG's so far only interlace method, *Adam7*).  This does
+        not affect how the pixels should be presented to the encoder,
+        rather it changes how they are arranged into the PNG file.
+        On slow connexions interlaced images can be partially decoded
+        by the browser to give a rough view of the image that is
+        successively refined as more image data appears.
+
+        .. note ::
+
+          Enabling the `interlace` option requires the entire image
+          to be processed in working memory.
+
+        `chunk_limit` is used to limit the amount of memory used whilst
+        compressing the image.  In order to avoid using large amounts of
+        memory, multiple ``IDAT`` chunks may be created.
+        """
+
+        # At the moment the `planes` argument is ignored;
+        # its purpose is to act as a dummy so that
+        # ``Writer(x, y, **info)`` works, where `info` is a dictionary
+        # returned by Reader.read and friends.
+        # Ditto for `colormap`.
+
+        width, height = check_sizes(size, width, height)
+        del size
+
+        if width <= 0 or height <= 0:
+            raise ValueError("width and height must be greater than zero")
+        if not isinteger(width) or not isinteger(height):
+            raise ValueError("width and height must be integers")
+        # http://www.w3.org/TR/PNG/#7Integers-and-byte-order
+        if width > 2**32-1 or height > 2**32-1:
+            raise ValueError("width and height cannot exceed 2**32-1")
+
+        if alpha and transparent is not None:
+            raise ValueError(
+                "transparent colour not allowed with alpha channel")
+
+        if bytes_per_sample is not None:
+            warnings.warn('please use bitdepth instead of bytes_per_sample',
+                          DeprecationWarning)
+            if bytes_per_sample not in (0.125, 0.25, 0.5, 1, 2):
+                raise ValueError(
+                    "bytes per sample must be .125, .25, .5, 1, or 2")
+            bitdepth = int(8*bytes_per_sample)
+        del bytes_per_sample
+        if not isinteger(bitdepth) or bitdepth < 1 or 16 < bitdepth:
+            raise ValueError("bitdepth (%r) must be a positive integer <= 16" %
+              bitdepth)
+
+        self.rescale = None
+        palette = check_palette(palette)
+        if palette:
+            if bitdepth not in (1,2,4,8):
+                raise ValueError("with palette, bitdepth must be 1, 2, 4, or 8")
+            if transparent is not None:
+                raise ValueError("transparent and palette not compatible")
+            if alpha:
+                raise ValueError("alpha and palette not compatible")
+            if greyscale:
+                raise ValueError("greyscale and palette not compatible")
+        else:
+            # No palette, check for sBIT chunk generation.
+            if alpha or not greyscale:
+                if bitdepth not in (8,16):
+                    targetbitdepth = (8,16)[bitdepth > 8]
+                    self.rescale = (bitdepth, targetbitdepth)
+                    bitdepth = targetbitdepth
+                    del targetbitdepth
+            else:
+                assert greyscale
+                assert not alpha
+                if bitdepth not in (1,2,4,8,16):
+                    if bitdepth > 8:
+                        targetbitdepth = 16
+                    elif bitdepth == 3:
+                        targetbitdepth = 4
+                    else:
+                        assert bitdepth in (5,6,7)
+                        targetbitdepth = 8
+                    self.rescale = (bitdepth, targetbitdepth)
+                    bitdepth = targetbitdepth
+                    del targetbitdepth
+
+        if bitdepth < 8 and (alpha or not greyscale and not palette):
+            raise ValueError(
+              "bitdepth < 8 only permitted with greyscale or palette")
+        if bitdepth > 8 and palette:
+            raise ValueError(
+                "bit depth must be 8 or less for images with palette")
+
+        transparent = check_color(transparent, greyscale, 'transparent')
+        background = check_color(background, greyscale, 'background')
+
+        # It's important that the true boolean values (greyscale, alpha,
+        # colormap, interlace) are converted to bool because Iverson's
+        # convention is relied upon later on.
+        self.width = width
+        self.height = height
+        self.transparent = transparent
+        self.background = background
+        self.gamma = gamma
+        self.greyscale = bool(greyscale)
+        self.alpha = bool(alpha)
+        self.colormap = bool(palette)
+        self.bitdepth = int(bitdepth)
+        self.compression = compression
+        self.chunk_limit = chunk_limit
+        self.interlace = bool(interlace)
+        self.palette = palette
+        self.x_pixels_per_unit = x_pixels_per_unit
+        self.y_pixels_per_unit = y_pixels_per_unit
+        self.unit_is_meter = bool(unit_is_meter)
+
+        self.color_type = 4*self.alpha + 2*(not greyscale) + 1*self.colormap
+        assert self.color_type in (0,2,3,4,6)
+
+        self.color_planes = (3,1)[self.greyscale or self.colormap]
+        self.planes = self.color_planes + self.alpha
+        # :todo: fix for bitdepth < 8
+        self.psize = (self.bitdepth/8) * self.planes
+
+    def make_palette(self):
+        """Create the byte sequences for a ``PLTE`` and if necessary a
+        ``tRNS`` chunk.  Returned as a pair (*p*, *t*).  *t* will be
+        ``None`` if no ``tRNS`` chunk is necessary.
+        """
+
+        p = array('B')
+        t = array('B')
+
+        for x in self.palette:
+            p.extend(x[0:3])
+            if len(x) > 3:
+                t.append(x[3])
+        p = tostring(p)
+        t = tostring(t)
+        if t:
+            return p,t
+        return p,None
+
+    def write(self, outfile, rows):
+        """Write a PNG image to the output file.  `rows` should be
+        an iterable that yields each row in boxed row flat pixel
+        format.  The rows should be the rows of the original image,
+        so there should be ``self.height`` rows of ``self.width *
+        self.planes`` values.  If `interlace` is specified (when
+        creating the instance), then an interlaced PNG file will
+        be written.  Supply the rows in the normal image order;
+        the interlacing is carried out internally.
+
+        .. note ::
+
+          Interlacing will require the entire image to be in working
+          memory.
+        """
+
+        if self.interlace:
+            fmt = 'BH'[self.bitdepth > 8]
+            a = array(fmt, itertools.chain(*rows))
+            return self.write_array(outfile, a)
+
+        nrows = self.write_passes(outfile, rows)
+        if nrows != self.height:
+            raise ValueError(
+              "rows supplied (%d) does not match height (%d)" %
+              (nrows, self.height))
+
+    def write_passes(self, outfile, rows, packed=False):
+        """
+        Write a PNG image to the output file.
+
+        Most users are expected to find the :meth:`write` or
+        :meth:`write_array` method more convenient.
+        
+        The rows should be given to this method in the order that
+        they appear in the output file.  For straightlaced images,
+        this is the usual top to bottom ordering, but for interlaced
+        images the rows should have already been interlaced before
+        passing them to this function.
+
+        `rows` should be an iterable that yields each row.  When
+        `packed` is ``False`` the rows should be in boxed row flat pixel
+        format; when `packed` is ``True`` each row should be a packed
+        sequence of bytes.
+        """
+
+        # http://www.w3.org/TR/PNG/#5PNG-file-signature
+        outfile.write(_signature)
+
+        # http://www.w3.org/TR/PNG/#11IHDR
+        write_chunk(outfile, b'IHDR',
+                    struct.pack("!2I5B", self.width, self.height,
+                                self.bitdepth, self.color_type,
+                                0, 0, self.interlace))
+
+        # See :chunk:order
+        # http://www.w3.org/TR/PNG/#11gAMA
+        if self.gamma is not None:
+            write_chunk(outfile, b'gAMA',
+                        struct.pack("!L", int(round(self.gamma*1e5))))
+
+        # See :chunk:order
+        # http://www.w3.org/TR/PNG/#11sBIT
+        if self.rescale:
+            write_chunk(outfile, b'sBIT',
+                struct.pack('%dB' % self.planes,
+                            *[self.rescale[0]]*self.planes))
+        
+        # :chunk:order: Without a palette (PLTE chunk), ordering is
+        # relatively relaxed.  With one, gAMA chunk must precede PLTE
+        # chunk which must precede tRNS and bKGD.
+        # See http://www.w3.org/TR/PNG/#5ChunkOrdering
+        if self.palette:
+            p,t = self.make_palette()
+            write_chunk(outfile, b'PLTE', p)
+            if t:
+                # tRNS chunk is optional. Only needed if palette entries
+                # have alpha.
+                write_chunk(outfile, b'tRNS', t)
+
+        # http://www.w3.org/TR/PNG/#11tRNS
+        if self.transparent is not None:
+            if self.greyscale:
+                write_chunk(outfile, b'tRNS',
+                            struct.pack("!1H", *self.transparent))
+            else:
+                write_chunk(outfile, b'tRNS',
+                            struct.pack("!3H", *self.transparent))
+
+        # http://www.w3.org/TR/PNG/#11bKGD
+        if self.background is not None:
+            if self.greyscale:
+                write_chunk(outfile, b'bKGD',
+                            struct.pack("!1H", *self.background))
+            else:
+                write_chunk(outfile, b'bKGD',
+                            struct.pack("!3H", *self.background))
+
+        # http://www.w3.org/TR/PNG/#11pHYs
+        if self.x_pixels_per_unit is not None and self.y_pixels_per_unit is not None:
+            tup = (self.x_pixels_per_unit, self.y_pixels_per_unit, int(self.unit_is_meter))
+            write_chunk(outfile, b'pHYs', struct.pack("!LLB",*tup))
+
+        # http://www.w3.org/TR/PNG/#11IDAT
+        if self.compression is not None:
+            compressor = zlib.compressobj(self.compression)
+        else:
+            compressor = zlib.compressobj()
+
+        # Choose an extend function based on the bitdepth.  The extend
+        # function packs/decomposes the pixel values into bytes and
+        # stuffs them onto the data array.
+        data = array('B')
+        if self.bitdepth == 8 or packed:
+            extend = data.extend
+        elif self.bitdepth == 16:
+            # Decompose into bytes
+            def extend(sl):
+                fmt = '!%dH' % len(sl)
+                data.extend(array('B', struct.pack(fmt, *sl)))
+        else:
+            # Pack into bytes
+            assert self.bitdepth < 8
+            # samples per byte
+            spb = int(8/self.bitdepth)
+            def extend(sl):
+                a = array('B', sl)
+                # Adding padding bytes so we can group into a whole
+                # number of spb-tuples.
+                l = float(len(a))
+                extra = math.ceil(l / float(spb))*spb - l
+                a.extend([0]*int(extra))
+                # Pack into bytes
+                l = group(a, spb)
+                l = [reduce(lambda x,y:
+                                           (x << self.bitdepth) + y, e) for e in l]
+                data.extend(l)
+        if self.rescale:
+            oldextend = extend
+            factor = \
+              float(2**self.rescale[1]-1) / float(2**self.rescale[0]-1)
+            def extend(sl):
+                oldextend([int(round(factor*x)) for x in sl])
+
+        # Build the first row, testing mostly to see if we need to
+        # change the extend function to cope with NumPy integer types
+        # (they cause our ordinary definition of extend to fail, so we
+        # wrap it).  See
+        # http://code.google.com/p/pypng/issues/detail?id=44
+        enumrows = enumerate(rows)
+        del rows
+
+        # First row's filter type.
+        data.append(0)
+        # :todo: Certain exceptions in the call to ``.next()`` or the
+        # following try would indicate no row data supplied.
+        # Should catch.
+        i,row = next(enumrows)
+        try:
+            # If this fails...
+            extend(row)
+        except:
+            # ... try a version that converts the values to int first.
+            # Not only does this work for the (slightly broken) NumPy
+            # types, there are probably lots of other, unknown, "nearly"
+            # int types it works for.
+            def wrapmapint(f):
+                return lambda sl: f([int(x) for x in sl])
+            extend = wrapmapint(extend)
+            del wrapmapint
+            extend(row)
+
+        for i,row in enumrows:
+            # Add "None" filter type.  Currently, it's essential that
+            # this filter type be used for every scanline as we do not
+            # mark the first row of a reduced pass image; that means we
+            # could accidentally compute the wrong filtered scanline if
+            # we used "up", "average", or "paeth" on such a line.
+            data.append(0)
+            extend(row)
+            if len(data) > self.chunk_limit:
+                compressed = compressor.compress(tostring(data))
+                if len(compressed):
+                    write_chunk(outfile, b'IDAT', compressed)
+                # Because of our very witty definition of ``extend``,
+                # above, we must re-use the same ``data`` object.  Hence
+                # we use ``del`` to empty this one, rather than create a
+                # fresh one (which would be my natural FP instinct).
+                del data[:]
+        if len(data):
+            compressed = compressor.compress(tostring(data))
+        else:
+            compressed = b''
+        flushed = compressor.flush()
+        if len(compressed) or len(flushed):
+            write_chunk(outfile, b'IDAT', compressed + flushed)
+        # http://www.w3.org/TR/PNG/#11IEND
+        write_chunk(outfile, b'IEND')
+        return i+1
+
+    def write_array(self, outfile, pixels):
+        """
+        Write an array in flat row flat pixel format as a PNG file on
+        the output file.  See also :meth:`write` method.
+        """
+
+        if self.interlace:
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.array_scanlines(pixels))
+
+    def write_packed(self, outfile, rows):
+        """
+        Write PNG file to `outfile`.  The pixel data comes from `rows`
+        which should be in boxed row packed format.  Each row should be
+        a sequence of packed bytes.
+
+        Technically, this method does work for interlaced images but it
+        is best avoided.  For interlaced images, the rows should be
+        presented in the order that they appear in the file.
+
+        This method should not be used when the source image bit depth
+        is not one naturally supported by PNG; the bit depth should be
+        1, 2, 4, 8, or 16.
+        """
+
+        if self.rescale:
+            raise Error("write_packed method not suitable for bit depth %d" %
+              self.rescale[0])
+        return self.write_passes(outfile, rows, packed=True)
+
+    def convert_pnm(self, infile, outfile):
+        """
+        Convert a PNM file containing raw pixel data into a PNG file
+        with the parameters set in the writer object.  Works for
+        (binary) PGM, PPM, and PAM formats.
+        """
+
+        if self.interlace:
+            pixels = array('B')
+            pixels.fromfile(infile,
+                            (self.bitdepth/8) * self.color_planes *
+                            self.width * self.height)
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.file_scanlines(infile))
+
+    def convert_ppm_and_pgm(self, ppmfile, pgmfile, outfile):
+        """
+        Convert a PPM and PGM file containing raw pixel data into a
+        PNG outfile with the parameters set in the writer object.
+        """
+        pixels = array('B')
+        pixels.fromfile(ppmfile,
+                        (self.bitdepth/8) * self.color_planes *
+                        self.width * self.height)
+        apixels = array('B')
+        apixels.fromfile(pgmfile,
+                         (self.bitdepth/8) *
+                         self.width * self.height)
+        pixels = interleave_planes(pixels, apixels,
+                                   (self.bitdepth/8) * self.color_planes,
+                                   (self.bitdepth/8))
+        if self.interlace:
+            self.write_passes(outfile, self.array_scanlines_interlace(pixels))
+        else:
+            self.write_passes(outfile, self.array_scanlines(pixels))
+
+    def file_scanlines(self, infile):
+        """
+        Generates boxed rows in flat pixel format, from the input file
+        `infile`.  It assumes that the input file is in a "Netpbm-like"
+        binary format, and is positioned at the beginning of the first
+        pixel.  The number of pixels to read is taken from the image
+        dimensions (`width`, `height`, `planes`) and the number of bytes
+        per value is implied by the image `bitdepth`.
+        """
+
+        # Values per row
+        vpr = self.width * self.planes
+        row_bytes = vpr
+        if self.bitdepth > 8:
+            assert self.bitdepth == 16
+            row_bytes *= 2
+            fmt = '>%dH' % vpr
+            def line():
+                return array('H', struct.unpack(fmt, infile.read(row_bytes)))
+        else:
+            def line():
+                scanline = array('B', infile.read(row_bytes))
+                return scanline
+        for y in range(self.height):
+            yield line()
+
+    def array_scanlines(self, pixels):
+        """
+        Generates boxed rows (flat pixels) from flat rows (flat pixels)
+        in an array.
+        """
+
+        # Values per row
+        vpr = self.width * self.planes
+        stop = 0
+        for y in range(self.height):
+            start = stop
+            stop = start + vpr
+            yield pixels[start:stop]
+
+    def array_scanlines_interlace(self, pixels):
+        """
+        Generator for interlaced scanlines from an array.  `pixels` is
+        the full source image in flat row flat pixel format.  The
+        generator yields each scanline of the reduced passes in turn, in
+        boxed row flat pixel format.
+        """
+
+        # http://www.w3.org/TR/PNG/#8InterlaceMethods
+        # Array type.
+        fmt = 'BH'[self.bitdepth > 8]
+        # Value per row
+        vpr = self.width * self.planes
+        for xstart, ystart, xstep, ystep in _adam7:
+            if xstart >= self.width:
+                continue
+            # Pixels per row (of reduced image)
+            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
+            # number of values in reduced image row.
+            row_len = ppr*self.planes
+            for y in range(ystart, self.height, ystep):
+                if xstep == 1:
+                    offset = y * vpr
+                    yield pixels[offset:offset+vpr]
+                else:
+                    row = array(fmt)
+                    # There's no easier way to set the length of an array
+                    row.extend(pixels[0:row_len])
+                    offset = y * vpr + xstart * self.planes
+                    end_offset = (y+1) * vpr
+                    skip = self.planes * xstep
+                    for i in range(self.planes):
+                        row[i::self.planes] = \
+                            pixels[offset+i:end_offset:skip]
+                    yield row
+
+def write_chunk(outfile, tag, data=b''):
+    """
+    Write a PNG chunk to the output file, including length and
+    checksum.
+    """
+
+    # http://www.w3.org/TR/PNG/#5Chunk-layout
+    outfile.write(struct.pack("!I", len(data)))
+    outfile.write(tag)
+    outfile.write(data)
+    checksum = zlib.crc32(tag)
+    checksum = zlib.crc32(data, checksum)
+    checksum &= 2**32-1
+    outfile.write(struct.pack("!I", checksum))
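+# For instance, write_chunk(f, b'IEND') emits the familiar final twelve
+# bytes of a PNG file: 00 00 00 00 49 45 4e 44 ae 42 60 82
+# (zero length, the tag, and its CRC).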
+
+def write_chunks(out, chunks):
+    """Create a PNG file by writing out the chunks."""
+
+    out.write(_signature)
+    for chunk in chunks:
+        write_chunk(out, *chunk)
+
+def filter_scanline(type, line, fo, prev=None):
+    """Apply a scanline filter to a scanline.  `type` specifies the
+    filter type (0 to 4); `line` specifies the current (unfiltered)
+    scanline as a sequence of bytes; `prev` specifies the previous
+    (unfiltered) scanline as a sequence of bytes. `fo` specifies the
+    filter offset; normally this is the size of a pixel in bytes (the number
+    of bytes per sample times the number of channels), but when this is
+    < 1 (for bit depths < 8) then the filter offset is 1.
+    """
+
+    assert 0 <= type < 5
+
+    # The output array.  Which, pathetically, we extend one-byte at a
+    # time (fortunately this is linear).
+    out = array('B', [type])
+
+    def sub():
+        ai = -fo
+        for x in line:
+            if ai >= 0:
+                x = (x - line[ai]) & 0xff
+            out.append(x)
+            ai += 1
+    def up():
+        for i,x in enumerate(line):
+            x = (x - prev[i]) & 0xff
+            out.append(x)
+    def average():
+        ai = -fo
+        for i,x in enumerate(line):
+            if ai >= 0:
+                x = (x - ((line[ai] + prev[i]) >> 1)) & 0xff
+            else:
+                x = (x - (prev[i] >> 1)) & 0xff
+            out.append(x)
+            ai += 1
+    def paeth():
+        # http://www.w3.org/TR/PNG/#9Filter-type-4-Paeth
+        ai = -fo # also used for ci
+        for i,x in enumerate(line):
+            a = 0
+            b = prev[i]
+            c = 0
+
+            if ai >= 0:
+                a = line[ai]
+                c = prev[ai]
+            p = a + b - c
+            pa = abs(p - a)
+            pb = abs(p - b)
+            pc = abs(p - c)
+            if pa <= pb and pa <= pc:
+                Pr = a
+            elif pb <= pc:
+                Pr = b
+            else:
+                Pr = c
+
+            x = (x - Pr) & 0xff
+            out.append(x)
+            ai += 1
+
+    if not prev:
+        # We're on the first line.  Some of the filters can be reduced
+        # to simpler cases which makes handling the line "off the top"
+        # of the image simpler.  "up" becomes "none"; "paeth" becomes
+        # "left" (non-trivial, but true). "average" needs to be handled
+        # specially.
+        if type == 2: # "up"
+            type = 0
+        elif type == 3:
+            prev = [0]*len(line)
+        elif type == 4: # "paeth"
+            type = 1
+    if type == 0:
+        out.extend(line)
+    elif type == 1:
+        sub()
+    elif type == 2:
+        up()
+    elif type == 3:
+        average()
+    else: # type == 4
+        paeth()
+    return out
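+# Illustrative example: with no previous scanline and fo=1,
+# filter_scanline(1, array('B', [10, 20, 30]), 1) returns
+# array('B', [1, 10, 10, 10]): the filter-type byte followed by the
+# "sub" deltas.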
+
+
+def from_array(a, mode=None, info={}):
+    """Create a PNG :class:`Image` object from a 2- or 3-dimensional
+    array.  One application of this function is easy PIL-style saving:
+    ``png.from_array(pixels, 'L').save('foo.png')``.
+
+    Unless they are specified using the *info* parameter, the PNG's
+    height and width are taken from the array size.  For a 3 dimensional
+    array the first axis is the height; the second axis is the width;
+    and the third axis is the channel number.  Thus an RGB image that is
+    16 pixels high and 8 wide will use an array that is 16x8x3.  For 2
+    dimensional arrays the first axis is the height, but the second axis
+    is ``width*channels``, so an RGB image that is 16 pixels high and 8
+    wide will use a 2-dimensional array that is 16x24 (each row will be
+    8*3 = 24 sample values).
+
+    *mode* is a string that specifies the image colour format in a
+    PIL-style mode.  It can be:
+
+    ``'L'``
+      greyscale (1 channel)
+    ``'LA'``
+      greyscale with alpha (2 channel)
+    ``'RGB'``
+      colour image (3 channel)
+    ``'RGBA'``
+      colour image with alpha (4 channel)
+
+    The mode string can also specify the bit depth (overriding how this
+    function normally derives the bit depth, see below).  Appending
+    ``';16'`` to the mode will cause the PNG to be 16 bits per channel;
+    any decimal from 1 to 16 can be used to specify the bit depth.
+
+    When a 2-dimensional array is used *mode* determines how many
+    channels the image has, and so allows the width to be derived from
+    the second array dimension.
+
+    The array is expected to be a ``numpy`` array, but it can be any
+    suitable Python sequence.  For example, a list of lists can be used:
+    ``png.from_array([[0, 255, 0], [255, 0, 255]], 'L')``.  The exact
+    rules are: ``len(a)`` gives the first dimension, height;
+    ``len(a[0])`` gives the second dimension; ``len(a[0][0])`` gives the
+    third dimension, unless an exception is raised in which case a
+    2-dimensional array is assumed.  It's slightly more complicated than
+    that because an iterator of rows can be used, and it all still
+    works.  Using an iterator allows data to be streamed efficiently.
+
+    The bit depth of the PNG is normally taken from the array element's
+    datatype (but if *mode* specifies a bitdepth then that is used
+    instead).  The array element's datatype is determined in a way which
+    is supposed to work both for ``numpy`` arrays and for Python
+    ``array.array`` objects.  A 1 byte datatype will give a bit depth of
+    8, a 2 byte datatype will give a bit depth of 16.  If the datatype
+    does not have an implicit size, for example it is a plain Python
+    list of lists, as above, then a default of 8 is used.
+
+    The *info* parameter is a dictionary that can be used to specify
+    metadata (in the same style as the arguments to the
+    :class:`png.Writer` class).  For this function the keys that are
+    useful are:
+    
+    height
+      overrides the height derived from the array dimensions and allows
+      *a* to be an iterable.
+    width
+      overrides the width derived from the array dimensions.
+    bitdepth
+      overrides the bit depth derived from the element datatype (but
+      must match *mode* if that also specifies a bit depth).
+
+    Generally anything specified in the
+    *info* dictionary will override any implicit choices that this
+    function would otherwise make, but must match any explicit ones.
+    For example, if the *info* dictionary has a ``greyscale`` key then
+    this must be true when mode is ``'L'`` or ``'LA'`` and false when
+    mode is ``'RGB'`` or ``'RGBA'``.
+    """
+
+    # We abuse the *info* parameter by modifying it.  Take a copy here.
+    # (Also typechecks *info* to some extent).
+    info = dict(info)
+
+    # Syntax check mode string.
+    bitdepth = None
+    try:
+        # Assign the 'L' or 'RGBA' part to `gotmode`.
+        if mode.startswith('L'):
+            gotmode = 'L'
+            mode = mode[1:]
+        elif mode.startswith('RGB'):
+            gotmode = 'RGB'
+            mode = mode[3:]
+        else:
+            raise Error()
+        if mode.startswith('A'):
+            gotmode += 'A'
+            mode = mode[1:]
+
+        # Skip any optional ';'
+        while mode.startswith(';'):
+            mode = mode[1:]
+
+        # Parse optional bitdepth
+        if mode:
+            try:
+                bitdepth = int(mode)
+            except (TypeError, ValueError):
+                raise Error()
+    except Error:
+        raise Error("mode string should be 'RGB' or 'L;16' or similar.")
+    mode = gotmode
+
+    # Get bitdepth from *mode* if possible.
+    if bitdepth:
+        if info.get('bitdepth') and bitdepth != info['bitdepth']:
+            raise Error("mode bitdepth (%d) should match info bitdepth (%d)." %
+              (bitdepth, info['bitdepth']))
+        info['bitdepth'] = bitdepth
+
+    # Fill in and/or check entries in *info*.
+    # Dimensions.
+    if 'size' in info:
+        # Check width, height, size all match where used.
+        for dimension,axis in [('width', 0), ('height', 1)]:
+            if dimension in info:
+                if info[dimension] != info['size'][axis]:
+                    raise Error(
+                      "info[%r] should match info['size'][%r]." %
+                      (dimension, axis))
+        info['width'],info['height'] = info['size']
+    if 'height' not in info:
+        try:
+            l = len(a)
+        except TypeError:
+            raise Error(
+              "len(a) does not work, supply info['height'] instead.")
+        info['height'] = l
+    # Colour format.
+    if 'greyscale' in info:
+        if bool(info['greyscale']) != ('L' in mode):
+            raise Error("info['greyscale'] should match mode.")
+    info['greyscale'] = 'L' in mode
+    if 'alpha' in info:
+        if bool(info['alpha']) != ('A' in mode):
+            raise Error("info['alpha'] should match mode.")
+    info['alpha'] = 'A' in mode
+
+    planes = len(mode)
+    if 'planes' in info:
+        if info['planes'] != planes:
+            raise Error("info['planes'] should match mode.")
+
+    # In order to work out whether the array is 2D or 3D we need its
+    # first row, which requires that we take a copy of its iterator.
+    # We may also need the first row to derive width and bitdepth.
+    a,t = itertools.tee(a)
+    row = next(t)
+    del t
+    try:
+        row[0][0]
+        threed = True
+        testelement = row[0]
+    except (IndexError, TypeError):
+        threed = False
+        testelement = row
+    if 'width' not in info:
+        if threed:
+            width = len(row)
+        else:
+            width = len(row) // planes
+        info['width'] = width
+
+    if threed:
+        # Flatten the threed rows
+        a = (itertools.chain.from_iterable(x) for x in a)
+
+    if 'bitdepth' not in info:
+        try:
+            dtype = testelement.dtype
+            # goto the "else:" clause.  Sorry.
+        except AttributeError:
+            try:
+                # Try a Python array.array.
+                bitdepth = 8 * testelement.itemsize
+            except AttributeError:
+                # We can't determine it from the array element's
+                # datatype, use a default of 8.
+                bitdepth = 8
+        else:
+            # If we got here without exception, we now assume that
+            # the array is a numpy array.
+            if dtype.kind == 'b':
+                bitdepth = 1
+            else:
+                bitdepth = 8 * dtype.itemsize
+        info['bitdepth'] = bitdepth
+
+    for thing in 'width height bitdepth greyscale alpha'.split():
+        assert thing in info
+    return Image(a, info)
+
+# So that refugees from PIL feel more at home.  Not documented.
+fromarray = from_array
+
+class Image:
+    """A PNG image.  You can create an :class:`Image` object from
+    an array of pixels by calling :meth:`png.from_array`.  It can be
+    saved to disk with the :meth:`save` method.
+    """
+
+    def __init__(self, rows, info):
+        """
+        .. note ::
+        
+          The constructor is not public.  Please do not call it.
+        """
+        
+        self.rows = rows
+        self.info = info
+
+    def save(self, file):
+        """Save the image to *file*.  If *file* looks like an open file
+        descriptor then it is used, otherwise it is treated as a
+        filename and a fresh file is opened.
+
+        In general, you can only call this method once; after it has
+        been called the first time and the PNG image has been saved, the
+        source data will have been streamed, and cannot be streamed
+        again.
+        """
+
+        w = Writer(**self.info)
+
+        try:
+            file.write
+            def close(): pass
+        except AttributeError:
+            file = open(file, 'wb')
+            def close(): file.close()
+
+        try:
+            w.write(file, self.rows)
+        finally:
+            close()
+
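+# Illustrative sketch (not part of the PyPNG API, never called): Image.save()
+# accepts either a filename or an already-open binary file object.  The
+# filename 'tiny_rgb.png' is hypothetical.
+def _example_image_save(path='tiny_rgb.png'):
+    """Save a 1x1 RGB image through an explicit file object."""
+    img = from_array([[255, 0, 0]], mode='RGB')
+    with open(path, 'wb') as f:
+        img.save(f)
+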
+class _readable:
+    """
+    A simple file-like interface for strings and arrays.
+    """
+
+    def __init__(self, buf):
+        self.buf = buf
+        self.offset = 0
+
+    def read(self, n):
+        r = self.buf[self.offset:self.offset+n]
+        if isarray(r):
+            r = r.tostring()
+        self.offset += n
+        return r
+
+try:
+    str(b'dummy', 'ascii')
+except TypeError:
+    as_str = str
+else:
+    def as_str(x):
+        return str(x, 'ascii')
+
+class Reader:
+    """
+    PNG decoder in pure Python.
+    """
+
+    def __init__(self, _guess=None, **kw):
+        """
+        Create a PNG decoder object.
+
+        The constructor expects exactly one keyword argument. If you
+        supply a positional argument instead, it will guess the input
+        type. You can choose among the following keyword arguments:
+
+        filename
+          Name of input file (a PNG file).
+        file
+          A file-like object (object with a read() method).
+        bytes
+          ``array`` or ``string`` with PNG data.
+
+        """
+        if ((_guess is not None and len(kw) != 0) or
+            (_guess is None and len(kw) != 1)):
+            raise TypeError("Reader() takes exactly 1 argument")
+
+        # Will be the first 8 bytes, later on.  See validate_signature.
+        self.signature = None
+        self.transparent = None
+        # A pair of (len,type) if a chunk has been read but its data and
+        # checksum have not (in other words the file position is just
+        # past the 4 bytes that specify the chunk type).  See preamble
+        # method for how this is used.
+        self.atchunk = None
+
+        if _guess is not None:
+            if isarray(_guess):
+                kw["bytes"] = _guess
+            elif isinstance(_guess, str):
+                kw["filename"] = _guess
+            elif hasattr(_guess, 'read'):
+                kw["file"] = _guess
+
+        if "filename" in kw:
+            self.file = open(kw["filename"], "rb")
+        elif "file" in kw:
+            self.file = kw["file"]
+        elif "bytes" in kw:
+            self.file = _readable(kw["bytes"])
+        else:
+            raise TypeError("expecting filename, file or bytes array")
+
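+    # Illustrative note (not part of PyPNG): three equivalent ways to
+    # construct a decoder, assuming a hypothetical 'example.png' --
+    #     Reader(filename='example.png')
+    #     Reader(file=open('example.png', 'rb'))
+    #     Reader(bytes=open('example.png', 'rb').read())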
+
+    def chunk(self, seek=None, lenient=False):
+        """
+        Read the next PNG chunk from the input file; returns a
+        (*type*, *data*) tuple.  *type* is the chunk's type as a
+        byte string (all PNG chunk types are 4 bytes long).
+        *data* is the chunk's data content, as a byte string.
+
+        If the optional `seek` argument is
+        specified then it will keep reading chunks until it either runs
+        out of file or finds the type specified by the argument.  Note
+        that in general the order of chunks in PNGs is unspecified, so
+        using `seek` can cause you to miss chunks.
+
+        If the optional `lenient` argument evaluates to `True`,
+        checksum failures will raise warnings rather than exceptions.
+        """
+
+        self.validate_signature()
+
+        while True:
+            # http://www.w3.org/TR/PNG/#5Chunk-layout
+            if not self.atchunk:
+                self.atchunk = self.chunklentype()
+            length, type = self.atchunk
+            self.atchunk = None
+            data = self.file.read(length)
+            if len(data) != length:
+                raise ChunkError('Chunk %s too short for required %i octets.'
+                  % (type, length))
+            checksum = self.file.read(4)
+            if len(checksum) != 4:
+                raise ChunkError('Chunk %s too short for checksum.' % type)
+            if seek and type != seek:
+                continue
+            verify = zlib.crc32(type)
+            verify = zlib.crc32(data, verify)
+            # Whether the output from zlib.crc32 is signed or not varies
+            # according to hideous implementation details, see
+            # http://bugs.python.org/issue1202 .
+            # We coerce it to be positive here (in a way which works on
+            # Python 2.3 and older).
+            verify &= 2**32 - 1
+            verify = struct.pack('!I', verify)
+            if checksum != verify:
+                (a, ) = struct.unpack('!I', checksum)
+                (b, ) = struct.unpack('!I', verify)
+                message = "Checksum error in %s chunk: 0x%08X != 0x%08X." % (type, a, b)
+                if lenient:
+                    warnings.warn(message, RuntimeWarning)
+                else:
+                    raise ChunkError(message)
+            return type, data
+
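+    # Illustrative note (not part of PyPNG), assuming a hypothetical reader
+    # `r`: r.chunk() returns e.g. (b'IHDR', <13 bytes of data>) for the first
+    # chunk, and r.chunk(seek=b'PLTE') skips ahead to the palette if present.
+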
+    def chunks(self):
+        """Return an iterator that will yield each chunk as a
+        (*chunktype*, *content*) pair.
+        """
+
+        while True:
+            t,v = self.chunk()
+            yield t,v
+            if t == b'IEND':
+                break
+
+    def undo_filter(self, filter_type, scanline, previous):
+        """Undo the filter for a scanline.  `scanline` is a sequence of
+        bytes that does not include the initial filter type byte.
+        `previous` is the decoded previous scanline (for straightlaced
+        images this is the previous pixel row, but for interlaced
+        images, it is the previous scanline in the reduced image, which
+        in general is not the previous pixel row in the final image).
+        When there is no previous scanline (the first row of a
+        straightlaced image, or the first row in one of the passes in an
+        interlaced image), then this argument should be ``None``.
+
+        The scanline will have the effects of filtering removed, and the
+        result will be returned as a fresh sequence of bytes.
+        """
+
+        # :todo: Would it be better to update scanline in place?
+        # Yes, with the Cython extension making the undo_filter fast,
+        # updating scanline inplace makes the code 3 times faster
+        # (reading 50 images of 800x800 went from 40s to 16s)
+        result = scanline
+
+        if filter_type == 0:
+            return result
+
+        if filter_type not in (1,2,3,4):
+            raise FormatError('Invalid PNG Filter Type.'
+              '  See http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters .')
+
+        # Filter unit.  The stride from one pixel to the corresponding
+        # byte from the previous pixel.  Normally this is the pixel
+        # size in bytes, but when the pixel size is smaller than one
+        # byte (bit depth below 8), a stride of one byte is used instead.
+        fu = max(1, self.psize)
+
+        # For the first line of a pass, synthesize a dummy previous
+        # line.  An alternative approach would be to observe that on the
+        # first line 'up' is the same as 'null', 'paeth' is the same
+        # as 'sub', with only 'average' requiring any special case.
+        if not previous:
+            previous = array('B', [0]*len(scanline))
+
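+        # Note: the nested sub/up/average/paeth helpers below are a
+        # pure-Python reference version; the dispatch at the end of this
+        # method goes through the pngfilters implementations instead, so
+        # these local definitions are not actually invoked.
+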
+        def sub():
+            """Undo sub filter."""
+
+            ai = 0
+            # Loop starts at index fu.  Observe that the initial part
+            # of the result is already filled in correctly with
+            # scanline.
+            for i in range(fu, len(result)):
+                x = scanline[i]
+                a = result[ai]
+                result[i] = (x + a) & 0xff
+                ai += 1
+
+        def up():
+            """Undo up filter."""
+
+            for i in range(len(result)):
+                x = scanline[i]
+                b = previous[i]
+                result[i] = (x + b) & 0xff
+
+        def average():
+            """Undo average filter."""
+
+            ai = -fu
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = 0
+                else:
+                    a = result[ai]
+                b = previous[i]
+                result[i] = (x + ((a + b) >> 1)) & 0xff
+                ai += 1
+
+        def paeth():
+            """Undo Paeth filter."""
+
+            # Also used for ci.
+            ai = -fu
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = c = 0
+                else:
+                    a = result[ai]
+                    c = previous[ai]
+                b = previous[i]
+                p = a + b - c
+                pa = abs(p - a)
+                pb = abs(p - b)
+                pc = abs(p - c)
+                if pa <= pb and pa <= pc:
+                    pr = a
+                elif pb <= pc:
+                    pr = b
+                else:
+                    pr = c
+                result[i] = (x + pr) & 0xff
+                ai += 1
+
+        # Call appropriate filter algorithm.  Note that 0 has already
+        # been dealt with.
+        (None,
+         pngfilters.undo_filter_sub,
+         pngfilters.undo_filter_up,
+         pngfilters.undo_filter_average,
+         pngfilters.undo_filter_paeth)[filter_type](fu, scanline, previous, result)
+        return result
+
+    def deinterlace(self, raw):
+        """
+        Read raw pixel data, undo filters, deinterlace, and flatten.
+        Return in flat row flat pixel format.
+        """
+
+        # Values per row (of the target image)
+        vpr = self.width * self.planes
+
+        # Make a result array, and make it big enough.  Interleaving
+        # writes to the output array randomly (well, not quite), so the
+        # entire output array must be in memory.
+        fmt = 'BH'[self.bitdepth > 8]
+        a = array(fmt, [0]*vpr*self.height)
+        source_offset = 0
+
+        for xstart, ystart, xstep, ystep in _adam7:
+            if xstart >= self.width:
+                continue
+            # The previous (reconstructed) scanline.  None at the
+            # beginning of a pass to indicate that there is no previous
+            # line.
+            recon = None
+            # Pixels per row (reduced pass image)
+            ppr = int(math.ceil((self.width-xstart)/float(xstep)))
+            # Row size in bytes for this pass.
+            row_size = int(math.ceil(self.psize * ppr))
+            for y in range(ystart, self.height, ystep):
+                filter_type = raw[source_offset]
+                source_offset += 1
+                scanline = raw[source_offset:source_offset+row_size]
+                source_offset += row_size
+                recon = self.undo_filter(filter_type, scanline, recon)
+                # Convert so that there is one element per pixel value
+                flat = self.serialtoflat(recon, ppr)
+                if xstep == 1:
+                    assert xstart == 0
+                    offset = y * vpr
+                    a[offset:offset+vpr] = flat
+                else:
+                    offset = y * vpr + xstart * self.planes
+                    end_offset = (y+1) * vpr
+                    skip = self.planes * xstep
+                    for i in range(self.planes):
+                        a[offset+i:end_offset:skip] = \
+                            flat[i::self.planes]
+        return a
+
+    def iterboxed(self, rows):
+        """Iterator that yields each scanline in boxed row flat pixel
+        format.  `rows` should be an iterator that yields the bytes of
+        each row in turn.
+        """
+
+        def asvalues(raw):
+            """Convert a row of raw bytes into a flat row.  Result will
+            be a freshly allocated object, not shared with
+            argument.
+            """
+
+            if self.bitdepth == 8:
+                return array('B', raw)
+            if self.bitdepth == 16:
+                raw = tostring(raw)
+                return array('H', struct.unpack('!%dH' % (len(raw)//2), raw))
+            assert self.bitdepth < 8
+            width = self.width
+            # Samples per byte
+            spb = 8//self.bitdepth
+            out = array('B')
+            mask = 2**self.bitdepth - 1
+            shifts = [self.bitdepth * i
+                for i in reversed(list(range(spb)))]
+            for o in raw:
+                out.extend([mask&(o>>i) for i in shifts])
+            return out[:width]
+
+        return map(asvalues, rows)
+
+    def serialtoflat(self, bytes, width=None):
+        """Convert serial format (byte stream) pixel data to flat row
+        flat pixel.
+        """
+
+        if self.bitdepth == 8:
+            return bytes
+        if self.bitdepth == 16:
+            bytes = tostring(bytes)
+            return array('H',
+              struct.unpack('!%dH' % (len(bytes)//2), bytes))
+        assert self.bitdepth < 8
+        if width is None:
+            width = self.width
+        # Samples per byte
+        spb = 8//self.bitdepth
+        out = array('B')
+        mask = 2**self.bitdepth - 1
+        shifts = list(map(self.bitdepth.__mul__, reversed(list(range(spb)))))
+        l = width
+        for o in bytes:
+            out.extend([(mask&(o>>s)) for s in shifts][:l])
+            l -= spb
+            if l <= 0:
+                l = width
+        return out
+
+    def iterstraight(self, raw):
+        """Iterator that undoes the effect of filtering, and yields
+        each row in serialised format (as a sequence of bytes).
+        Assumes input is straightlaced.  `raw` should be an iterable
+        that yields the raw bytes in chunks of arbitrary size.
+        """
+
+        # length of row, in bytes
+        rb = self.row_bytes
+        a = array('B')
+        # The previous (reconstructed) scanline.  None indicates first
+        # line of image.
+        recon = None
+        for some in raw:
+            a.extend(some)
+            while len(a) >= rb + 1:
+                filter_type = a[0]
+                scanline = a[1:rb+1]
+                del a[:rb+1]
+                recon = self.undo_filter(filter_type, scanline, recon)
+                yield recon
+        if len(a) != 0:
+            # :file:format We get here with a file format error:
+            # when the available bytes (after decompressing) do not
+            # pack into exact rows.
+            raise FormatError(
+              'Wrong size for decompressed IDAT chunk.')
+        assert len(a) == 0
+
+    def validate_signature(self):
+        """If signature (header) has not been read then read and
+        validate it; otherwise do nothing.
+        """
+
+        if self.signature:
+            return
+        self.signature = self.file.read(8)
+        if self.signature != _signature:
+            raise FormatError("PNG file has invalid signature.")
+
+    def preamble(self, lenient=False):
+        """
+        Extract the image metadata by reading the initial part of
+        the PNG file up to the start of the ``IDAT`` chunk.  All the
+        chunks that precede the ``IDAT`` chunk are read and either
+        processed for metadata or discarded.
+
+        If the optional `lenient` argument evaluates to `True`, checksum
+        failures will raise warnings rather than exceptions.
+        """
+
+        self.validate_signature()
+
+        while True:
+            if not self.atchunk:
+                self.atchunk = self.chunklentype()
+                if self.atchunk is None:
+                    raise FormatError(
+                      'This PNG file has no IDAT chunks.')
+            if self.atchunk[1] == b'IDAT':
+                return
+            self.process_chunk(lenient=lenient)
+
+    def chunklentype(self):
+        """Reads just enough of the input to determine the next
+        chunk's length and type, returned as a (*length*, *type*) pair
+        where *type* is a string.  If there are no more chunks, ``None``
+        is returned.
+        """
+
+        x = self.file.read(8)
+        if not x:
+            return None
+        if len(x) != 8:
+            raise FormatError(
+              'End of file whilst reading chunk length and type.')
+        length,type = struct.unpack('!I4s', x)
+        if length > 2**31-1:
+            raise FormatError('Chunk %s is too large: %d.' % (type,length))
+        return length,type
+
+    def process_chunk(self, lenient=False):
+        """Process the next chunk and its data.  This only processes the
+        following chunk types, all others are ignored: ``IHDR``,
+        ``PLTE``, ``bKGD``, ``tRNS``, ``gAMA``, ``sBIT``, ``pHYs``.
+
+        If the optional `lenient` argument evaluates to `True`,
+        checksum failures will raise warnings rather than exceptions.
+        """
+
+        type, data = self.chunk(lenient=lenient)
+        method = '_process_' + as_str(type)
+        m = getattr(self, method, None)
+        if m:
+            m(data)
+
+    def _process_IHDR(self, data):
+        # http://www.w3.org/TR/PNG/#11IHDR
+        if len(data) != 13:
+            raise FormatError('IHDR chunk has incorrect length.')
+        (self.width, self.height, self.bitdepth, self.color_type,
+         self.compression, self.filter,
+         self.interlace) = struct.unpack("!2I5B", data)
+
+        check_bitdepth_colortype(self.bitdepth, self.color_type)
+
+        if self.compression != 0:
+            raise Error("unknown compression method %d" % self.compression)
+        if self.filter != 0:
+            raise FormatError("Unknown filter method %d,"
+              " see http://www.w3.org/TR/2003/REC-PNG-20031110/#9Filters ."
+              % self.filter)
+        if self.interlace not in (0,1):
+            raise FormatError("Unknown interlace method %d,"
+              " see http://www.w3.org/TR/2003/REC-PNG-20031110/#8InterlaceMethods ."
+              % self.interlace)
+
+        # Derived values
+        # http://www.w3.org/TR/PNG/#6Colour-values
+        colormap =  bool(self.color_type & 1)
+        greyscale = not (self.color_type & 2)
+        alpha = bool(self.color_type & 4)
+        color_planes = (3,1)[greyscale or colormap]
+        planes = color_planes + alpha
+
+        self.colormap = colormap
+        self.greyscale = greyscale
+        self.alpha = alpha
+        self.color_planes = color_planes
+        self.planes = planes
+        self.psize = float(self.bitdepth)/float(8) * planes
+        if int(self.psize) == self.psize:
+            self.psize = int(self.psize)
+        self.row_bytes = int(math.ceil(self.width * self.psize))
+        # Stores PLTE chunk if present, and is used to check
+        # chunk ordering constraints.
+        self.plte = None
+        # Stores tRNS chunk if present, and is used to check chunk
+        # ordering constraints.
+        self.trns = None
+        # Stores sbit chunk if present.
+        self.sbit = None
+
+    def _process_PLTE(self, data):
+        # http://www.w3.org/TR/PNG/#11PLTE
+        if self.plte:
+            warnings.warn("Multiple PLTE chunks present.")
+        self.plte = data
+        if len(data) % 3 != 0:
+            raise FormatError(
+              "PLTE chunk's length should be a multiple of 3.")
+        if len(data) > (2**self.bitdepth)*3:
+            raise FormatError("PLTE chunk is too long.")
+        if len(data) == 0:
+            raise FormatError("Empty PLTE is not allowed.")
+
+    def _process_bKGD(self, data):
+        try:
+            if self.colormap:
+                if not self.plte:
+                    warnings.warn(
+                      "PLTE chunk is required before bKGD chunk.")
+                self.background = struct.unpack('B', data)
+            else:
+                self.background = struct.unpack("!%dH" % self.color_planes,
+                  data)
+        except struct.error:
+            raise FormatError("bKGD chunk has incorrect length.")
+
+    def _process_tRNS(self, data):
+        # http://www.w3.org/TR/PNG/#11tRNS
+        self.trns = data
+        if self.colormap:
+            if not self.plte:
+                warnings.warn("PLTE chunk is required before tRNS chunk.")
+            else:
+                if len(data) > len(self.plte)/3:
+                    # Was warning, but promoted to Error as it
+                    # would otherwise cause pain later on.
+                    raise FormatError("tRNS chunk is too long.")
+        else:
+            if self.alpha:
+                raise FormatError(
+                  "tRNS chunk is not valid with colour type %d." %
+                  self.color_type)
+            try:
+                self.transparent = \
+                    struct.unpack("!%dH" % self.color_planes, data)
+            except struct.error:
+                raise FormatError("tRNS chunk has incorrect length.")
+
+    def _process_gAMA(self, data):
+        try:
+            self.gamma = struct.unpack("!L", data)[0] / 100000.0
+        except struct.error:
+            raise FormatError("gAMA chunk has incorrect length.")
+
+    def _process_sBIT(self, data):
+        self.sbit = data
+        if (self.colormap and len(data) != 3 or
+            not self.colormap and len(data) != self.planes):
+            raise FormatError("sBIT chunk has incorrect length.")
+
+    def _process_pHYs(self, data):
+        # http://www.w3.org/TR/PNG/#11pHYs
+        self.phys = data
+        fmt = "!LLB"
+        if len(data) != struct.calcsize(fmt):
+            raise FormatError("pHYs chunk has incorrect length.")
+        self.x_pixels_per_unit, self.y_pixels_per_unit, unit = \
+            struct.unpack(fmt, data)
+        self.unit_is_meter = bool(unit)
+
+    def read(self, lenient=False):
+        """
+        Read the PNG file and decode it.  Returns (`width`, `height`,
+        `pixels`, `metadata`).
+
+        May use excessive memory.
+
+        `pixels` are returned in boxed row flat pixel format.
+
+        If the optional `lenient` argument evaluates to True,
+        checksum failures will raise warnings rather than exceptions.
+        """
+
+        def iteridat():
+            """Iterator that yields all the ``IDAT`` chunks as strings."""
+            while True:
+                try:
+                    type, data = self.chunk(lenient=lenient)
+                except ValueError as e:
+                    raise ChunkError(e.args[0])
+                if type == b'IEND':
+                    # http://www.w3.org/TR/PNG/#11IEND
+                    break
+                if type != b'IDAT':
+                    continue
+                # type == b'IDAT'
+                # http://www.w3.org/TR/PNG/#11IDAT
+                if self.colormap and not self.plte:
+                    warnings.warn("PLTE chunk is required before IDAT chunk")
+                yield data
+
+        def iterdecomp(idat):
+            """Iterator that yields decompressed strings.  `idat` should
+            be an iterator that yields the ``IDAT`` chunk data.
+            """
+
+            # Currently, with no max_length parameter to decompress,
+            # this routine will do one yield per IDAT chunk: Not very
+            # incremental.
+            d = zlib.decompressobj()
+            # Each IDAT chunk is passed to the decompressor, then any
+            # remaining state is decompressed out.
+            for data in idat:
+                # :todo: add a max_length argument here to limit output
+                # size.
+                yield array('B', d.decompress(data))
+            yield array('B', d.flush())
+
+        self.preamble(lenient=lenient)
+        raw = iterdecomp(iteridat())
+
+        if self.interlace:
+            raw = array('B', itertools.chain(*raw))
+            arraycode = 'BH'[self.bitdepth>8]
+            # Like :meth:`group` but producing an array.array object for
+            # each row.
+            pixels = map(lambda *row: array(arraycode, row),
+                       *[iter(self.deinterlace(raw))]*self.width*self.planes)
+        else:
+            pixels = self.iterboxed(self.iterstraight(raw))
+        meta = dict()
+        for attr in 'greyscale alpha planes bitdepth interlace'.split():
+            meta[attr] = getattr(self, attr)
+        meta['size'] = (self.width, self.height)
+        for attr in 'gamma transparent background'.split():
+            a = getattr(self, attr, None)
+            if a is not None:
+                meta[attr] = a
+        if self.plte:
+            meta['palette'] = self.palette()
+        return self.width, self.height, pixels, meta
+
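+    # Illustrative sketch (not part of PyPNG), assuming a hypothetical
+    # 'example.png':
+    #     width, height, rows, meta = Reader(filename='example.png').read()
+    #     for row in rows:    # each row is an array of per-channel values
+    #         process(row)    # 'process' is a placeholder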
+
+    def read_flat(self):
+        """
+        Read a PNG file and decode it into flat row flat pixel format.
+        Returns (*width*, *height*, *pixels*, *metadata*).
+
+        May use excessive memory.
+
+        `pixels` are returned in flat row flat pixel format.
+
+        See also the :meth:`read` method which returns pixels in the
+        more stream-friendly boxed row flat pixel format.
+        """
+
+        x, y, pixel, meta = self.read()
+        arraycode = 'BH'[meta['bitdepth']>8]
+        pixel = array(arraycode, itertools.chain(*pixel))
+        return x, y, pixel, meta
+
+    def palette(self, alpha='natural'):
+        """Returns a palette that is a sequence of 3-tuples or 4-tuples,
+        synthesizing it from the ``PLTE`` and ``tRNS`` chunks.  These
+        chunks should have already been processed (for example, by
+        calling the :meth:`preamble` method).  All the tuples are the
+        same size: 3-tuples if there is no ``tRNS`` chunk, 4-tuples when
+        there is a ``tRNS`` chunk.  Assumes that the image is colour type
+        3 and therefore a ``PLTE`` chunk is required.
+
+        If the `alpha` argument is ``'force'`` then an alpha channel is
+        always added, forcing the result to be a sequence of 4-tuples.
+        """
+
+        if not self.plte:
+            raise FormatError(
+                "Required PLTE chunk is missing in colour type 3 image.")
+        plte = group(array('B', self.plte), 3)
+        if self.trns or alpha == 'force':
+            trns = array('B', self.trns or b'')
+            trns.extend([255]*(len(plte)-len(trns)))
+            plte = list(map(operator.add, plte, group(trns, 1)))
+        return plte
+
+    def asDirect(self):
+        """Returns the image data as a direct representation of an
+        ``x * y * planes`` array.  This method is intended to remove the
+        need for callers to deal with palettes and transparency
+        themselves.  Images with a palette (colour type 3)
+        are converted to RGB or RGBA; images with transparency (a
+        ``tRNS`` chunk) are converted to LA or RGBA as appropriate.
+        When returned in this format the pixel values represent the
+        colour value directly without needing to refer to palettes or
+        transparency information.
+
+        Like the :meth:`read` method this method returns a 4-tuple:
+
+        (*width*, *height*, *pixels*, *meta*)
+
+        This method normally returns pixel values with the bit depth
+        they have in the source image, but when the source PNG has an
+        ``sBIT`` chunk it is inspected and can reduce the bit depth of
+        the result pixels; pixel values will be reduced according to
+        the bit depth specified in the ``sBIT`` chunk (PNG nerds should
+        note a single result bit depth is used for all channels; the
+        maximum of the ones specified in the ``sBIT`` chunk.  An RGB565
+        image will be rescaled to 6-bit RGB666).
+
+        The *meta* dictionary that is returned reflects the `direct`
+        format and not the original source image.  For example, an RGB
+        source image with a ``tRNS`` chunk to represent a transparent
+        colour, will have ``planes=3`` and ``alpha=False`` for the
+        source image, but the *meta* dictionary returned by this method
+        will have ``planes=4`` and ``alpha=True`` because an alpha
+        channel is synthesized and added.
+
+        *pixels* is the pixel data in boxed row flat pixel format (just
+        like the :meth:`read` method).
+
+        All the other aspects of the image data are not changed.
+        """
+
+        self.preamble()
+
+        # Simple case, no conversion necessary.
+        if not self.colormap and not self.trns and not self.sbit:
+            return self.read()
+
+        x,y,pixels,meta = self.read()
+
+        if self.colormap:
+            meta['colormap'] = False
+            meta['alpha'] = bool(self.trns)
+            meta['bitdepth'] = 8
+            meta['planes'] = 3 + bool(self.trns)
+            plte = self.palette()
+            def iterpal(pixels):
+                for row in pixels:
+                    row = [plte[x] for x in row]
+                    yield array('B', itertools.chain(*row))
+            pixels = iterpal(pixels)
+        elif self.trns:
+            # It would be nice if there was some reasonable way
+            # of doing this without generating a whole load of
+            # intermediate tuples.  But tuples does seem like the
+            # easiest way, with no other way clearly much simpler or
+            # much faster.  (Actually, the L to LA conversion could
+            # perhaps go faster (all those 1-tuples!), but I still
+            # wonder whether the code proliferation is worth it)
+            it = self.transparent
+            maxval = 2**meta['bitdepth']-1
+            planes = meta['planes']
+            meta['alpha'] = True
+            meta['planes'] += 1
+            typecode = 'BH'[meta['bitdepth']>8]
+            def itertrns(pixels):
+                for row in pixels:
+                    # For each row we group it into pixels, then form a
+                    # characterisation vector that says whether each
+                    # pixel is opaque or not.  Then we convert
+                    # True/False to 0/maxval (by multiplication),
+                    # and add it as the extra channel.
+                    row = group(row, planes)
+                    opa = map(it.__ne__, row)
+                    opa = map(maxval.__mul__, opa)
+                    opa = list(zip(opa)) # convert to 1-tuples
+                    yield array(typecode,
+                      itertools.chain(*map(operator.add, row, opa)))
+            pixels = itertrns(pixels)
+        targetbitdepth = None
+        if self.sbit:
+            sbit = struct.unpack('%dB' % len(self.sbit), self.sbit)
+            targetbitdepth = max(sbit)
+            if targetbitdepth > meta['bitdepth']:
+                raise Error('sBIT chunk %r exceeds bitdepth %d' %
+                    (sbit,self.bitdepth))
+            if min(sbit) <= 0:
+                raise Error('sBIT chunk %r has a 0-entry' % sbit)
+            if targetbitdepth == meta['bitdepth']:
+                targetbitdepth = None
+        if targetbitdepth:
+            shift = meta['bitdepth'] - targetbitdepth
+            meta['bitdepth'] = targetbitdepth
+            def itershift(pixels):
+                for row in pixels:
+                    yield [p >> shift for p in row]
+            pixels = itershift(pixels)
+        return x,y,pixels,meta
+
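+    # Illustrative note (not part of PyPNG): for a hypothetical indexed
+    # (colour type 3) PNG that also carries a tRNS chunk, asDirect() expands
+    # the palette, so the returned meta has planes=4 and alpha=True even
+    # though the source file stores a single index plane.
+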
+    def asFloat(self, maxval=1.0):
+        """Return image pixels as per :meth:`asDirect` method, but scale
+        all pixel values to be floating point values between 0.0 and
+        *maxval*.
+        """
+
+        x,y,pixels,info = self.asDirect()
+        sourcemaxval = 2**info['bitdepth']-1
+        del info['bitdepth']
+        info['maxval'] = float(maxval)
+        factor = float(maxval)/float(sourcemaxval)
+        def iterfloat():
+            for row in pixels:
+                yield [factor * p for p in row]
+        return x,y,iterfloat(),info
+
+    def _as_rescale(self, get, targetbitdepth):
+        """Helper used by :meth:`asRGB8` and :meth:`asRGBA8`."""
+
+        width,height,pixels,meta = get()
+        maxval = 2**meta['bitdepth'] - 1
+        targetmaxval = 2**targetbitdepth - 1
+        factor = float(targetmaxval) / float(maxval)
+        meta['bitdepth'] = targetbitdepth
+        def iterscale():
+            for row in pixels:
+                yield [int(round(x*factor)) for x in row]
+        if maxval == targetmaxval:
+            return width, height, pixels, meta
+        else:
+            return width, height, iterscale(), meta
+
+    def asRGB8(self):
+        """Return the image data as an RGB pixels with 8-bits per
+        sample.  This is like the :meth:`asRGB` method except that
+        this method additionally rescales the values so that they
+        are all between 0 and 255 (8-bit).  In the case where the
+        source image has a bit depth < 8 the transformation preserves
+        all the information; where the source image has bit depth
+        > 8, then rescaling to 8-bit values loses precision.  No
+        dithering is performed.  Like :meth:`asRGB`, an alpha channel
+        in the source image will raise an exception.
+
+        This function returns a 4-tuple:
+        (*width*, *height*, *pixels*, *metadata*).
+        *width*, *height*, *metadata* are as per the
+        :meth:`read` method.
+        
+        *pixels* is the pixel data in boxed row flat pixel format.
+        """
+
+        return self._as_rescale(self.asRGB, 8)
+
+    def asRGBA8(self):
+        """Return the image data as RGBA pixels with 8-bits per
+        sample.  This method is similar to :meth:`asRGB8` and
+        :meth:`asRGBA`:  The result pixels have an alpha channel, *and*
+        values are rescaled to the range 0 to 255.  The alpha channel is
+        synthesized if necessary (with a small speed penalty).
+        """
+
+        return self._as_rescale(self.asRGBA, 8)
+
+    def asRGB(self):
+        """Return image as RGB pixels.  RGB colour images are passed
+        through unchanged; greyscales are expanded into RGB
+        triplets (there is a small speed overhead for doing this).
+
+        An alpha channel in the source image will raise an
+        exception.
+
+        The return values are as for the :meth:`read` method
+        except that the *metadata* reflect the returned pixels, not the
+        source image.  In particular, for this method
+        ``metadata['greyscale']`` will be ``False``.
+        """
+
+        width,height,pixels,meta = self.asDirect()
+        if meta['alpha']:
+            raise Error("will not convert image with alpha channel to RGB")
+        if not meta['greyscale']:
+            return width,height,pixels,meta
+        meta['greyscale'] = False
+        typecode = 'BH'[meta['bitdepth'] > 8]
+        def iterrgb():
+            for row in pixels:
+                a = array(typecode, [0]) * 3 * width
+                for i in range(3):
+                    a[i::3] = row
+                yield a
+        return width,height,iterrgb(),meta
+
+    def asRGBA(self):
+        """Return image as RGBA pixels.  Greyscales are expanded into
+        RGB triplets; an alpha channel is synthesized if necessary.
+        The return values are as for the :meth:`read` method
+        except that the *metadata* reflect the returned pixels, not the
+        source image.  In particular, for this method
+        ``metadata['greyscale']`` will be ``False``, and
+        ``metadata['alpha']`` will be ``True``.
+        """
+
+        width,height,pixels,meta = self.asDirect()
+        if meta['alpha'] and not meta['greyscale']:
+            return width,height,pixels,meta
+        typecode = 'BH'[meta['bitdepth'] > 8]
+        maxval = 2**meta['bitdepth'] - 1
+        maxbuffer = struct.pack('=' + typecode, maxval) * 4 * width
+        def newarray():
+            return array(typecode, maxbuffer)
+
+        if meta['alpha'] and meta['greyscale']:
+            # LA to RGBA
+            def convert():
+                for row in pixels:
+                    # Create a fresh target row, then copy L channel
+                    # into first three target channels, and A channel
+                    # into fourth channel.
+                    a = newarray()
+                    pngfilters.convert_la_to_rgba(row, a)
+                    yield a
+        elif meta['greyscale']:
+            # L to RGBA
+            def convert():
+                for row in pixels:
+                    a = newarray()
+                    pngfilters.convert_l_to_rgba(row, a)
+                    yield a
+        else:
+            assert not meta['alpha'] and not meta['greyscale']
+            # RGB to RGBA
+            def convert():
+                for row in pixels:
+                    a = newarray()
+                    pngfilters.convert_rgb_to_rgba(row, a)
+                    yield a
+        meta['alpha'] = True
+        meta['greyscale'] = False
+        return width,height,convert(),meta
+
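+# Illustrative sketch (not part of the PyPNG API, never called): end-to-end
+# decoding to 8-bit RGBA rows.  The filename 'example.png' is hypothetical.
+def _example_decode_rgba8(path='example.png'):
+    """Decode a PNG into a list of 8-bit RGBA rows; illustrative only."""
+    width, height, rows, meta = Reader(filename=path).asRGBA8()
+    # Each row is a flat sequence of width * 4 values in the range 0..255.
+    return width, height, [list(row) for row in rows], meta
+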
+def check_bitdepth_colortype(bitdepth, colortype):
+    """Check that `bitdepth` and `colortype` are both valid,
+    and specified in a valid combination. Returns if valid,
+    raise an Exception if not valid.
+    """
+
+    if bitdepth not in (1,2,4,8,16):
+        raise FormatError("invalid bit depth %d" % bitdepth)
+    if colortype not in (0,2,3,4,6):
+        raise FormatError("invalid colour type %d" % colortype)
+    # Check indexed (palettized) images have 8 or fewer bits
+    # per pixel; check only indexed or greyscale images have
+    # fewer than 8 bits per pixel.
+    if colortype & 1 and bitdepth > 8:
+        raise FormatError(
+          "Indexed images (colour type %d) cannot"
+          " have bitdepth > 8 (bit depth %d)."
+          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
+          % (colortype, bitdepth))
+    if bitdepth < 8 and colortype not in (0,3):
+        raise FormatError("Illegal combination of bit depth (%d)"
+          " and colour type (%d)."
+          " See http://www.w3.org/TR/2003/REC-PNG-20031110/#table111 ."
+          % (bitdepth, colortype))
+
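+# Illustrative sketch (not part of PyPNG, never called): how the checker
+# above accepts and rejects bit depth / colour type combinations.
+def _example_check_bitdepth_colortype():
+    check_bitdepth_colortype(8, 3)       # valid: indexed image, 8 bits
+    try:
+        check_bitdepth_colortype(16, 3)  # invalid: indexed caps at 8 bits
+    except FormatError:
+        pass
+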
+def isinteger(x):
+    try:
+        return int(x) == x
+    except (TypeError, ValueError):
+        return False
+
+
+# === Support for users without Cython ===
+
+try:
+    pngfilters
+except NameError:
+    class pngfilters(object):
+        def undo_filter_sub(filter_unit, scanline, previous, result):
+            """Undo sub filter."""
+
+            ai = 0
+            # Loop starts at index filter_unit.  Observe that the initial part
+            # of the result is already filled in correctly with
+            # scanline.
+            for i in range(filter_unit, len(result)):
+                x = scanline[i]
+                a = result[ai]
+                result[i] = (x + a) & 0xff
+                ai += 1
+        undo_filter_sub = staticmethod(undo_filter_sub)
+
+        def undo_filter_up(filter_unit, scanline, previous, result):
+            """Undo up filter."""
+
+            for i in range(len(result)):
+                x = scanline[i]
+                b = previous[i]
+                result[i] = (x + b) & 0xff
+        undo_filter_up = staticmethod(undo_filter_up)
+
+        def undo_filter_average(filter_unit, scanline, previous, result):
+            """Undo up filter."""
+
+            ai = -filter_unit
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = 0
+                else:
+                    a = result[ai]
+                b = previous[i]
+                result[i] = (x + ((a + b) >> 1)) & 0xff
+                ai += 1
+        undo_filter_average = staticmethod(undo_filter_average)
+
+        def undo_filter_paeth(filter_unit, scanline, previous, result):
+            """Undo Paeth filter."""
+
+            # Also used for ci.
+            ai = -filter_unit
+            for i in range(len(result)):
+                x = scanline[i]
+                if ai < 0:
+                    a = c = 0
+                else:
+                    a = result[ai]
+                    c = previous[ai]
+                b = previous[i]
+                p = a + b - c
+                pa = abs(p - a)
+                pb = abs(p - b)
+                pc = abs(p - c)
+                if pa <= pb and pa <= pc:
+                    pr = a
+                elif pb <= pc:
+                    pr = b
+                else:
+                    pr = c
+                result[i] = (x + pr) & 0xff
+                ai += 1
+        undo_filter_paeth = staticmethod(undo_filter_paeth)
+
+        def convert_la_to_rgba(row, result):
+            for i in range(3):
+                result[i::4] = row[0::2]
+            result[3::4] = row[1::2]
+        convert_la_to_rgba = staticmethod(convert_la_to_rgba)
+
+        def convert_l_to_rgba(row, result):
+            """Convert a grayscale image to RGBA. This method assumes
+            the alpha channel in result is already correctly
+            initialized.
+            """
+            for i in range(3):
+                result[i::4] = row
+        convert_l_to_rgba = staticmethod(convert_l_to_rgba)
+
+        def convert_rgb_to_rgba(row, result):
+            """Convert an RGB image to RGBA. This method assumes the
+            alpha channel in result is already correctly initialized.
+            """
+            for i in range(3):
+                result[i::4] = row[i::3]
+        convert_rgb_to_rgba = staticmethod(convert_rgb_to_rgba)
+
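+# Illustrative sketch (not part of PyPNG, never called): the fallback filter
+# routines above work in place on `result`.  Undoing a 'sub' filter with a
+# one-byte stride turns a run of deltas into a running sum.
+def _example_undo_filter_sub():
+    scanline = array('B', [10, 10, 10, 10])
+    result = array('B', scanline)
+    pngfilters.undo_filter_sub(1, scanline, array('B', [0] * 4), result)
+    return list(result)  # [10, 20, 30, 40]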
+
+# === Command Line Support ===
+
+def read_pam_header(infile):
+    """
+    Read (the rest of a) PAM header.  `infile` should be positioned
+    immediately after the initial 'P7' line (at the beginning of the
+    second line).  Returns are as for `read_pnm_header`.
+    """
+    
+    # Unlike PBM, PGM, and PPM, we can read the header a line at a time.
+    header = dict()
+    while True:
+        l = infile.readline().strip()
+        if l == b'ENDHDR':
+            break
+        if not l:
+            raise EOFError('PAM ended prematurely')
+        if l.startswith(b'#'):
+            continue
+        l = l.split(None, 1)
+        if l[0] not in header:
+            header[l[0]] = l[1]
+        else:
+            header[l[0]] += b' ' + l[1]
+
+    required = [b'WIDTH', b'HEIGHT', b'DEPTH', b'MAXVAL']
+    WIDTH,HEIGHT,DEPTH,MAXVAL = required
+    present = [x for x in required if x in header]
+    if len(present) != len(required):
+        raise Error('PAM file must specify WIDTH, HEIGHT, DEPTH, and MAXVAL')
+    width = int(header[WIDTH])
+    height = int(header[HEIGHT])
+    depth = int(header[DEPTH])
+    maxval = int(header[MAXVAL])
+    if (width <= 0 or
+        height <= 0 or
+        depth <= 0 or
+        maxval <= 0):
+        raise Error(
+          'WIDTH, HEIGHT, DEPTH, MAXVAL must all be positive integers')
+    return 'P7', width, height, depth, maxval
+
+def read_pnm_header(infile, supported=(b'P5', b'P6')):
+    """
+    Read a PNM header, returning (format,width,height,depth,maxval).
+    `width` and `height` are in pixels.  `depth` is the number of
+    channels in the image; for PBM and PGM it is synthesized as 1, for
+    PPM as 3; for PAM images it is read from the header.  `maxval` is
+    synthesized (as 1) for PBM images.
+    """
+
+    # Generally, see http://netpbm.sourceforge.net/doc/ppm.html
+    # and http://netpbm.sourceforge.net/doc/pam.html
+
+    # Technically 'P7' must be followed by a newline, so by using
+    # rstrip() we are being liberal in what we accept.  I think this
+    # is acceptable.
+    type = infile.read(3).rstrip()
+    if type not in supported:
+        raise NotImplementedError('file format %s not supported' % type)
+    if type == b'P7':
+        # PAM header parsing is completely different.
+        return read_pam_header(infile)
+    # Expected number of tokens in header (3 for P4, 4 for P6)
+    expected = 4
+    pbm = (b'P1', b'P4')
+    if type in pbm:
+        expected = 3
+    header = [type]
+
+    # We have to read the rest of the header byte by byte because the
+    # final whitespace character (immediately following the MAXVAL in
+    # the case of P6) may not be a newline.  Of course all PNM files in
+    # the wild use a newline at this point, so it's tempting to use
+    # readline; but it would be wrong.
+    def getc():
+        c = infile.read(1)
+        if not c:
+            raise Error('premature EOF reading PNM header')
+        return c
+
+    c = getc()
+    while True:
+        # Skip whitespace that precedes a token.
+        while c.isspace():
+            c = getc()
+        # Skip comments.
+        while c == b'#':
+            while c not in b'\n\r':
+                c = getc()
+        if not c.isdigit():
+            raise Error('unexpected character %s found in header' % c)
+        # According to the specification it is legal to have comments
+        # that appear in the middle of a token.
+        # This is bonkers; I've never seen it; and it's a bit awkward to
+        # code good lexers in Python (no goto).  So we break on such
+        # cases.
+        token = b''
+        while c.isdigit():
+            token += c
+            c = getc()
+        # Slight hack.  All "tokens" are decimal integers, so convert
+        # them here.
+        header.append(int(token))
+        if len(header) == expected:
+            break
+    # Skip comments (again)
+    while c == b'#':
+        while c not in b'\n\r':
+            c = getc()
+    if not c.isspace():
+        raise Error('expected header to end with whitespace, not %s' % c)
+
+    if type in pbm:
+        # synthesize a MAXVAL
+        header.append(1)
+    depth = (1,3)[type == b'P6']
+    return header[0], header[1], header[2], depth, header[3]
+
+def write_pnm(file, width, height, pixels, meta):
+    """Write a Netpbm PNM/PAM file.
+    """
+
+    bitdepth = meta['bitdepth']
+    maxval = 2**bitdepth - 1
+    # Rudely, the number of image planes can be used to determine
+    # whether we are L (PGM), LA (PAM), RGB (PPM), or RGBA (PAM).
+    planes = meta['planes']
+    # Can be an assert as long as we assume that pixels and meta came
+    # from a PNG file.
+    assert planes in (1,2,3,4)
+    if planes in (1,3):
+        if 1 == planes:
+            # PGM
+            # Could generate PBM if maxval is 1, but we don't (for one
+            # thing, we'd have to convert the data, not just blat it
+            # out).
+            fmt = 'P5'
+        else:
+            # PPM
+            fmt = 'P6'
+        header = '%s %d %d %d\n' % (fmt, width, height, maxval)
+    if planes in (2,4):
+        # PAM
+        # See http://netpbm.sourceforge.net/doc/pam.html
+        if 2 == planes:
+            tupltype = 'GRAYSCALE_ALPHA'
+        else:
+            tupltype = 'RGB_ALPHA'
+        header = ('P7\nWIDTH %d\nHEIGHT %d\nDEPTH %d\nMAXVAL %d\n'
+                  'TUPLTYPE %s\nENDHDR\n' %
+                  (width, height, planes, maxval, tupltype))
+    file.write(header.encode('ascii'))
+    # Values per row
+    vpr = planes * width
+    # struct format
+    fmt = '>%d' % vpr
+    if maxval > 0xff:
+        fmt = fmt + 'H'
+    else:
+        fmt = fmt + 'B'
+    for row in pixels:
+        file.write(struct.pack(fmt, *row))
+    file.flush()
+
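+# Illustrative sketch (not part of PyPNG, never called): writing a 2x1 8-bit
+# greyscale image produces a binary PGM ('P5') stream.
+def _example_write_pnm():
+    import io
+    buf = io.BytesIO()
+    meta = {'bitdepth': 8, 'planes': 1}
+    write_pnm(buf, 2, 1, [[0, 255]], meta)
+    return buf.getvalue()  # b'P5 2 1 255\n\x00\xff'
+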
+def color_triple(color):
+    """
+    Convert a command line colour value to a RGB triple of integers.
+    FIXME: Somewhere we need support for greyscale backgrounds etc.
+    """
+    if color.startswith('#') and len(color) == 4:
+        return (int(color[1], 16),
+                int(color[2], 16),
+                int(color[3], 16))
+    if color.startswith('#') and len(color) == 7:
+        return (int(color[1:3], 16),
+                int(color[3:5], 16),
+                int(color[5:7], 16))
+    elif color.startswith('#') and len(color) == 13:
+        return (int(color[1:5], 16),
+                int(color[5:9], 16),
+                int(color[9:13], 16))
+
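+# Illustrative note (not part of PyPNG): color_triple('#f00') yields per-nibble
+# values (15, 0, 0), color_triple('#ff0000') per-byte values (255, 0, 0), and
+# the 13-character form 16-bit values; any other input falls through to None.
+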
+def _add_common_options(parser):
+    """Call *parser.add_option* for each of the options that are
+    common between this PNG--PNM conversion tool and the gen
+    tool.
+    """
+    parser.add_option("-i", "--interlace",
+                      default=False, action="store_true",
+                      help="create an interlaced PNG file (Adam7)")
+    parser.add_option("-t", "--transparent",
+                      action="store", type="string", metavar="#RRGGBB",
+                      help="mark the specified colour as transparent")
+    parser.add_option("-b", "--background",
+                      action="store", type="string", metavar="#RRGGBB",
+                      help="save the specified background colour")
+    parser.add_option("-g", "--gamma",
+                      action="store", type="float", metavar="value",
+                      help="save the specified gamma value")
+    parser.add_option("-c", "--compression",
+                      action="store", type="int", metavar="level",
+                      help="zlib compression level (0-9)")
+    return parser
+
+def _main(argv):
+    """
+    Run the PNG encoder with options from the command line.
+    """
+
+    # Parse command line arguments
+    from optparse import OptionParser
+    version = '%prog ' + __version__
+    parser = OptionParser(version=version)
+    parser.set_usage("%prog [options] [imagefile]")
+    parser.add_option('-r', '--read-png', default=False,
+                      action='store_true',
+                      help='Read PNG, write PNM')
+    parser.add_option("-a", "--alpha",
+                      action="store", type="string", metavar="pgmfile",
+                      help="alpha channel transparency (RGBA)")
+    _add_common_options(parser)
+
+    (options, args) = parser.parse_args(args=argv[1:])
+
+    # Convert options
+    if options.transparent is not None:
+        options.transparent = color_triple(options.transparent)
+    if options.background is not None:
+        options.background = color_triple(options.background)
+
+    # Prepare input and output files
+    if len(args) == 0:
+        infilename = '-'
+        infile = sys.stdin
+    elif len(args) == 1:
+        infilename = args[0]
+        infile = open(infilename, 'rb')
+    else:
+        parser.error("more than one input file")
+    outfile = sys.stdout
+    if sys.platform == "win32":
+        import msvcrt, os
+        msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
+
+    if options.read_png:
+        # Read PNG, write PNM
+        png = Reader(file=infile)
+        width,height,pixels,meta = png.asDirect()
+        write_pnm(outfile, width, height, pixels, meta) 
+    else:
+        # Encode PNM to PNG
+        format, width, height, depth, maxval = \
+          read_pnm_header(infile, (b'P5',b'P6',b'P7'))
+        # When it comes to the variety of input formats, we do something
+        # rather rude.  Observe that L, LA, RGB, RGBA are the 4 colour
+        # types supported by PNG and that they correspond to 1, 2, 3, 4
+        # channels respectively.  So we use the number of channels in
+        # the source image to determine which one we have.  We do not
+        # care about TUPLTYPE.
+        greyscale = depth <= 2
+        pamalpha = depth in (2,4)
+        supported = [2**x-1 for x in range(1,17)]
+        try:
+            mi = supported.index(maxval)
+        except ValueError:
+            raise NotImplementedError(
+              'your maxval (%s) not in supported list %s' %
+              (maxval, str(supported)))
+        bitdepth = mi+1
+        writer = Writer(width, height,
+                        greyscale=greyscale,
+                        bitdepth=bitdepth,
+                        interlace=options.interlace,
+                        transparent=options.transparent,
+                        background=options.background,
+                        alpha=bool(pamalpha or options.alpha),
+                        gamma=options.gamma,
+                        compression=options.compression)
+        if options.alpha:
+            pgmfile = open(options.alpha, 'rb')
+            format, awidth, aheight, adepth, amaxval = \
+              read_pnm_header(pgmfile, (b'P5',))
+            if amaxval != 255:
+                raise NotImplementedError(
+                  'maxval %s not supported for alpha channel' % amaxval)
+            if (awidth, aheight) != (width, height):
+                raise ValueError("alpha channel image size mismatch"
+                                 " (%s has %sx%s but %s has %sx%s)"
+                                 % (infilename, width, height,
+                                    options.alpha, awidth, aheight))
+            writer.convert_ppm_and_pgm(infile, pgmfile, outfile)
+        else:
+            writer.convert_pnm(infile, outfile)
+
+
+if __name__ == '__main__':
+    try:
+        _main(sys.argv)
+    except Error as e:
+        print(e, file=sys.stderr)