asd
This commit is contained in:
@ -0,0 +1,26 @@
|
||||
"""fontTools.ttLib -- a package for dealing with TrueType fonts."""
|
||||
|
||||
from fontTools.misc.loggingTools import deprecateFunction
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TTLibError(Exception):
    """Base exception for all errors raised by the ttLib package."""

    pass
|
||||
class TTLibFileIsCollectionError(TTLibError):
    """Raised when a single font was requested but the file contains a
    TrueType/OpenType collection (TTC/OTC)."""

    pass
|
||||
@deprecateFunction("use logging instead", category=DeprecationWarning)
def debugmsg(msg):
    """Print *msg* followed by the current wall-clock time (deprecated)."""
    import time

    timestamp = time.strftime(" (%H:%M:%S)", time.localtime(time.time()))
    print(msg + timestamp)
|
||||
from fontTools.ttLib.ttFont import *
|
||||
from fontTools.ttLib.ttCollection import TTCollection
|
||||
109
venv/lib/python3.12/site-packages/fontTools/ttLib/__main__.py
Normal file
109
venv/lib/python3.12/site-packages/fontTools/ttLib/__main__.py
Normal file
@ -0,0 +1,109 @@
|
||||
import sys
|
||||
from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
|
||||
from fontTools.ttLib.ttFont import *
|
||||
from fontTools.ttLib.ttCollection import TTCollection
|
||||
|
||||
|
||||
def main(args=None):
    """Open/save fonts with TTFont() or TTCollection()

    ./fonttools ttLib [-oFILE] [-yNUMBER] files...

    If multiple files are given on the command-line,
    they are each opened (as a font or collection),
    and added to the font list.

    If -o (output-file) argument is given, the font
    list is then saved to the output file, either as
    a single font, if there is only one font, or as
    a collection otherwise.

    If -y (font-number) argument is given, only the
    specified font from collections is opened.

    The above allow extracting a single font from a
    collection, or combining multiple fonts into a
    collection.

    If --lazy or --no-lazy are given, those are passed
    to the TTFont() or TTCollection() constructors.
    """
    # NOTE(review): configLogger is imported but never referenced below —
    # presumably kept for parity with other fonttools CLI modules; confirm.
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools ttLib",
        description="Open/save fonts with TTFont() or TTCollection()",
        epilog="""
        If multiple files are given on the command-line,
        they are each opened (as a font or collection),
        and added to the font list.

        The above, when combined with -o / --output,
        allows for extracting a single font from a
        collection, or combining multiple fonts into a
        collection.
        """,
    )
    parser.add_argument("font", metavar="font", nargs="*", help="Font file.")
    parser.add_argument(
        "-t", "--table", metavar="table", nargs="*", help="Tables to decompile."
    )
    parser.add_argument(
        "-o", "--output", metavar="FILE", default=None, help="Output file."
    )
    parser.add_argument(
        "-y", metavar="NUMBER", default=-1, help="Font number to load from collections."
    )
    parser.add_argument(
        "--lazy", action="store_true", default=None, help="Load fonts lazily."
    )
    parser.add_argument(
        "--no-lazy", dest="lazy", action="store_false", help="Load fonts immediately."
    )
    parser.add_argument(
        "--flavor",
        dest="flavor",
        default=None,
        help="Flavor of output font. 'woff' or 'woff2'.",
    )
    options = parser.parse_args(args)

    # -y defaults to -1, so options.y is never None here; int() also converts
    # a string value supplied on the command line.
    fontNumber = int(options.y) if options.y is not None else None
    outFile = options.output
    lazy = options.lazy
    flavor = options.flavor
    # When -t/--table is omitted, decompile everything ("*").
    tables = options.table if options.table is not None else ["*"]

    fonts = []
    for f in options.font:
        try:
            font = TTFont(f, fontNumber=fontNumber, lazy=lazy)
            fonts.append(font)
        except TTLibFileIsCollectionError:
            # The file holds multiple fonts: open as a collection and add all.
            collection = TTCollection(f, lazy=lazy)
            fonts.extend(collection.fonts)

    if lazy is False:
        # --no-lazy: force decompilation of the requested tables up front.
        for font in fonts:
            for table in tables if "*" not in tables else font.keys():
                font[table]  # Decompiles

    if outFile is not None:
        if len(fonts) == 1:
            fonts[0].flavor = flavor
            fonts[0].save(outFile)
        else:
            # A flavor (woff/woff2) only applies to a single font, not a TTC.
            if flavor is not None:
                raise TTLibError("Cannot set flavor for collections.")
            collection = TTCollection()
            collection.fonts = fonts
            collection.save(outFile)
# Allow running this module directly; the process exit status is main()'s
# return value.
if __name__ == "__main__":
    sys.exit(main())
|
||||
@ -0,0 +1,54 @@
|
||||
"""ttLib.macUtils.py -- Various Mac-specific stuff."""
|
||||
|
||||
from io import BytesIO
|
||||
from fontTools.misc.macRes import ResourceReader, ResourceError
|
||||
|
||||
|
||||
def getSFNTResIndices(path):
    """Determine whether a file has a 'sfnt' resource fork or not.

    Returns the list of 'sfnt' resource indices found in *path*, or an
    empty list if the file has no resource fork.
    """
    try:
        reader = ResourceReader(path)
        try:
            # close the reader even if getIndices raises, so the file
            # handle is not leaked
            return reader.getIndices("sfnt")
        finally:
            reader.close()
    except ResourceError:
        return []
||||
|
||||
|
||||
def openTTFonts(path):
    """Given a pathname, return a list of TTFont objects. In the case
    of a flat TTF/OTF file, the list will contain just one font object;
    but in the case of a Mac font suitcase it will contain as many
    font objects as there are sfnt resources in the file.
    """
    from fontTools import ttLib

    sfnts = getSFNTResIndices(path)
    if not sfnts:
        # Flat TTF/OTF file: a single font.
        fonts = [ttLib.TTFont(path)]
    else:
        # Mac suitcase: one font per 'sfnt' resource.
        fonts = [ttLib.TTFont(path, index) for index in sfnts]
    if not fonts:
        raise ttLib.TTLibError("no fonts found in file '%s'" % path)
    return fonts
||||
|
||||
|
||||
class SFNTResourceReader(BytesIO):
    """Simple read-only file wrapper for 'sfnt' resources."""

    def __init__(self, path, res_name_or_index):
        """Load one 'sfnt' resource from *path*.

        Args:
            path: pathname of the suitcase file.
            res_name_or_index: resource name (str) or index (int).

        Raises:
            ttLib.TTLibError: if no matching 'sfnt' resource exists.
        """
        from fontTools import ttLib

        reader = ResourceReader(path)
        try:
            # close the reader on every exit path (the original leaked it
            # when the resource was missing)
            if isinstance(res_name_or_index, str):
                rsrc = reader.getNamedResource("sfnt", res_name_or_index)
            else:
                rsrc = reader.getIndResource("sfnt", res_name_or_index)
        finally:
            reader.close()
        if rsrc is None:
            raise ttLib.TTLibError("sfnt resource not found: %s" % res_name_or_index)
        self.rsrc = rsrc
        super().__init__(rsrc.data)
        # mimic a real file object's .name attribute
        self.name = path
|
||||
@ -0,0 +1,393 @@
|
||||
""" Simplify TrueType glyphs by merging overlapping contours/components.
|
||||
|
||||
Requires https://github.com/fonttools/skia-pathops
|
||||
"""
|
||||
|
||||
import itertools
|
||||
import logging
|
||||
from typing import Callable, Iterable, Optional, Mapping
|
||||
|
||||
from fontTools.cffLib import CFFFontSet
|
||||
from fontTools.ttLib import ttFont
|
||||
from fontTools.ttLib.tables import _g_l_y_f
|
||||
from fontTools.ttLib.tables import _h_m_t_x
|
||||
from fontTools.misc.psCharStrings import T2CharString
|
||||
from fontTools.misc.roundTools import otRound, noRound
|
||||
from fontTools.pens.ttGlyphPen import TTGlyphPen
|
||||
from fontTools.pens.t2CharStringPen import T2CharStringPen
|
||||
|
||||
import pathops
|
||||
|
||||
|
||||
__all__ = ["removeOverlaps"]
|
||||
|
||||
|
||||
class RemoveOverlapsError(Exception):
    """Raised when overlap removal (skia-pathops simplify) fails for a glyph."""

    pass
||||
|
||||
|
||||
log = logging.getLogger("fontTools.ttLib.removeOverlaps")
|
||||
|
||||
_TTGlyphMapping = Mapping[str, ttFont._TTGlyph]
|
||||
|
||||
|
||||
def skPathFromGlyph(glyphName: str, glyphSet: _TTGlyphMapping) -> pathops.Path:
    """Build a pathops.Path by drawing *glyphName* from *glyphSet*."""
    skPath = pathops.Path()
    pen = skPath.getPen(glyphSet=glyphSet)
    glyphSet[glyphName].draw(pen)
    return skPath
||||
|
||||
|
||||
def skPathFromGlyphComponent(
    component: _g_l_y_f.GlyphComponent, glyphSet: _TTGlyphMapping
) -> pathops.Path:
    """Return the component's base glyph outline with the component's
    transformation applied."""
    baseGlyphName, transformation = component.getComponentInfo()
    path = skPathFromGlyph(baseGlyphName, glyphSet)
    return path.transform(*transformation)
||||
|
||||
|
||||
def componentsOverlap(glyph: _g_l_y_f.Glyph, glyphSet: _TTGlyphMapping) -> bool:
    """Return True if any two components of a composite glyph intersect."""
    if not glyph.isComposite():
        raise ValueError("This method only works with TrueType composite glyphs")
    if len(glyph.components) < 2:
        return False  # single component, no overlaps

    # Lazily convert each component to a path, at most once per component.
    cached_paths = {}

    def _get_nth_component_path(index: int) -> pathops.Path:
        path = cached_paths.get(index)
        if path is None:
            path = skPathFromGlyphComponent(glyph.components[index], glyphSet)
            cached_paths[index] = path
        return path

    # Probe every unordered pair; a non-empty intersection means overlap.
    for i, j in itertools.combinations(range(len(glyph.components)), 2):
        intersection = pathops.op(
            _get_nth_component_path(i),
            _get_nth_component_path(j),
            pathops.PathOp.INTERSECTION,
            fix_winding=False,
            keep_starting_points=False,
        )
        if intersection:
            return True
    return False
||||
|
||||
|
||||
def ttfGlyphFromSkPath(path: pathops.Path) -> _g_l_y_f.Glyph:
    """Convert a pathops.Path into a simple (non-composite) TrueType glyph."""
    # Skia paths have no 'components', no need for glyphSet
    pen = TTGlyphPen(glyphSet=None)
    path.draw(pen)
    result = pen.glyph()
    assert not result.isComposite()
    # compute glyph.xMin (glyfTable parameter unused for non composites)
    result.recalcBounds(glyfTable=None)
    return result
||||
|
||||
|
||||
def _charString_from_SkPath(
    path: pathops.Path, charString: T2CharString
) -> T2CharString:
    """Convert *path* to a new T2 charstring, preserving the original's width."""
    # Omit the width when it equals the private dict's defaultWidthX
    # (pass None); otherwise encode it relative to nominalWidthX.
    if charString.width == charString.private.defaultWidthX:
        width = None
    else:
        width = charString.width - charString.private.nominalWidthX
    t2Pen = T2CharStringPen(width=width, glyphSet=None)
    path.draw(t2Pen)
    return t2Pen.getCharString(charString.private, charString.globalSubrs)
||||
|
||||
|
||||
def _round_path(
    path: pathops.Path, round: Callable[[float], float] = otRound
) -> pathops.Path:
    """Return a copy of *path* with every point coordinate passed through *round*."""
    result = pathops.Path()
    for verb, points in path:
        rounded_points = [(round(pt[0]), round(pt[1])) for pt in points]
        result.add(verb, *rounded_points)
    return result
||||
|
||||
|
||||
def _simplify(
    path: pathops.Path,
    debugGlyphName: str,
    *,
    round: Callable[[float], float] = otRound,
) -> pathops.Path:
    """Merge overlapping contours of *path* with pathops.simplify.

    If simplification fails with float coordinates, retry after rounding
    all coordinates with *round*.

    Raises:
        RemoveOverlapsError: if simplification fails even after rounding.
    """
    # skia-pathops has a bug where it sometimes fails to simplify paths when there
    # are float coordinates and control points are very close to one another.
    # Rounding coordinates to integers works around the bug.
    # Since we are going to round glyf coordinates later on anyway, here it is
    # ok(-ish) to also round before simplify. Better than failing the whole process
    # for the entire font.
    # https://bugs.chromium.org/p/skia/issues/detail?id=11958
    # https://github.com/google/fonts/issues/3365
    # TODO(anthrotype): remove once this Skia bug is fixed
    try:
        return pathops.simplify(path, clockwise=path.clockwise)
    except pathops.PathOpsError:
        pass

    path = _round_path(path, round=round)
    try:
        path = pathops.simplify(path, clockwise=path.clockwise)
        log.debug(
            # typo fixed: "succeded" -> "succeeded"
            "skia-pathops failed to simplify '%s' with float coordinates, "
            "but succeeded using rounded integer coordinates",
            debugGlyphName,
        )
        return path
    except pathops.PathOpsError as e:
        if log.isEnabledFor(logging.DEBUG):
            path.dump()
        raise RemoveOverlapsError(
            f"Failed to remove overlaps from glyph {debugGlyphName!r}"
        ) from e

    raise AssertionError("Unreachable")
||||
|
||||
|
||||
def _same_path(path1: pathops.Path, path2: pathops.Path) -> bool:
    """Compare two paths as unordered sets of contours."""
    contours1 = {tuple(contour) for contour in path1.contours}
    contours2 = {tuple(contour) for contour in path2.contours}
    return contours1 == contours2
||||
|
||||
|
||||
def removeTTGlyphOverlaps(
    glyphName: str,
    glyphSet: _TTGlyphMapping,
    glyfTable: _g_l_y_f.table__g_l_y_f,
    hmtxTable: _h_m_t_x.table__h_m_t_x,
    removeHinting: bool = True,
) -> bool:
    """Remove overlaps from a single 'glyf' glyph; return True if it was modified.

    Args:
        glyphName: name of the glyph to process.
        glyphSet: glyph mapping used by the drawing pens.
        glyfTable: the font's 'glyf' table, modified in place.
        hmtxTable: the font's 'hmtx' table; the LSB is kept in sync with xMin.
        removeHinting: when True, strip hinting from glyphs left unmodified.
    """
    glyph = glyfTable[glyphName]
    # decompose composite glyphs only if components overlap each other
    # (note: `and` binds tighter than `or`, so this means:
    #  has-contours OR (is-composite AND components-overlap))
    if (
        glyph.numberOfContours > 0
        or glyph.isComposite()
        and componentsOverlap(glyph, glyphSet)
    ):
        path = skPathFromGlyph(glyphName, glyphSet)

        # remove overlaps
        path2 = _simplify(path, glyphName)

        # replace TTGlyph if simplified path is different (ignoring contour order)
        if not _same_path(path, path2):
            glyfTable[glyphName] = glyph = ttfGlyphFromSkPath(path2)
            # simplified glyph is always unhinted
            assert not glyph.program
            # also ensure hmtx LSB == glyph.xMin so glyph origin is at x=0
            width, lsb = hmtxTable[glyphName]
            if lsb != glyph.xMin:
                hmtxTable[glyphName] = (width, glyph.xMin)
            return True

    if removeHinting:
        glyph.removeHinting()
    return False
||||
|
||||
|
||||
def _remove_glyf_overlaps(
    *,
    font: ttFont.TTFont,
    glyphNames: Iterable[str],
    glyphSet: _TTGlyphMapping,
    removeHinting: bool,
    ignoreErrors: bool,
) -> None:
    """Remove overlaps from the given 'glyf' glyphs, updating glyf/hmtx in place."""
    glyfTable = font["glyf"]
    hmtxTable = font["hmtx"]

    # process all simple glyphs first, then composites with increasing component depth,
    # so that by the time we test for component intersections the respective base glyphs
    # have already been simplified
    glyphNames = sorted(
        glyphNames,
        key=lambda name: (
            (
                glyfTable[name].getCompositeMaxpValues(glyfTable).maxComponentDepth
                if glyfTable[name].isComposite()
                else 0
            ),
            name,
        ),
    )
    modified = set()
    for glyphName in glyphNames:
        try:
            if removeTTGlyphOverlaps(
                glyphName, glyphSet, glyfTable, hmtxTable, removeHinting
            ):
                modified.add(glyphName)
        except RemoveOverlapsError:
            if not ignoreErrors:
                raise
            # best-effort mode: leave the tricky glyph unchanged and continue
            log.error("Failed to remove overlaps for '%s'", glyphName)

    log.debug("Removed overlaps for %s glyphs:\n%s", len(modified), " ".join(modified))
||||
|
||||
|
||||
def _remove_charstring_overlaps(
    *,
    glyphName: str,
    glyphSet: _TTGlyphMapping,
    cffFontSet: CFFFontSet,
) -> bool:
    """Simplify one CFF charstring; return True if it was replaced."""
    original = skPathFromGlyph(glyphName, glyphSet)

    # remove overlaps; noRound keeps the float precision of CFF outlines
    simplified = _simplify(original, glyphName, round=noRound)

    # replace the charstring only if the simplified path actually differs
    # (ignoring contour order)
    if _same_path(original, simplified):
        return False

    charStrings = cffFontSet[0].CharStrings
    charStrings[glyphName] = _charString_from_SkPath(simplified, charStrings[glyphName])
    return True
||||
|
||||
|
||||
def _remove_cff_overlaps(
    *,
    font: ttFont.TTFont,
    glyphNames: Iterable[str],
    glyphSet: _TTGlyphMapping,
    removeHinting: bool,
    ignoreErrors: bool,
    removeUnusedSubroutines: bool = True,
) -> None:
    """Remove overlaps from the given CFF charstrings, modifying the font in place."""
    cffFontSet = font["CFF "].cff
    modified = set()
    for glyphName in glyphNames:
        try:
            if _remove_charstring_overlaps(
                glyphName=glyphName,
                glyphSet=glyphSet,
                cffFontSet=cffFontSet,
            ):
                modified.add(glyphName)
        except RemoveOverlapsError:
            if not ignoreErrors:
                raise
            # best-effort mode: leave the tricky glyph unchanged and continue
            log.error("Failed to remove overlaps for '%s'", glyphName)

    if not modified:
        log.debug("No overlaps found in the specified CFF glyphs")
        return

    # hints are invalidated by changing the outlines, so drop them for the
    # whole CFF font when requested
    if removeHinting:
        cffFontSet.remove_hints()

    if removeUnusedSubroutines:
        cffFontSet.remove_unused_subroutines()

    log.debug("Removed overlaps for %s glyphs:\n%s", len(modified), " ".join(modified))
||||
|
||||
|
||||
def removeOverlaps(
    font: ttFont.TTFont,
    glyphNames: Optional[Iterable[str]] = None,
    removeHinting: bool = True,
    ignoreErrors: bool = False,
    *,
    removeUnusedSubroutines: bool = True,
) -> None:
    """Simplify glyphs in TTFont by merging overlapping contours.

    Overlapping components are first decomposed to simple contours, then merged.

    Currently this only works for fonts with 'glyf' or 'CFF ' tables.
    Raises NotImplementedError if 'glyf' or 'CFF ' tables are absent.

    Note that removing overlaps invalidates the hinting. By default we drop hinting
    from all glyphs whether or not overlaps are removed from a given one, as it would
    look weird if only some glyphs are left (un)hinted.

    Args:
        font: input TTFont object, modified in place.
        glyphNames: optional iterable of glyph names (str) to remove overlaps from.
            By default, all glyphs in the font are processed.
        removeHinting (bool): set to False to keep hinting for unmodified glyphs.
        ignoreErrors (bool): set to True to ignore errors while removing overlaps,
            thus keeping the tricky glyphs unchanged (fonttools/fonttools#2363).
        removeUnusedSubroutines (bool): set to False to keep unused subroutines
            in CFF table after removing overlaps. Default is to remove them if
            any glyphs are modified.
    """

    if "glyf" not in font and "CFF " not in font:
        raise NotImplementedError(
            "No outline data found in the font: missing 'glyf' or 'CFF ' table"
        )

    if glyphNames is None:
        glyphNames = font.getGlyphOrder()

    # Wraps the underlying glyphs, takes care of interfacing with drawing pens
    glyphSet = font.getGlyphSet()

    # a font may conceivably carry both tables; each is processed independently
    if "glyf" in font:
        _remove_glyf_overlaps(
            font=font,
            glyphNames=glyphNames,
            glyphSet=glyphSet,
            removeHinting=removeHinting,
            ignoreErrors=ignoreErrors,
        )

    if "CFF " in font:
        _remove_cff_overlaps(
            font=font,
            glyphNames=glyphNames,
            glyphSet=glyphSet,
            removeHinting=removeHinting,
            ignoreErrors=ignoreErrors,
            removeUnusedSubroutines=removeUnusedSubroutines,
        )
||||
|
||||
|
||||
def main(args=None):
    """Simplify glyphs in TTFont by merging overlapping contours."""

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools ttLib.removeOverlaps", description=__doc__
    )

    parser.add_argument("input", metavar="INPUT.ttf", help="Input font file")
    parser.add_argument("output", metavar="OUTPUT.ttf", help="Output font file")
    parser.add_argument(
        "glyphs",
        metavar="GLYPHS",
        nargs="*",
        help="Optional list of glyph names to remove overlaps from",
    )
    parser.add_argument(
        "--keep-hinting",
        action="store_true",
        help="Keep hinting for unmodified glyphs, default is to drop hinting",
    )
    parser.add_argument(
        "--ignore-errors",
        action="store_true",
        help="ignore errors while removing overlaps, "
        "thus keeping the tricky glyphs unchanged",
    )
    parser.add_argument(
        "--keep-unused-subroutines",
        action="store_true",
        help="Keep unused subroutines in CFF table after removing overlaps, "
        "default is to remove them if any glyphs are modified",
    )
    args = parser.parse_args(args)

    # TTFont as context manager: the underlying reader is closed on exit.
    with ttFont.TTFont(args.input) as font:
        removeOverlaps(
            font=font,
            glyphNames=args.glyphs or None,
            removeHinting=not args.keep_hinting,
            ignoreErrors=args.ignore_errors,
            removeUnusedSubroutines=not args.keep_unused_subroutines,
        )
        font.save(args.output)


if __name__ == "__main__":
    main()
|
||||
@ -0,0 +1,284 @@
|
||||
"""Reorder glyphs in a font."""
|
||||
|
||||
__author__ = "Rod Sheeter"
|
||||
|
||||
# See https://docs.google.com/document/d/1h9O-C_ndods87uY0QeIIcgAMiX2gDTpvO_IhMJsKAqs/
|
||||
# for details.
|
||||
|
||||
|
||||
from fontTools import ttLib
|
||||
from fontTools.ttLib.tables import otBase
|
||||
from fontTools.ttLib.tables import otTables as ot
|
||||
from abc import ABC, abstractmethod
|
||||
from dataclasses import dataclass
|
||||
from collections import deque
|
||||
from typing import (
|
||||
Optional,
|
||||
Any,
|
||||
Callable,
|
||||
Deque,
|
||||
Iterable,
|
||||
List,
|
||||
Tuple,
|
||||
)
|
||||
|
||||
|
||||
_COVERAGE_ATTR = "Coverage" # tables that have one coverage use this name
|
||||
|
||||
|
||||
def _sort_by_gid(
|
||||
get_glyph_id: Callable[[str], int],
|
||||
glyphs: List[str],
|
||||
parallel_list: Optional[List[Any]],
|
||||
):
|
||||
if parallel_list:
|
||||
reordered = sorted(
|
||||
((g, e) for g, e in zip(glyphs, parallel_list)),
|
||||
key=lambda t: get_glyph_id(t[0]),
|
||||
)
|
||||
sorted_glyphs, sorted_parallel_list = map(list, zip(*reordered))
|
||||
parallel_list[:] = sorted_parallel_list
|
||||
else:
|
||||
sorted_glyphs = sorted(glyphs, key=get_glyph_id)
|
||||
|
||||
glyphs[:] = sorted_glyphs
|
||||
|
||||
|
||||
def _get_dotted_attr(value: Any, dotted_attr: str) -> Any:
|
||||
attr_names = dotted_attr.split(".")
|
||||
assert attr_names
|
||||
|
||||
while attr_names:
|
||||
attr_name = attr_names.pop(0)
|
||||
value = getattr(value, attr_name)
|
||||
return value
|
||||
|
||||
|
||||
class ReorderRule(ABC):
    """A rule to reorder something in a font to match the fonts glyph order."""

    @abstractmethod
    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None:
        """Mutate *value* in place so its glyph-ordered data matches *font*'s order."""
        ...
||||
|
||||
|
||||
@dataclass(frozen=True)
class ReorderCoverage(ReorderRule):
    """Reorder a Coverage table, and optionally a list that is sorted parallel to it."""

    # A list that is parallel to Coverage
    parallel_list_attr: Optional[str] = None
    # Dotted attribute path of the coverage (or list of coverages) on the value.
    coverage_attr: str = _COVERAGE_ATTR

    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None:
        """Sort the coverage glyphs (and any parallel list) by new glyph id."""
        coverage = _get_dotted_attr(value, self.coverage_attr)

        if type(coverage) is not list:
            # Normal path, process one coverage that might have a parallel list
            parallel_list = None
            if self.parallel_list_attr:
                parallel_list = _get_dotted_attr(value, self.parallel_list_attr)
                assert (
                    type(parallel_list) is list
                ), f"{self.parallel_list_attr} should be a list"
                assert len(parallel_list) == len(coverage.glyphs), "Nothing makes sense"

            _sort_by_gid(font.getGlyphID, coverage.glyphs, parallel_list)

        else:
            # A few tables have a list of coverage. No parallel list can exist.
            assert (
                not self.parallel_list_attr
            ), f"Can't have multiple coverage AND a parallel list; {self}"
            for coverage_entry in coverage:
                _sort_by_gid(font.getGlyphID, coverage_entry.glyphs, None)
||||
|
||||
|
||||
@dataclass(frozen=True)
class ReorderList(ReorderRule):
    """Reorder the items within a list to match the updated glyph order.

    Useful when a list ordered by coverage itself contains something ordered by a gid.
    For example, the PairSet table of https://docs.microsoft.com/en-us/typography/opentype/spec/gpos#lookup-type-2-pair-adjustment-positioning-subtable.
    """

    # Dotted attribute path of the list on the visited value.
    list_attr: str
    # Attribute of each list item that holds the glyph name to sort by.
    key: str

    def apply(self, font: ttLib.TTFont, value: otBase.BaseTable) -> None:
        """Sort the target list in place by the glyph id of each item's *key* attribute."""
        lst = _get_dotted_attr(value, self.list_attr)
        assert isinstance(lst, list), f"{self.list_attr} should be a list"
        lst.sort(key=lambda v: font.getGlyphID(getattr(v, self.key)))
||||
|
||||
|
||||
# (Type, Optional Format) => List[ReorderRule]
|
||||
# Encodes the relationships Cosimo identified
|
||||
_REORDER_RULES = {
|
||||
# GPOS
|
||||
(ot.SinglePos, 1): [ReorderCoverage()],
|
||||
(ot.SinglePos, 2): [ReorderCoverage(parallel_list_attr="Value")],
|
||||
(ot.PairPos, 1): [ReorderCoverage(parallel_list_attr="PairSet")],
|
||||
(ot.PairSet, None): [ReorderList("PairValueRecord", key="SecondGlyph")],
|
||||
(ot.PairPos, 2): [ReorderCoverage()],
|
||||
(ot.CursivePos, 1): [ReorderCoverage(parallel_list_attr="EntryExitRecord")],
|
||||
(ot.MarkBasePos, 1): [
|
||||
ReorderCoverage(
|
||||
coverage_attr="MarkCoverage", parallel_list_attr="MarkArray.MarkRecord"
|
||||
),
|
||||
ReorderCoverage(
|
||||
coverage_attr="BaseCoverage", parallel_list_attr="BaseArray.BaseRecord"
|
||||
),
|
||||
],
|
||||
(ot.MarkLigPos, 1): [
|
||||
ReorderCoverage(
|
||||
coverage_attr="MarkCoverage", parallel_list_attr="MarkArray.MarkRecord"
|
||||
),
|
||||
ReorderCoverage(
|
||||
coverage_attr="LigatureCoverage",
|
||||
parallel_list_attr="LigatureArray.LigatureAttach",
|
||||
),
|
||||
],
|
||||
(ot.MarkMarkPos, 1): [
|
||||
ReorderCoverage(
|
||||
coverage_attr="Mark1Coverage", parallel_list_attr="Mark1Array.MarkRecord"
|
||||
),
|
||||
ReorderCoverage(
|
||||
coverage_attr="Mark2Coverage", parallel_list_attr="Mark2Array.Mark2Record"
|
||||
),
|
||||
],
|
||||
(ot.ContextPos, 1): [ReorderCoverage(parallel_list_attr="PosRuleSet")],
|
||||
(ot.ContextPos, 2): [ReorderCoverage()],
|
||||
(ot.ContextPos, 3): [ReorderCoverage()],
|
||||
(ot.ChainContextPos, 1): [ReorderCoverage(parallel_list_attr="ChainPosRuleSet")],
|
||||
(ot.ChainContextPos, 2): [ReorderCoverage()],
|
||||
(ot.ChainContextPos, 3): [
|
||||
ReorderCoverage(coverage_attr="BacktrackCoverage"),
|
||||
ReorderCoverage(coverage_attr="InputCoverage"),
|
||||
ReorderCoverage(coverage_attr="LookAheadCoverage"),
|
||||
],
|
||||
# GSUB
|
||||
(ot.ContextSubst, 1): [ReorderCoverage(parallel_list_attr="SubRuleSet")],
|
||||
(ot.ContextSubst, 2): [ReorderCoverage()],
|
||||
(ot.ContextSubst, 3): [ReorderCoverage()],
|
||||
(ot.ChainContextSubst, 1): [ReorderCoverage(parallel_list_attr="ChainSubRuleSet")],
|
||||
(ot.ChainContextSubst, 2): [ReorderCoverage()],
|
||||
(ot.ChainContextSubst, 3): [
|
||||
ReorderCoverage(coverage_attr="BacktrackCoverage"),
|
||||
ReorderCoverage(coverage_attr="InputCoverage"),
|
||||
ReorderCoverage(coverage_attr="LookAheadCoverage"),
|
||||
],
|
||||
(ot.ReverseChainSingleSubst, 1): [
|
||||
ReorderCoverage(parallel_list_attr="Substitute"),
|
||||
ReorderCoverage(coverage_attr="BacktrackCoverage"),
|
||||
ReorderCoverage(coverage_attr="LookAheadCoverage"),
|
||||
],
|
||||
# GDEF
|
||||
(ot.AttachList, None): [ReorderCoverage(parallel_list_attr="AttachPoint")],
|
||||
(ot.LigCaretList, None): [ReorderCoverage(parallel_list_attr="LigGlyph")],
|
||||
(ot.MarkGlyphSetsDef, None): [ReorderCoverage()],
|
||||
# MATH
|
||||
(ot.MathGlyphInfo, None): [ReorderCoverage(coverage_attr="ExtendedShapeCoverage")],
|
||||
(ot.MathItalicsCorrectionInfo, None): [
|
||||
ReorderCoverage(parallel_list_attr="ItalicsCorrection")
|
||||
],
|
||||
(ot.MathTopAccentAttachment, None): [
|
||||
ReorderCoverage(
|
||||
coverage_attr="TopAccentCoverage", parallel_list_attr="TopAccentAttachment"
|
||||
)
|
||||
],
|
||||
(ot.MathKernInfo, None): [
|
||||
ReorderCoverage(
|
||||
coverage_attr="MathKernCoverage", parallel_list_attr="MathKernInfoRecords"
|
||||
)
|
||||
],
|
||||
(ot.MathVariants, None): [
|
||||
ReorderCoverage(
|
||||
coverage_attr="VertGlyphCoverage",
|
||||
parallel_list_attr="VertGlyphConstruction",
|
||||
),
|
||||
ReorderCoverage(
|
||||
coverage_attr="HorizGlyphCoverage",
|
||||
parallel_list_attr="HorizGlyphConstruction",
|
||||
),
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
# TODO Port to otTraverse
|
||||
|
||||
SubTablePath = Tuple[otBase.BaseTable.SubTableEntry, ...]
|
||||
|
||||
|
||||
def _bfs_base_table(
    root: otBase.BaseTable, root_accessor: str
) -> Iterable[SubTablePath]:
    """Yield subtable paths starting at *root* in breadth-first order."""

    def _append_at_end(frontier, new_entries):
        # FIFO growth of the frontier gives breadth-first traversal.
        frontier.extend(new_entries)

    yield from _traverse_ot_data(root, root_accessor, _append_at_end)
||||
|
||||
|
||||
# Given f(current frontier, new entries) add new entries to frontier
|
||||
AddToFrontierFn = Callable[[Deque[SubTablePath], List[SubTablePath]], None]
|
||||
|
||||
|
||||
def _traverse_ot_data(
    root: otBase.BaseTable, root_accessor: str, add_to_frontier_fn: AddToFrontierFn
) -> Iterable[SubTablePath]:
    """Yield the path from *root* to every reachable subtable.

    Traversal order is controlled by *add_to_frontier_fn*: extending the right
    end of the deque gives breadth-first order.
    """
    # no visited because general otData is forward-offset only and thus cannot cycle

    frontier: Deque[SubTablePath] = deque()
    frontier.append((otBase.BaseTable.SubTableEntry(root_accessor, root),))
    while frontier:
        # path is (value, attr_name) tuples. attr_name is attr of parent to get value
        path = frontier.popleft()
        current = path[-1].value

        yield path

        new_entries = []
        for subtable_entry in current.iterSubTables():
            new_entries.append(path + (subtable_entry,))

        add_to_frontier_fn(frontier, new_entries)
||||
|
||||
|
||||
def reorderGlyphs(font: ttLib.TTFont, new_glyph_order: List[str]):
    """Reorder the font's glyphs to *new_glyph_order*, updating dependent tables.

    Raises:
        ValueError: if *new_glyph_order* is not a permutation of the font's
            current glyph set, or if some tables could not be fully loaded.
    """
    old_glyph_order = font.getGlyphOrder()
    if len(new_glyph_order) != len(old_glyph_order):
        raise ValueError(
            f"New glyph order contains {len(new_glyph_order)} glyphs, "
            f"but font has {len(old_glyph_order)} glyphs"
        )

    if set(old_glyph_order) != set(new_glyph_order):
        raise ValueError(
            "New glyph order does not contain the same set of glyphs as the font:\n"
            f"* only in new: {set(new_glyph_order) - set(old_glyph_order)}\n"
            f"* only in old: {set(old_glyph_order) - set(new_glyph_order)}"
        )

    # Changing the order of glyphs in a TTFont requires that all tables that use
    # glyph indexes have been fully decompiled.
    # Cf. https://github.com/fonttools/fonttools/issues/2060
    font.ensureDecompiled()
    not_loaded = sorted(t for t in font.keys() if not font.isLoaded(t))
    if not_loaded:
        raise ValueError(f"Everything should be loaded, following aren't: {not_loaded}")

    font.setGlyphOrder(new_glyph_order)

    # Tables whose subtables hold Coverage (and parallel arrays) that must be
    # re-sorted to match the new glyph ids.
    coverage_containers = {"GDEF", "GPOS", "GSUB", "MATH"}
    for tag in coverage_containers:
        if tag in font.keys():
            for path in _bfs_base_table(font[tag].table, f'font["{tag}"]'):
                value = path[-1].value
                reorder_key = (type(value), getattr(value, "Format", None))
                for reorder in _REORDER_RULES.get(reorder_key, []):
                    reorder.apply(font, value)

    if "CFF " in font:
        # CFF keeps its own charset and charstring order; rebuild both to match.
        cff_table = font["CFF "]
        charstrings = cff_table.cff.topDictIndex[0].CharStrings.charStrings
        cff_table.cff.topDictIndex[0].charset = new_glyph_order
        cff_table.cff.topDictIndex[0].CharStrings.charStrings = {
            k: charstrings.get(k) for k in new_glyph_order
        }
||||
436
venv/lib/python3.12/site-packages/fontTools/ttLib/scaleUpem.py
Normal file
436
venv/lib/python3.12/site-packages/fontTools/ttLib/scaleUpem.py
Normal file
@ -0,0 +1,436 @@
|
||||
"""Change the units-per-EM of a font.
|
||||
|
||||
AAT and Graphite tables are not supported. CFF/CFF2 fonts
|
||||
are de-subroutinized."""
|
||||
|
||||
from fontTools.ttLib.ttVisitor import TTVisitor
|
||||
import fontTools.ttLib as ttLib
|
||||
import fontTools.ttLib.tables.otBase as otBase
|
||||
import fontTools.ttLib.tables.otTables as otTables
|
||||
from fontTools.cffLib import VarStoreData
|
||||
import fontTools.cffLib.specializer as cffSpecializer
|
||||
from fontTools.varLib import builder # for VarData.calculateNumShorts
|
||||
from fontTools.varLib.multiVarStore import OnlineMultiVarStoreBuilder
|
||||
from fontTools.misc.vector import Vector
|
||||
from fontTools.misc.fixedTools import otRound
|
||||
from fontTools.misc.iterTools import batched
|
||||
|
||||
|
||||
__all__ = ["scale_upem", "ScalerVisitor"]
|
||||
|
||||
|
||||
class ScalerVisitor(TTVisitor):
    """TTVisitor that multiplies visited coordinate/metric values by a factor."""

    def __init__(self, scaleFactor):
        # Ratio newUnitsPerEm / oldUnitsPerEm applied to every visited value.
        self.scaleFactor = scaleFactor

    def scale(self, v):
        """Return *v* scaled by the factor and rounded to an int with otRound."""
        return otRound(v * self.scaleFactor)
||||
|
||||
|
||||
# Simple numeric attributes that scale linearly with units-per-em, grouped
# by the table/subtable class that carries them.
# NOTE(review): several entries like ("defaultVertOriginY") are plain strings,
# not 1-tuples — this assumes register_attrs() accepts a bare string as a
# single attribute name; confirm against the visitor implementation.
@ScalerVisitor.register_attrs(
    (
        (ttLib.getTableClass("head"), ("unitsPerEm", "xMin", "yMin", "xMax", "yMax")),
        (ttLib.getTableClass("post"), ("underlinePosition", "underlineThickness")),
        (ttLib.getTableClass("VORG"), ("defaultVertOriginY")),
        (
            ttLib.getTableClass("hhea"),
            (
                "ascent",
                "descent",
                "lineGap",
                "advanceWidthMax",
                "minLeftSideBearing",
                "minRightSideBearing",
                "xMaxExtent",
                "caretOffset",
            ),
        ),
        (
            ttLib.getTableClass("vhea"),
            (
                "ascent",
                "descent",
                "lineGap",
                "advanceHeightMax",
                "minTopSideBearing",
                "minBottomSideBearing",
                "yMaxExtent",
                "caretOffset",
            ),
        ),
        (
            ttLib.getTableClass("OS/2"),
            (
                "xAvgCharWidth",
                "ySubscriptXSize",
                "ySubscriptYSize",
                "ySubscriptXOffset",
                "ySubscriptYOffset",
                "ySuperscriptXSize",
                "ySuperscriptYSize",
                "ySuperscriptXOffset",
                "ySuperscriptYOffset",
                "yStrikeoutSize",
                "yStrikeoutPosition",
                "sTypoAscender",
                "sTypoDescender",
                "sTypoLineGap",
                "usWinAscent",
                "usWinDescent",
                "sxHeight",
                "sCapHeight",
            ),
        ),
        (
            otTables.ValueRecord,
            ("XAdvance", "YAdvance", "XPlacement", "YPlacement"),
        ),  # GPOS
        (otTables.Anchor, ("XCoordinate", "YCoordinate")),  # GPOS
        (otTables.CaretValue, ("Coordinate")),  # GDEF
        (otTables.BaseCoord, ("Coordinate")),  # BASE
        (otTables.MathValueRecord, ("Value")),  # MATH
        (otTables.ClipBox, ("xMin", "yMin", "xMax", "yMax")),  # COLR
    )
)
def visit(visitor, obj, attr, value):
    # Generic handler: replace the attribute with its scaled value.
    setattr(obj, attr, visitor.scale(value))
|
||||
|
||||
|
||||
@ScalerVisitor.register_attr(
    (ttLib.getTableClass("hmtx"), ttLib.getTableClass("vmtx")), "metrics"
)
def visit(visitor, obj, attr, metrics):
    """Scale advance and side bearing for every glyph in hmtx/vmtx metrics."""
    for glyph_name, (advance, side_bearing) in metrics.items():
        metrics[glyph_name] = (
            visitor.scale(advance),
            visitor.scale(side_bearing),
        )
|
||||
|
||||
|
||||
# BUGFIX: this handler was registered on getTableClass("VMTX"), but
# VOriginRecords is an attribute of the VORG table (whose
# defaultVertOriginY is already scaled in the attrs table above); the
# vertical metrics table tag is lowercase "vmtx" and has no such
# attribute.  Registered on "VMTX", the per-glyph vertical origins were
# never scaled.
@ScalerVisitor.register_attr(ttLib.getTableClass("VORG"), "VOriginRecords")
def visit(visitor, obj, attr, VOriginRecords):
    """Scale every per-glyph vertical-origin override (VORG.VOriginRecords)."""
    for g in VOriginRecords:
        VOriginRecords[g] = visitor.scale(VOriginRecords[g])
|
||||
|
||||
|
||||
@ScalerVisitor.register_attr(ttLib.getTableClass("glyf"), "glyphs")
def visit(visitor, obj, attr, glyphs):
    """Scale every glyf glyph: bounding box, component offsets, outline points."""
    for glyph in glyphs.values():
        # Bounding-box fields are optional; skip glyphs that lack them.
        for bound in ("xMin", "xMax", "yMin", "yMax"):
            current = getattr(glyph, bound, None)
            if current is not None:
                setattr(glyph, bound, visitor.scale(current))

        if glyph.isComposite():
            # Composite glyphs only carry component placement offsets.
            for comp in glyph.components:
                comp.x = visitor.scale(comp.x)
                comp.y = visitor.scale(comp.y)
        elif hasattr(glyph, "coordinates"):
            # Simple glyph: scale each on-/off-curve point in place.
            points = glyph.coordinates
            for idx, (px, py) in enumerate(points):
                points[idx] = visitor.scale(px), visitor.scale(py)
|
||||
|
||||
|
||||
@ScalerVisitor.register_attr(ttLib.getTableClass("gvar"), "variations")
def visit(visitor, obj, attr, variations):
    """Scale all gvar delta coordinates in place.

    Deltas are expressed in font units and scale linearly with
    units-per-em.  ``None`` entries are left untouched.

    Note: the original looked up each glyph in the glyf table into an
    unused local (``glyph = glyfTable[glyphName]``); the lookup is not
    needed to scale the raw deltas and has been removed.
    """
    for glyphName, varlist in variations.items():
        for var in varlist:
            coordinates = var.coordinates
            for i, xy in enumerate(coordinates):
                if xy is None:
                    continue
                coordinates[i] = visitor.scale(xy[0]), visitor.scale(xy[1])
|
||||
|
||||
|
||||
@ScalerVisitor.register_attr(ttLib.getTableClass("VARC"), "table")
def visit(visitor, obj, attr, varc):
    """Scale a VARC (variable-composite) table.

    Component transforms carry translation and transform-center values in
    font units, both as static fields and as deltas in the MultiVarStore;
    the store is rebuilt with the scaled deltas.
    """
    # VarComposite variations are a pain

    fvar = visitor.font["fvar"]
    fvarAxes = [a.axisTag for a in fvar.axes]

    store = varc.MultiVarStore
    storeBuilder = OnlineMultiVarStoreBuilder(fvarAxes)

    for g in varc.VarCompositeGlyphs.VarCompositeGlyph:
        for component in g.components:
            # Only translation and transform-center are in font units;
            # rotation/scale/skew are unit-less and stay as-is.
            t = component.transform
            t.translateX = visitor.scale(t.translateX)
            t.translateY = visitor.scale(t.translateY)
            t.tCenterX = visitor.scale(t.tCenterX)
            t.tCenterY = visitor.scale(t.tCenterY)

            if component.axisValuesVarIndex != otTables.NO_VARIATION_INDEX:
                varIdx = component.axisValuesVarIndex
                # TODO Move this code duplicated below to MultiVarStore.__getitem__,
                # or a getDeltasAndSupports().
                if varIdx != otTables.NO_VARIATION_INDEX:
                    major = varIdx >> 16
                    minor = varIdx & 0xFFFF
                    varData = store.MultiVarData[major]
                    vec = varData.Item[minor]
                    storeBuilder.setSupports(store.get_supports(major, fvar.axes))
                    if vec:
                        m = len(vec) // varData.VarRegionCount
                        vec = list(batched(vec, m))
                        vec = [Vector(v) for v in vec]
                        # Axis-value deltas are normalized coordinates, not
                        # font units: re-store them unscaled.
                        component.axisValuesVarIndex = storeBuilder.storeDeltas(vec)
                    else:
                        component.axisValuesVarIndex = otTables.NO_VARIATION_INDEX

            if component.transformVarIndex != otTables.NO_VARIATION_INDEX:
                varIdx = component.transformVarIndex
                if varIdx != otTables.NO_VARIATION_INDEX:
                    # BUGFIX: a stale duplicate lookup
                    # (``vec = varData.Item[varIdx & 0xFFFF]``) was removed
                    # here; it read ``varData`` before it was assigned for
                    # this branch, raising NameError whenever the
                    # axis-values branch above had not run, and its result
                    # was immediately overwritten below anyway.
                    major = varIdx >> 16
                    minor = varIdx & 0xFFFF
                    varData = store.MultiVarData[major]
                    vec = varData.Item[minor]
                    storeBuilder.setSupports(store.get_supports(major, fvar.axes))
                    if vec:
                        m = len(vec) // varData.VarRegionCount
                        flags = component.flags
                        vec = list(batched(vec, m))
                        newVec = []
                        for v in vec:
                            v = list(v)
                            i = 0
                            ## Scale translate & tCenter
                            # The per-row delta layout follows the HAVE_*
                            # flags in declaration order; only the
                            # translate/tCenter slots are font units.
                            if flags & otTables.VarComponentFlags.HAVE_TRANSLATE_X:
                                v[i] = visitor.scale(v[i])
                                i += 1
                            if flags & otTables.VarComponentFlags.HAVE_TRANSLATE_Y:
                                v[i] = visitor.scale(v[i])
                                i += 1
                            if flags & otTables.VarComponentFlags.HAVE_ROTATION:
                                i += 1
                            if flags & otTables.VarComponentFlags.HAVE_SCALE_X:
                                i += 1
                            if flags & otTables.VarComponentFlags.HAVE_SCALE_Y:
                                i += 1
                            if flags & otTables.VarComponentFlags.HAVE_SKEW_X:
                                i += 1
                            if flags & otTables.VarComponentFlags.HAVE_SKEW_Y:
                                i += 1
                            if flags & otTables.VarComponentFlags.HAVE_TCENTER_X:
                                v[i] = visitor.scale(v[i])
                                i += 1
                            if flags & otTables.VarComponentFlags.HAVE_TCENTER_Y:
                                v[i] = visitor.scale(v[i])
                                i += 1

                            newVec.append(Vector(v))
                        vec = newVec

                        component.transformVarIndex = storeBuilder.storeDeltas(vec)
                    else:
                        component.transformVarIndex = otTables.NO_VARIATION_INDEX

    varc.MultiVarStore = storeBuilder.finish()
|
||||
|
||||
|
||||
@ScalerVisitor.register_attr(ttLib.getTableClass("kern"), "kernTables")
def visit(visitor, obj, attr, kernTables):
    """Scale every kerning value in each kern subtable."""
    for subtable in kernTables:
        pairs = subtable.kernTable
        for pair, value in pairs.items():
            pairs[pair] = visitor.scale(value)
|
||||
|
||||
|
||||
def _cff_scale(visitor, args):
|
||||
for i, arg in enumerate(args):
|
||||
if not isinstance(arg, list):
|
||||
if not isinstance(arg, bytes):
|
||||
args[i] = visitor.scale(arg)
|
||||
else:
|
||||
num_blends = arg[-1]
|
||||
_cff_scale(visitor, arg)
|
||||
arg[-1] = num_blends
|
||||
|
||||
|
||||
@ScalerVisitor.register_attr(
    (ttLib.getTableClass("CFF "), ttLib.getTableClass("CFF2")), "cff"
)
def visit(visitor, obj, attr, cff):
    # Scale all charstring coordinates plus the font-wide metric values in
    # the top dict and private dicts.  Charstrings are de-subroutinized
    # first so every glyph's program is self-contained.
    cff.desubroutinize()
    topDict = cff.topDictIndex[0]
    varStore = getattr(topDict, "VarStore", None)
    # CFF2 variable fonts need the region count to parse blend operators.
    getNumRegions = varStore.getNumRegions if varStore is not None else None
    privates = set()
    for fontname in cff.keys():
        font = cff[fontname]
        cs = font.CharStrings
        for g in font.charset:
            c, _ = cs.getItemAndSelector(g)
            # Collect each distinct private dict for the hint-zone pass below.
            privates.add(c.private)

            commands = cffSpecializer.programToCommands(
                c.program, getNumRegions=getNumRegions
            )
            for op, args in commands:
                # vsindex selects a variation region set; not a coordinate.
                if op == "vsindex":
                    continue
                _cff_scale(visitor, args)
            c.program[:] = cffSpecializer.commandsToProgram(commands)

        # Annoying business of scaling numbers that do not matter whatsoever

        for attr in (
            "UnderlinePosition",
            "UnderlineThickness",
            "FontBBox",
            "StrokeWidth",
        ):
            value = getattr(topDict, attr, None)
            if value is None:
                continue
            if isinstance(value, list):
                _cff_scale(visitor, value)
            else:
                setattr(topDict, attr, visitor.scale(value))

        # FontMatrix maps font units to text space, so it shrinks as the
        # unit values grow: divide rather than multiply.
        for i in range(6):
            topDict.FontMatrix[i] /= visitor.scaleFactor

        for private in privates:
            for attr in (
                "BlueValues",
                "OtherBlues",
                "FamilyBlues",
                "FamilyOtherBlues",
                # "BlueScale",
                # "BlueShift",
                # "BlueFuzz",
                "StdHW",
                "StdVW",
                "StemSnapH",
                "StemSnapV",
                "defaultWidthX",
                "nominalWidthX",
            ):
                value = getattr(private, attr, None)
                if value is None:
                    continue
                if isinstance(value, list):
                    _cff_scale(visitor, value)
                else:
                    setattr(private, attr, visitor.scale(value))
|
||||
|
||||
|
||||
# ItemVariationStore
|
||||
|
||||
|
||||
# ItemVariationStore


@ScalerVisitor.register(otTables.VarData)
def visit(visitor, varData):
    """Scale every delta row of an ItemVariationStore VarData subtable."""
    for row in varData.Item:
        row[:] = [visitor.scale(delta) for delta in row]
    # Scaled deltas may need wider/narrower columns; recompute the split.
    varData.calculateNumShorts()
|
||||
|
||||
|
||||
# COLRv1
|
||||
|
||||
|
||||
def _setup_scale_paint(paint, scale):
    """Rewrite ``paint`` in place into a scaling paint with factor ``scale``.

    Uses PaintScaleUniform when the factor fits the field's F2Dot14 wire
    format, otherwise falls back to a full PaintTransform with an
    Affine2x3 matrix.
    """
    # F2Dot14 can represent [-2, 2 - 2**-14].  BUGFIX: the upper bound was
    # written as ``2 - (1 >> 14)``, which is integer arithmetic and
    # evaluates to 2, wrongly letting factors in (2 - 2**-14, 2] take the
    # PaintScaleUniform path even though they cannot be stored as F2Dot14.
    if -2 <= scale <= 2 - (1 / (1 << 14)):
        paint.Format = otTables.PaintFormat.PaintScaleUniform
        paint.scale = scale
        return

    transform = otTables.Affine2x3()
    transform.populateDefaults()
    transform.xy = transform.yx = transform.dx = transform.dy = 0
    transform.xx = transform.yy = scale

    paint.Format = otTables.PaintFormat.PaintTransform
    paint.Transform = transform
|
||||
|
||||
|
||||
@ScalerVisitor.register(otTables.BaseGlyphPaintRecord)
def visit(visitor, record):
    """Wrap each COLR base glyph's paint graph in a scale-by-upem-factor node."""
    wrapper = otTables.Paint()
    _setup_scale_paint(wrapper, visitor.scaleFactor)
    # Chain the existing paint graph under the new scaling node.
    wrapper.Paint = record.Paint
    record.Paint = wrapper

    # Keep traversing: descendants still need their own handling.
    return True
|
||||
|
||||
|
||||
@ScalerVisitor.register(otTables.Paint)
def visit(visitor, paint):
    # Only PaintGlyph needs special handling; let the traversal descend
    # into every other paint format normally.
    if paint.Format != otTables.PaintFormat.PaintGlyph:
        return True

    # The root of each COLR paint graph is wrapped in a scaleFactor node
    # (see the BaseGlyphPaintRecord visitor), but glyph outlines are also
    # scaled by the glyf/CFF visitors.  Move this PaintGlyph one level down
    # and turn the current node into the inverse scale, so the outline is
    # not scaled twice.
    newPaint = otTables.Paint()
    newPaint.Format = paint.Format
    newPaint.Paint = paint.Paint
    newPaint.Glyph = paint.Glyph
    del paint.Paint
    del paint.Glyph

    _setup_scale_paint(paint, 1 / visitor.scaleFactor)
    paint.Paint = newPaint

    # Visit the moved-down subgraph manually, then return False so the
    # traversal does not descend into the node we just rewrote (which
    # would hit the same PaintGlyph again).
    visitor.visit(newPaint.Paint)

    return False
|
||||
|
||||
|
||||
def scale_upem(font, new_upem):
    """Change the units-per-EM of font to the new value."""
    current_upem = font["head"].unitsPerEm
    ScalerVisitor(new_upem / current_upem).visit(font)
|
||||
|
||||
|
||||
def main(args=None):
    """Change the units-per-EM of fonts

    Command-line entry point: parse arguments, scale the font, and write
    the result (default output name gets a "-scaled" suffix).
    """
    if args is None:
        import sys

        args = sys.argv[1:]

    import argparse

    from fontTools.misc.cliTools import makeOutputFileName
    from fontTools.ttLib import TTFont

    parser = argparse.ArgumentParser(
        "fonttools ttLib.scaleUpem", description="Change the units-per-EM of fonts"
    )
    parser.add_argument("font", metavar="font", help="Font file.")
    parser.add_argument(
        "new_upem", metavar="new-upem", help="New units-per-EM integer value."
    )
    parser.add_argument(
        "--output-file", metavar="path", default=None, help="Output file."
    )

    options = parser.parse_args(args)

    font = TTFont(options.font)
    new_upem = int(options.new_upem)
    if options.output_file is not None:
        output_file = options.output_file
    else:
        output_file = makeOutputFileName(options.font, overWrite=True, suffix="-scaled")

    scale_upem(font, new_upem)

    print("Writing %s" % output_file)
    font.save(output_file)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import sys

    # Allow running the module directly; exit status comes from main().
    sys.exit(main())
|
||||
662
venv/lib/python3.12/site-packages/fontTools/ttLib/sfnt.py
Normal file
662
venv/lib/python3.12/site-packages/fontTools/ttLib/sfnt.py
Normal file
@ -0,0 +1,662 @@
|
||||
"""ttLib/sfnt.py -- low-level module to deal with the sfnt file format.
|
||||
|
||||
Defines two public classes:
|
||||
|
||||
- SFNTReader
|
||||
- SFNTWriter
|
||||
|
||||
(Normally you don't have to use these classes explicitly; they are
|
||||
used automatically by ttLib.TTFont.)
|
||||
|
||||
The reading and writing of sfnt files is separated in two distinct
|
||||
classes, since whenever the number of tables changes or whenever
|
||||
a table's length changes you need to rewrite the whole file anyway.
|
||||
"""
|
||||
|
||||
from io import BytesIO
|
||||
from types import SimpleNamespace
|
||||
from fontTools.misc.textTools import Tag
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.ttLib import TTLibError, TTLibFileIsCollectionError
|
||||
import struct
|
||||
from collections import OrderedDict
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SFNTReader(object):
    def __new__(cls, *args, **kwargs):
        """Return an instance of the SFNTReader sub-class which is compatible
        with the input file type.
        """
        if args and cls is SFNTReader:
            infile = args[0]
            infile.seek(0)
            # Sniff the first four bytes to dispatch WOFF2 to its own reader.
            sfntVersion = Tag(infile.read(4))
            infile.seek(0)
            if sfntVersion == "wOF2":
                # return new WOFF2Reader object
                from fontTools.ttLib.woff2 import WOFF2Reader

                return object.__new__(WOFF2Reader)
        # return default object
        return object.__new__(cls)

    def __init__(self, file, checkChecksums=0, fontNumber=-1):
        # file: seekable binary file object.
        # checkChecksums: 0 = don't check, 1 = warn on mismatch, >1 = assert.
        # fontNumber: which font to open from a TTC; the default -1 makes a
        #   collection raise TTLibFileIsCollectionError so callers can
        #   redirect to TTCollection.
        self.file = file
        self.checkChecksums = checkChecksums

        self.flavor = None
        self.flavorData = None
        self.DirectoryEntry = SFNTDirectoryEntry
        self.file.seek(0)
        self.sfntVersion = self.file.read(4)
        self.file.seek(0)
        if self.sfntVersion == b"ttcf":
            header = readTTCHeader(self.file)
            numFonts = header.numFonts
            if not 0 <= fontNumber < numFonts:
                raise TTLibFileIsCollectionError(
                    "specify a font number between 0 and %d (inclusive)"
                    % (numFonts - 1)
                )
            self.numFonts = numFonts
            self.file.seek(header.offsetTable[fontNumber])
            data = self.file.read(sfntDirectorySize)
            if len(data) != sfntDirectorySize:
                raise TTLibError("Not a Font Collection (not enough data)")
            sstruct.unpack(sfntDirectoryFormat, data, self)
        elif self.sfntVersion == b"wOFF":
            self.flavor = "woff"
            self.DirectoryEntry = WOFFDirectoryEntry
            data = self.file.read(woffDirectorySize)
            if len(data) != woffDirectorySize:
                raise TTLibError("Not a WOFF font (not enough data)")
            sstruct.unpack(woffDirectoryFormat, data, self)
        else:
            data = self.file.read(sfntDirectorySize)
            if len(data) != sfntDirectorySize:
                raise TTLibError("Not a TrueType or OpenType font (not enough data)")
            sstruct.unpack(sfntDirectoryFormat, data, self)
        # sstruct.unpack above replaced self.sfntVersion with the wrapped
        # font's version (for TTC/WOFF); normalize to a Tag.
        self.sfntVersion = Tag(self.sfntVersion)

        if self.sfntVersion not in ("\x00\x01\x00\x00", "OTTO", "true"):
            raise TTLibError("Not a TrueType or OpenType font (bad sfntVersion)")
        tables = {}
        for i in range(self.numTables):
            entry = self.DirectoryEntry()
            entry.fromFile(self.file)
            tag = Tag(entry.tag)
            tables[tag] = entry
        # Keep entries in file order so lazy readers touch the file sequentially.
        self.tables = OrderedDict(sorted(tables.items(), key=lambda i: i[1].offset))

        # Load flavor data if any
        if self.flavor == "woff":
            self.flavorData = WOFFFlavorData(self)

    def has_key(self, tag):
        # Legacy dict-style API; prefer ``tag in reader``.
        return tag in self.tables

    __contains__ = has_key

    def keys(self):
        return self.tables.keys()

    def __getitem__(self, tag):
        """Fetch the raw table data."""
        entry = self.tables[Tag(tag)]
        data = entry.loadData(self.file)
        if self.checkChecksums:
            if tag == "head":
                # Beh: we have to special-case the 'head' table.
                checksum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
            else:
                checksum = calcChecksum(data)
            if self.checkChecksums > 1:
                # Be obnoxious, and barf when it's wrong
                assert checksum == entry.checkSum, "bad checksum for '%s' table" % tag
            elif checksum != entry.checkSum:
                # Be friendly, and just log a warning.
                log.warning("bad checksum for '%s' table", tag)
        return data

    def __delitem__(self, tag):
        del self.tables[Tag(tag)]

    def close(self):
        self.file.close()

    # We define custom __getstate__ and __setstate__ to make SFNTReader pickle-able
    # and deepcopy-able. When a TTFont is loaded as lazy=True, SFNTReader holds a
    # reference to an external file object which is not pickleable. So in __getstate__
    # we store the file name and current position, and in __setstate__ we reopen the
    # same named file after unpickling.

    def __getstate__(self):
        if isinstance(self.file, BytesIO):
            # BytesIO is already pickleable, return the state unmodified
            return self.__dict__

        # remove unpickleable file attribute, and only store its name and pos
        state = self.__dict__.copy()
        del state["file"]
        state["_filename"] = self.file.name
        state["_filepos"] = self.file.tell()
        return state

    def __setstate__(self, state):
        if "file" not in state:
            self.file = open(state.pop("_filename"), "rb")
            self.file.seek(state.pop("_filepos"))
        self.__dict__.update(state)
|
||||
|
||||
|
||||
# default compression level for WOFF 1.0 tables and metadata
ZLIB_COMPRESSION_LEVEL = 6

# if set to True, use zopfli instead of zlib for compressing WOFF 1.0.
# The Python bindings are available at https://pypi.python.org/pypi/zopfli
USE_ZOPFLI = False

# mapping between zlib's compression levels and zopfli's 'numiterations'.
# Use lower values for files over several MB in size or it will be too slow
ZOPFLI_LEVELS = {
    # 0: 0, # can't do 0 iterations...
    1: 1,
    2: 3,
    3: 5,
    4: 8,
    5: 10,
    6: 15,
    7: 25,
    8: 50,
    9: 100,
}


def compress(data, level=ZLIB_COMPRESSION_LEVEL):
    """Compress 'data' to Zlib format. If 'USE_ZOPFLI' variable is True,
    zopfli is used instead of the zlib module.
    The compression 'level' must be between 0 and 9. 1 gives best speed,
    9 gives best compression (0 gives no compression at all).
    The default value is a compromise between speed and compression (6).
    """
    if level < 0 or level > 9:
        raise ValueError("Bad compression level: %s" % level)
    if USE_ZOPFLI and level != 0:
        # zopfli has no "no compression" mode; map the zlib level to an
        # iteration count instead.
        from zopfli.zlib import compress as zopfli_compress

        return zopfli_compress(data, numiterations=ZOPFLI_LEVELS[level])
    from zlib import compress as zlib_compress

    return zlib_compress(data, level)
|
||||
|
||||
|
||||
class SFNTWriter(object):
    def __new__(cls, *args, **kwargs):
        """Return an instance of the SFNTWriter sub-class which is compatible
        with the specified 'flavor'.
        """
        flavor = None
        # 'flavor' may arrive as a keyword or as the 4th positional argument.
        if kwargs and "flavor" in kwargs:
            flavor = kwargs["flavor"]
        elif args and len(args) > 3:
            flavor = args[3]
        if cls is SFNTWriter:
            if flavor == "woff2":
                # return new WOFF2Writer object
                from fontTools.ttLib.woff2 import WOFF2Writer

                return object.__new__(WOFF2Writer)
        # return default object
        return object.__new__(cls)

    def __init__(
        self,
        file,
        numTables,
        sfntVersion="\000\001\000\000",
        flavor=None,
        flavorData=None,
    ):
        # file: writable, seekable binary file object.
        # numTables: exact number of tables to be written; close() raises
        #   if the final count differs.
        # flavor: None for plain sfnt, "woff" for WOFF 1.0 ("woff2" is
        #   dispatched to WOFF2Writer in __new__).
        self.file = file
        self.numTables = numTables
        self.sfntVersion = Tag(sfntVersion)
        self.flavor = flavor
        self.flavorData = flavorData

        if self.flavor == "woff":
            self.directoryFormat = woffDirectoryFormat
            self.directorySize = woffDirectorySize
            self.DirectoryEntry = WOFFDirectoryEntry

            # Placeholder; close() sets the bytes value actually packed.
            self.signature = "wOFF"

            # to calculate WOFF checksum adjustment, we also need the original SFNT offsets
            self.origNextTableOffset = (
                sfntDirectorySize + numTables * sfntDirectoryEntrySize
            )
        else:
            assert not self.flavor, "Unknown flavor '%s'" % self.flavor
            self.directoryFormat = sfntDirectoryFormat
            self.directorySize = sfntDirectorySize
            self.DirectoryEntry = SFNTDirectoryEntry

            from fontTools.ttLib import getSearchRange

            self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
                numTables, 16
            )

        self.directoryOffset = self.file.tell()
        self.nextTableOffset = (
            self.directoryOffset
            + self.directorySize
            + numTables * self.DirectoryEntry.formatSize
        )
        # clear out directory area
        self.file.seek(self.nextTableOffset)
        # make sure we're actually where we want to be. (old cStringIO bug)
        self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
        self.tables = OrderedDict()

    def setEntry(self, tag, entry):
        # Register a pre-built directory entry; a table may be written once.
        if tag in self.tables:
            raise TTLibError("cannot rewrite '%s' table" % tag)

        self.tables[tag] = entry

    def __setitem__(self, tag, data):
        """Write raw table data to disk."""
        if tag in self.tables:
            raise TTLibError("cannot rewrite '%s' table" % tag)

        entry = self.DirectoryEntry()
        entry.tag = tag
        entry.offset = self.nextTableOffset
        if tag == "head":
            # 'head' checksum is computed with checkSumAdjustment (bytes
            # 8:12) zeroed; keep the raw table for writeMasterChecksum and
            # the WOFF version fallback in close().
            entry.checkSum = calcChecksum(data[:8] + b"\0\0\0\0" + data[12:])
            self.headTable = data
            entry.uncompressed = True
        else:
            entry.checkSum = calcChecksum(data)
        entry.saveData(self.file, data)

        if self.flavor == "woff":
            entry.origOffset = self.origNextTableOffset
            self.origNextTableOffset += (entry.origLength + 3) & ~3

        self.nextTableOffset = self.nextTableOffset + ((entry.length + 3) & ~3)
        # Add NUL bytes to pad the table data to a 4-byte boundary.
        # Don't depend on f.seek() as we need to add the padding even if no
        # subsequent write follows (seek is lazy), ie. after the final table
        # in the font.
        self.file.write(b"\0" * (self.nextTableOffset - self.file.tell()))
        assert self.nextTableOffset == self.file.tell()

        self.setEntry(tag, entry)

    def __getitem__(self, tag):
        return self.tables[tag]

    def close(self):
        """All tables must have been written to disk. Now write the
        directory.
        """
        tables = sorted(self.tables.items())
        if len(tables) != self.numTables:
            raise TTLibError(
                "wrong number of tables; expected %d, found %d"
                % (self.numTables, len(tables))
            )

        if self.flavor == "woff":
            self.signature = b"wOFF"
            self.reserved = 0

            # Size the decoded SFNT would have: 12-byte header, 16-byte
            # directory entries, 4-byte-padded tables.
            self.totalSfntSize = 12
            self.totalSfntSize += 16 * len(tables)
            for tag, entry in tables:
                self.totalSfntSize += (entry.origLength + 3) & ~3

            data = self.flavorData if self.flavorData else WOFFFlavorData()
            if data.majorVersion is not None and data.minorVersion is not None:
                self.majorVersion = data.majorVersion
                self.minorVersion = data.minorVersion
            else:
                # Fall back to bytes 4:8 of the 'head' table if one was
                # written, else 0.0.
                if hasattr(self, "headTable"):
                    self.majorVersion, self.minorVersion = struct.unpack(
                        ">HH", self.headTable[4:8]
                    )
                else:
                    self.majorVersion = self.minorVersion = 0
            if data.metaData:
                self.metaOrigLength = len(data.metaData)
                self.file.seek(0, 2)
                self.metaOffset = self.file.tell()
                compressedMetaData = compress(data.metaData)
                self.metaLength = len(compressedMetaData)
                self.file.write(compressedMetaData)
            else:
                self.metaOffset = self.metaLength = self.metaOrigLength = 0
            if data.privData:
                # Private data must start on a 4-byte boundary.
                self.file.seek(0, 2)
                off = self.file.tell()
                paddedOff = (off + 3) & ~3
                self.file.write(b"\0" * (paddedOff - off))
                self.privOffset = self.file.tell()
                self.privLength = len(data.privData)
                self.file.write(data.privData)
            else:
                self.privOffset = self.privLength = 0

            self.file.seek(0, 2)
            self.length = self.file.tell()

        else:
            assert not self.flavor, "Unknown flavor '%s'" % self.flavor
            pass

        directory = sstruct.pack(self.directoryFormat, self)

        self.file.seek(self.directoryOffset + self.directorySize)
        seenHead = 0
        for tag, entry in tables:
            if tag == "head":
                seenHead = 1
            directory = directory + entry.toString()
        if seenHead:
            # Only fonts that actually contain a 'head' table get a
            # checkSumAdjustment written back.
            self.writeMasterChecksum(directory)
        self.file.seek(self.directoryOffset)
        self.file.write(directory)

    def _calcMasterChecksum(self, directory):
        # calculate checkSumAdjustment
        tags = list(self.tables.keys())
        checksums = []
        for i in range(len(tags)):
            checksums.append(self.tables[tags[i]].checkSum)

        if self.DirectoryEntry != SFNTDirectoryEntry:
            # Create a SFNT directory for checksum calculation purposes
            # (WOFF stores the checksum of the *original* SFNT layout).
            from fontTools.ttLib import getSearchRange

            self.searchRange, self.entrySelector, self.rangeShift = getSearchRange(
                self.numTables, 16
            )
            directory = sstruct.pack(sfntDirectoryFormat, self)
            tables = sorted(self.tables.items())
            for tag, entry in tables:
                sfntEntry = SFNTDirectoryEntry()
                sfntEntry.tag = entry.tag
                sfntEntry.checkSum = entry.checkSum
                sfntEntry.offset = entry.origOffset
                sfntEntry.length = entry.origLength
                directory = directory + sfntEntry.toString()

        directory_end = sfntDirectorySize + len(self.tables) * sfntDirectoryEntrySize
        assert directory_end == len(directory)

        checksums.append(calcChecksum(directory))
        checksum = sum(checksums) & 0xFFFFFFFF
        # BiboAfba!
        checksumadjustment = (0xB1B0AFBA - checksum) & 0xFFFFFFFF
        return checksumadjustment

    def writeMasterChecksum(self, directory):
        checksumadjustment = self._calcMasterChecksum(directory)
        # write the checksum to the file
        # (checkSumAdjustment lives at offset 8 within the 'head' table)
        self.file.seek(self.tables["head"].offset + 8)
        self.file.write(struct.pack(">L", checksumadjustment))

    def reordersTables(self):
        # Plain SFNT/WOFF writers keep the caller's table order.
        return False
|
||||
|
||||
|
||||
# -- sfnt directory helpers and cruft

# sstruct format strings for the fixed-size headers and directory entries;
# the comments inside each string document the individual fields.

ttcHeaderFormat = """
		> # big endian
		TTCTag:                  4s # "ttcf"
		Version:                 L  # 0x00010000 or 0x00020000
		numFonts:                L  # number of fonts
		# OffsetTable[numFonts]: L  # array with offsets from beginning of file
		# ulDsigTag:             L  # version 2.0 only
		# ulDsigLength:          L  # version 2.0 only
		# ulDsigOffset:          L  # version 2.0 only
"""

ttcHeaderSize = sstruct.calcsize(ttcHeaderFormat)

sfntDirectoryFormat = """
		> # big endian
		sfntVersion:    4s
		numTables:      H    # number of tables
		searchRange:    H    # (max2 <= numTables)*16
		entrySelector:  H    # log2(max2 <= numTables)
		rangeShift:     H    # numTables*16-searchRange
"""

sfntDirectorySize = sstruct.calcsize(sfntDirectoryFormat)

sfntDirectoryEntryFormat = """
		> # big endian
		tag:            4s
		checkSum:       L
		offset:         L
		length:         L
"""

sfntDirectoryEntrySize = sstruct.calcsize(sfntDirectoryEntryFormat)

woffDirectoryFormat = """
		> # big endian
		signature:      4s   # "wOFF"
		sfntVersion:    4s
		length:         L    # total woff file size
		numTables:      H    # number of tables
		reserved:       H    # set to 0
		totalSfntSize:  L    # uncompressed size
		majorVersion:   H    # major version of WOFF file
		minorVersion:   H    # minor version of WOFF file
		metaOffset:     L    # offset to metadata block
		metaLength:     L    # length of compressed metadata
		metaOrigLength: L    # length of uncompressed metadata
		privOffset:     L    # offset to private data block
		privLength:     L    # length of private data block
"""

woffDirectorySize = sstruct.calcsize(woffDirectoryFormat)

woffDirectoryEntryFormat = """
		> # big endian
		tag:            4s
		offset:         L
		length:         L    # compressed length
		origLength:     L    # original length
		checkSum:       L    # original checksum
"""

woffDirectoryEntrySize = sstruct.calcsize(woffDirectoryEntryFormat)
|
||||
|
||||
|
||||
class DirectoryEntry(object):
    """Base class for one entry in an sfnt-style table directory."""

    def __init__(self):
        self.uncompressed = False  # if True, always embed entry raw

    def fromFile(self, file):
        """Populate this entry from the next formatSize bytes of 'file'."""
        sstruct.unpack(self.format, file.read(self.formatSize), self)

    def fromString(self, str):
        """Populate this entry from a packed directory-entry string."""
        sstruct.unpack(self.format, str, self)

    def toString(self):
        """Pack this entry back into its binary directory form."""
        return sstruct.pack(self.format, self)

    def __repr__(self):
        name = self.__class__.__name__
        if hasattr(self, "tag"):
            return "<%s '%s' at %x>" % (name, self.tag, id(self))
        return "<%s at %x>" % (name, id(self))

    def loadData(self, file):
        """Read this entry's table data from 'file', decoding if defined."""
        file.seek(self.offset)
        data = file.read(self.length)
        assert len(data) == self.length
        if hasattr(self.__class__, "decodeData"):
            data = self.decodeData(data)
        return data

    def saveData(self, file, data):
        """Encode 'data' if defined, record its length, and write it in place."""
        if hasattr(self.__class__, "encodeData"):
            data = self.encodeData(data)
        self.length = len(data)
        file.seek(self.offset)
        file.write(data)

    def decodeData(self, rawData):
        # Identity transform; subclasses may override (e.g. zlib for WOFF).
        return rawData

    def encodeData(self, data):
        # Identity transform; subclasses may override.
        return data
|
||||
|
||||
|
||||
class SFNTDirectoryEntry(DirectoryEntry):
    # Plain sfnt directory entry: fixed 16-byte record, no compression.
    format = sfntDirectoryEntryFormat
    formatSize = sfntDirectoryEntrySize
|
||||
|
||||
|
||||
class WOFFDirectoryEntry(DirectoryEntry):
    # WOFF 1.0 directory entry: tables may be individually zlib-compressed.
    format = woffDirectoryEntryFormat
    formatSize = woffDirectoryEntrySize

    def __init__(self):
        super(WOFFDirectoryEntry, self).__init__()
        # With fonttools<=3.1.2, the only way to set a different zlib
        # compression level for WOFF directory entries was to set the class
        # attribute 'zlibCompressionLevel'. This is now replaced by a globally
        # defined `ZLIB_COMPRESSION_LEVEL`, which is also applied when
        # compressing the metadata. For backward compatibility, we still
        # use the class attribute if it was already set.
        if not hasattr(WOFFDirectoryEntry, "zlibCompressionLevel"):
            self.zlibCompressionLevel = ZLIB_COMPRESSION_LEVEL

    def decodeData(self, rawData):
        import zlib

        # Equal stored/original lengths mean the table was kept uncompressed.
        if self.length == self.origLength:
            data = rawData
        else:
            assert self.length < self.origLength
            data = zlib.decompress(rawData)
            assert len(data) == self.origLength
        return data

    def encodeData(self, data):
        self.origLength = len(data)
        if not self.uncompressed:
            compressedData = compress(data, self.zlibCompressionLevel)
        # Note the short-circuit: when self.uncompressed is True,
        # compressedData was never assigned and must not be evaluated.
        if self.uncompressed or len(compressedData) >= self.origLength:
            # Encode uncompressed
            rawData = data
            self.length = self.origLength
        else:
            rawData = compressedData
            self.length = len(rawData)
        return rawData
|
||||
|
||||
|
||||
class WOFFFlavorData:
    """WOFF-specific font data: version numbers, the (compressed on disk)
    XML metadata block, and the private data block."""

    Flavor = "woff"

    def __init__(self, reader=None):
        self.majorVersion = None
        self.minorVersion = None
        self.metaData = None
        self.privData = None
        if not reader:
            return
        self.majorVersion = reader.majorVersion
        self.minorVersion = reader.minorVersion
        if reader.metaLength:
            # Metadata is stored zlib-compressed; inflate and sanity-check
            # against the recorded original length.
            reader.file.seek(reader.metaOffset)
            packed = reader.file.read(reader.metaLength)
            assert len(packed) == reader.metaLength
            meta = self._decompress(packed)
            assert len(meta) == reader.metaOrigLength
            self.metaData = meta
        if reader.privLength:
            # Private data is stored verbatim.
            reader.file.seek(reader.privOffset)
            priv = reader.file.read(reader.privLength)
            assert len(priv) == reader.privLength
            self.privData = priv

    def _decompress(self, rawData):
        # Imported lazily to keep module import cheap.
        import zlib

        return zlib.decompress(rawData)
|
||||
|
||||
|
||||
def calcChecksum(data):
    """Calculate the checksum for an arbitrary block of data.

    The data is interpreted as big-endian unsigned 32-bit words and
    summed modulo 2**32; input whose length is not a multiple of four
    is implicitly padded with null bytes.

    >>> print(calcChecksum(b"abcd"))
    1633837924
    >>> print(calcChecksum(b"abcdxyz"))
    3655064932
    """
    leftover = len(data) % 4
    if leftover:
        data += b"\0" * (4 - leftover)
    total = 0
    step = 4096  # process in chunks; must itself be a multiple of 4
    for start in range(0, len(data), step):
        chunk = data[start : start + step]
        words = struct.unpack(">%dL" % (len(chunk) // 4), chunk)
        total = (total + sum(words)) & 0xFFFFFFFF
    return total
|
||||
|
||||
|
||||
def readTTCHeader(file):
    """Parse the TrueType Collection header at the start of *file*.

    Returns a SimpleNamespace carrying ``TTCTag``, ``Version``,
    ``numFonts`` and ``offsetTable`` (one table-directory offset per
    member font).  Raises TTLibError when *file* is not a collection.
    """
    file.seek(0)
    data = file.read(ttcHeaderSize)
    if len(data) != ttcHeaderSize:
        raise TTLibError("Not a Font Collection (not enough data)")
    header = SimpleNamespace()
    sstruct.unpack(ttcHeaderFormat, data, header)
    if header.TTCTag != "ttcf":
        raise TTLibError("Not a Font Collection")
    assert header.Version == 0x00010000 or header.Version == 0x00020000, (
        "unrecognized TTC version 0x%08x" % header.Version
    )
    header.offsetTable = struct.unpack(
        ">%dL" % header.numFonts, file.read(header.numFonts * 4)
    )
    if header.Version == 0x00020000:
        pass  # ignoring version 2.0 signatures
    return header
|
||||
|
||||
|
||||
def writeTTCHeader(file, numFonts):
    """Write a version-1.0 TTC header for *numFonts* fonts at the start
    of *file*, followed by a zero-filled offset table.

    Returns the file position of the offset table so the caller can
    patch in real table-directory offsets later.
    """
    header = SimpleNamespace()
    header.TTCTag = "ttcf"
    header.Version = 0x00010000
    header.numFonts = numFonts
    file.seek(0)
    file.write(sstruct.pack(ttcHeaderFormat, header))
    tableOffset = file.tell()
    file.write(struct.pack(">%dL" % numFonts, *([0] * numFonts)))
    return tableOffset
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run this module's doctests (e.g. calcChecksum) and exit with the
    # number of failures as the process status.
    import sys
    import doctest

    sys.exit(doctest.testmod().failed)
|
||||
@ -0,0 +1,271 @@
|
||||
#
|
||||
# 'post' table formats 1.0 and 2.0 rely on this list of "standard"
|
||||
# glyphs.
|
||||
#
|
||||
# My list is correct according to the Apple documentation for the 'post' table:
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6post.html
|
||||
# (However, it seems that TTFdump (from MS) and FontLab disagree, at
|
||||
# least with respect to the last glyph, which they list as 'dslash'
|
||||
# instead of 'dcroat'.)
|
||||
#
|
||||
|
||||
# The 258 standard Macintosh glyph names, in the order mandated by the
# 'post' table formats 1.0 and 2.0 (indices are 'post' glyph indices).
standardGlyphOrder = [
    ".notdef",  # 0
    ".null",  # 1
    "nonmarkingreturn",  # 2
    "space",  # 3
    "exclam",  # 4
    "quotedbl",  # 5
    "numbersign",  # 6
    "dollar",  # 7
    "percent",  # 8
    "ampersand",  # 9
    "quotesingle",  # 10
    "parenleft",  # 11
    "parenright",  # 12
    "asterisk",  # 13
    "plus",  # 14
    "comma",  # 15
    "hyphen",  # 16
    "period",  # 17
    "slash",  # 18
    "zero",  # 19
    "one",  # 20
    "two",  # 21
    "three",  # 22
    "four",  # 23
    "five",  # 24
    "six",  # 25
    "seven",  # 26
    "eight",  # 27
    "nine",  # 28
    "colon",  # 29
    "semicolon",  # 30
    "less",  # 31
    "equal",  # 32
    "greater",  # 33
    "question",  # 34
    "at",  # 35
    "A",  # 36
    "B",  # 37
    "C",  # 38
    "D",  # 39
    "E",  # 40
    "F",  # 41
    "G",  # 42
    "H",  # 43
    "I",  # 44
    "J",  # 45
    "K",  # 46
    "L",  # 47
    "M",  # 48
    "N",  # 49
    "O",  # 50
    "P",  # 51
    "Q",  # 52
    "R",  # 53
    "S",  # 54
    "T",  # 55
    "U",  # 56
    "V",  # 57
    "W",  # 58
    "X",  # 59
    "Y",  # 60
    "Z",  # 61
    "bracketleft",  # 62
    "backslash",  # 63
    "bracketright",  # 64
    "asciicircum",  # 65
    "underscore",  # 66
    "grave",  # 67
    "a",  # 68
    "b",  # 69
    "c",  # 70
    "d",  # 71
    "e",  # 72
    "f",  # 73
    "g",  # 74
    "h",  # 75
    "i",  # 76
    "j",  # 77
    "k",  # 78
    "l",  # 79
    "m",  # 80
    "n",  # 81
    "o",  # 82
    "p",  # 83
    "q",  # 84
    "r",  # 85
    "s",  # 86
    "t",  # 87
    "u",  # 88
    "v",  # 89
    "w",  # 90
    "x",  # 91
    "y",  # 92
    "z",  # 93
    "braceleft",  # 94
    "bar",  # 95
    "braceright",  # 96
    "asciitilde",  # 97
    "Adieresis",  # 98
    "Aring",  # 99
    "Ccedilla",  # 100
    "Eacute",  # 101
    "Ntilde",  # 102
    "Odieresis",  # 103
    "Udieresis",  # 104
    "aacute",  # 105
    "agrave",  # 106
    "acircumflex",  # 107
    "adieresis",  # 108
    "atilde",  # 109
    "aring",  # 110
    "ccedilla",  # 111
    "eacute",  # 112
    "egrave",  # 113
    "ecircumflex",  # 114
    "edieresis",  # 115
    "iacute",  # 116
    "igrave",  # 117
    "icircumflex",  # 118
    "idieresis",  # 119
    "ntilde",  # 120
    "oacute",  # 121
    "ograve",  # 122
    "ocircumflex",  # 123
    "odieresis",  # 124
    "otilde",  # 125
    "uacute",  # 126
    "ugrave",  # 127
    "ucircumflex",  # 128
    "udieresis",  # 129
    "dagger",  # 130
    "degree",  # 131
    "cent",  # 132
    "sterling",  # 133
    "section",  # 134
    "bullet",  # 135
    "paragraph",  # 136
    "germandbls",  # 137
    "registered",  # 138
    "copyright",  # 139
    "trademark",  # 140
    "acute",  # 141
    "dieresis",  # 142
    "notequal",  # 143
    "AE",  # 144
    "Oslash",  # 145
    "infinity",  # 146
    "plusminus",  # 147
    "lessequal",  # 148
    "greaterequal",  # 149
    "yen",  # 150
    "mu",  # 151
    "partialdiff",  # 152
    "summation",  # 153
    "product",  # 154
    "pi",  # 155
    "integral",  # 156
    "ordfeminine",  # 157
    "ordmasculine",  # 158
    "Omega",  # 159
    "ae",  # 160
    "oslash",  # 161
    "questiondown",  # 162
    "exclamdown",  # 163
    "logicalnot",  # 164
    "radical",  # 165
    "florin",  # 166
    "approxequal",  # 167
    "Delta",  # 168
    "guillemotleft",  # 169
    "guillemotright",  # 170
    "ellipsis",  # 171
    "nonbreakingspace",  # 172
    "Agrave",  # 173
    "Atilde",  # 174
    "Otilde",  # 175
    "OE",  # 176
    "oe",  # 177
    "endash",  # 178
    "emdash",  # 179
    "quotedblleft",  # 180
    "quotedblright",  # 181
    "quoteleft",  # 182
    "quoteright",  # 183
    "divide",  # 184
    "lozenge",  # 185
    "ydieresis",  # 186
    "Ydieresis",  # 187
    "fraction",  # 188
    "currency",  # 189
    "guilsinglleft",  # 190
    "guilsinglright",  # 191
    "fi",  # 192
    "fl",  # 193
    "daggerdbl",  # 194
    "periodcentered",  # 195
    "quotesinglbase",  # 196
    "quotedblbase",  # 197
    "perthousand",  # 198
    "Acircumflex",  # 199
    "Ecircumflex",  # 200
    "Aacute",  # 201
    "Edieresis",  # 202
    "Egrave",  # 203
    "Iacute",  # 204
    "Icircumflex",  # 205
    "Idieresis",  # 206
    "Igrave",  # 207
    "Oacute",  # 208
    "Ocircumflex",  # 209
    "apple",  # 210
    "Ograve",  # 211
    "Uacute",  # 212
    "Ucircumflex",  # 213
    "Ugrave",  # 214
    "dotlessi",  # 215
    "circumflex",  # 216
    "tilde",  # 217
    "macron",  # 218
    "breve",  # 219
    "dotaccent",  # 220
    "ring",  # 221
    "cedilla",  # 222
    "hungarumlaut",  # 223
    "ogonek",  # 224
    "caron",  # 225
    "Lslash",  # 226
    "lslash",  # 227
    "Scaron",  # 228
    "scaron",  # 229
    "Zcaron",  # 230
    "zcaron",  # 231
    "brokenbar",  # 232
    "Eth",  # 233
    "eth",  # 234
    "Yacute",  # 235
    "yacute",  # 236
    "Thorn",  # 237
    "thorn",  # 238
    "minus",  # 239
    "multiply",  # 240
    "onesuperior",  # 241
    "twosuperior",  # 242
    "threesuperior",  # 243
    "onehalf",  # 244
    "onequarter",  # 245
    "threequarters",  # 246
    "franc",  # 247
    "Gbreve",  # 248
    "gbreve",  # 249
    "Idotaccent",  # 250
    "Scedilla",  # 251
    "scedilla",  # 252
    "Cacute",  # 253
    "cacute",  # 254
    "Ccaron",  # 255
    "ccaron",  # 256
    "dcroat",  # 257
]
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_B_A_S_E_(BaseTTXConverter):
    # The OpenType BASE (baseline) table.  Its layout is fully described
    # by otData, so the generic BaseTTXConverter machinery handles
    # compilation, decompilation and TTX round-tripping.
    pass
|
||||
@ -0,0 +1,64 @@
|
||||
# Since bitmap glyph metrics are shared between EBLC and EBDT
|
||||
# this class gets its own python file.
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# sstruct format for the 8-byte "big" glyph metrics record shared by
# EBDT/EBLC (and their color variants): both horizontal and vertical
# metrics, all single signed/unsigned bytes.
bigGlyphMetricsFormat = """
> # big endian
height: B
width: B
horiBearingX: b
horiBearingY: b
horiAdvance: B
vertBearingX: b
vertBearingY: b
vertAdvance: B
"""

# sstruct format for the 5-byte "small" glyph metrics record: metrics
# for a single writing direction only.
smallGlyphMetricsFormat = """
> # big endian
height: B
width: B
BearingX: b
BearingY: b
Advance: B
"""
|
||||
|
||||
|
||||
class BitmapGlyphMetrics(object):
    """Base class for bitmap glyph metrics; subclasses supply
    ``binaryFormat`` (an sstruct format string) that names the fields."""

    def toXML(self, writer, ttFont):
        # Wrap the metric fields in a tag named after the concrete class
        # (BigGlyphMetrics / SmallGlyphMetrics).
        writer.begintag(self.__class__.__name__)
        writer.newline()
        for metricName in sstruct.getformat(self.__class__.binaryFormat)[1]:
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Only accept fields declared in binaryFormat; warn on anything else.
        metricNames = set(sstruct.getformat(self.__class__.binaryFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            # Make sure this is a metric that is needed by GlyphMetrics.
            if name in metricNames:
                vars(self)[name] = safeEval(attrs["value"])
            else:
                log.warning(
                    "unknown name '%s' being ignored in %s.",
                    name,
                    self.__class__.__name__,
                )
|
||||
|
||||
|
||||
class BigGlyphMetrics(BitmapGlyphMetrics):
    # 8-byte metrics record with both horizontal and vertical fields.
    binaryFormat = bigGlyphMetricsFormat
|
||||
|
||||
|
||||
class SmallGlyphMetrics(BitmapGlyphMetrics):
    # 5-byte metrics record for a single writing direction.
    binaryFormat = smallGlyphMetricsFormat
|
||||
@ -0,0 +1,103 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Matt Fontaine
|
||||
|
||||
|
||||
from fontTools.misc.textTools import bytesjoin
|
||||
from fontTools.misc import sstruct
|
||||
from . import E_B_D_T_
|
||||
from .BitmapGlyphMetrics import (
|
||||
BigGlyphMetrics,
|
||||
bigGlyphMetricsFormat,
|
||||
SmallGlyphMetrics,
|
||||
smallGlyphMetricsFormat,
|
||||
)
|
||||
from .E_B_D_T_ import (
|
||||
BitmapGlyph,
|
||||
BitmapPlusSmallMetricsMixin,
|
||||
BitmapPlusBigMetricsMixin,
|
||||
)
|
||||
import struct
|
||||
|
||||
|
||||
class table_C_B_D_T_(E_B_D_T_.table_E_B_D_T_):
    """Color Bitmap Data Table: reuses the EBDT machinery but pairs with
    CBLC and adds the color-only image formats 17/18/19."""

    # Change the data locator table being referenced.
    locatorName = "CBLC"

    # Modify the format class accessor for color bitmap use.
    def getImageFormatClass(self, imageFormat):
        # Fall back to the CBDT-specific classes for formats EBDT
        # does not know about.
        try:
            return E_B_D_T_.table_E_B_D_T_.getImageFormatClass(self, imageFormat)
        except KeyError:
            return cbdt_bitmap_classes[imageFormat]
|
||||
|
||||
|
||||
# Helper method for removing export features not supported by color bitmaps.
|
||||
# Write data in the parent class will default to raw if an option is unsupported.
|
||||
def _removeUnsupportedForColor(dataFunctions):
    """Return a copy of *dataFunctions* without the 'row' exporter.

    Row-based export is not meaningful for PNG-backed color bitmaps;
    unsupported options fall back to raw export in the parent class.
    """
    supported = dict(dataFunctions)
    del supported["row"]
    return supported
|
||||
|
||||
|
||||
class ColorBitmapGlyph(BitmapGlyph):
    # Color bitmap glyphs store PNG image data and drop the row-based
    # XML export modes that only make sense for monochrome bitmaps.
    fileExtension = ".png"
    xmlDataFunctions = _removeUnsupportedForColor(BitmapGlyph.xmlDataFunctions)
|
||||
|
||||
|
||||
class cbdt_bitmap_format_17(BitmapPlusSmallMetricsMixin, ColorBitmapGlyph):
    """CBDT image format 17: small glyph metrics followed by a
    length-prefixed PNG image."""

    def decompile(self):
        # Leading small metrics record, then a 32-bit image-data length.
        self.metrics = SmallGlyphMetrics()
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        (dataLen,) = struct.unpack(">L", data[:4])
        data = data[4:]

        # For the image data cut it to the size specified by dataLen.
        # (fixed typo in the diagnostic: "overun" -> "overrun")
        assert dataLen <= len(data), "Data overrun in format 17"
        self.imageData = data[:dataLen]

    def compile(self, ttFont):
        dataList = []
        dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
        dataList.append(struct.pack(">L", len(self.imageData)))
        dataList.append(self.imageData)
        return bytesjoin(dataList)
|
||||
|
||||
|
||||
class cbdt_bitmap_format_18(BitmapPlusBigMetricsMixin, ColorBitmapGlyph):
    """CBDT image format 18: big glyph metrics followed by a
    length-prefixed PNG image."""

    def decompile(self):
        # Leading big metrics record, then a 32-bit image-data length.
        self.metrics = BigGlyphMetrics()
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        (dataLen,) = struct.unpack(">L", data[:4])
        data = data[4:]

        # For the image data cut it to the size specified by dataLen.
        # (fixed typo in the diagnostic: "overun" -> "overrun")
        assert dataLen <= len(data), "Data overrun in format 18"
        self.imageData = data[:dataLen]

    def compile(self, ttFont):
        dataList = []
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        dataList.append(struct.pack(">L", len(self.imageData)))
        dataList.append(self.imageData)
        return bytesjoin(dataList)
|
||||
|
||||
|
||||
class cbdt_bitmap_format_19(ColorBitmapGlyph):
    """CBDT image format 19: a length-prefixed PNG image with no inline
    metrics (metrics live in the CBLC index)."""

    def decompile(self):
        (dataLen,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]

        # (fixed typo in the diagnostic: "overun" -> "overrun")
        assert dataLen <= len(data), "Data overrun in format 19"
        self.imageData = data[:dataLen]

    def compile(self, ttFont):
        return struct.pack(">L", len(self.imageData)) + self.imageData
|
||||
|
||||
|
||||
# Dict for CBDT extended formats: maps the imageFormat number from the
# CBLC index to the glyph class that can (de)compile it.
cbdt_bitmap_classes = {
    17: cbdt_bitmap_format_17,
    18: cbdt_bitmap_format_18,
    19: cbdt_bitmap_format_19,
}
|
||||
@ -0,0 +1,9 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Matt Fontaine
|
||||
|
||||
from . import E_B_L_C_
|
||||
|
||||
|
||||
class table_C_B_L_C_(E_B_L_C_.table_E_B_L_C_):
    # Color Bitmap Location Table: identical in structure to EBLC, but
    # indexes the CBDT data table instead of EBDT.
    dependencies = ["CBDT"]
|
||||
@ -0,0 +1,46 @@
|
||||
from io import BytesIO
|
||||
from fontTools import cffLib
|
||||
from . import DefaultTable
|
||||
|
||||
|
||||
class table_C_F_F_(DefaultTable.DefaultTable):
    """The 'CFF ' table, wrapping a cffLib.CFFFontSet."""

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.cff = cffLib.CFFFontSet()
        # Guards against getGlyphOrder() being consumed twice.
        self._gaveGlyphOrder = False

    def decompile(self, data, otFont):
        self.cff.decompile(BytesIO(data), otFont, isCFF2=False)
        assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

    def compile(self, otFont):
        f = BytesIO()
        self.cff.compile(f, otFont, isCFF2=False)
        return f.getvalue()

    def haveGlyphNames(self):
        # CID-keyed fonts (those with a ROS operator) have no meaningful
        # glyph names of their own.
        if hasattr(self.cff[self.cff.fontNames[0]], "ROS"):
            return False  # CID-keyed font
        else:
            return True

    def getGlyphOrder(self):
        """Return the font's glyph order; may only be called once."""
        if self._gaveGlyphOrder:
            from fontTools import ttLib

            raise ttLib.TTLibError("illegal use of getGlyphOrder()")
        self._gaveGlyphOrder = True
        return self.cff[self.cff.fontNames[0]].getGlyphOrder()

    def setGlyphOrder(self, glyphOrder):
        # Deliberately a no-op: the CFF font keeps its own charset.
        pass
        # XXX
        # self.cff[self.cff.fontNames[0]].setGlyphOrder(glyphOrder)

    def toXML(self, writer, otFont):
        self.cff.toXML(writer)

    def fromXML(self, name, attrs, content, otFont):
        # A fresh CFFFontSet is created lazily on the first element.
        if not hasattr(self, "cff"):
            self.cff = cffLib.CFFFontSet()
        self.cff.fromXML(name, attrs, content, otFont)
|
||||
@ -0,0 +1,13 @@
|
||||
from io import BytesIO
|
||||
from fontTools.ttLib.tables.C_F_F_ import table_C_F_F_
|
||||
|
||||
|
||||
class table_C_F_F__2(table_C_F_F_):
    """The 'CFF2' table: same wrapper as 'CFF ', but (de)compiled with
    the CFF2 flavor of cffLib."""

    def decompile(self, data, otFont):
        self.cff.decompile(BytesIO(data), otFont, isCFF2=True)
        assert len(self.cff) == 1, "can't deal with multi-font CFF tables."

    def compile(self, otFont):
        f = BytesIO()
        self.cff.compile(f, otFont, isCFF2=True)
        return f.getvalue()
|
||||
@ -0,0 +1,157 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod
|
||||
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
|
||||
|
||||
class table_C_O_L_R_(DefaultTable.DefaultTable):
    """This table is structured so that you can treat it like a dictionary keyed by glyph name.

    ``ttFont['COLR'][<glyphName>]`` will return the color layers for any glyph.

    ``ttFont['COLR'][<glyphName>] = <value>`` will set the color layers for any glyph.
    """

    @staticmethod
    def _decompileColorLayersV0(table):
        # Convert decompiled otTables v0 records into the legacy
        # {baseGlyphName: [LayerRecord, ...]} mapping.
        if not table.LayerRecordArray:
            return {}
        colorLayerLists = {}
        layerRecords = table.LayerRecordArray.LayerRecord
        numLayerRecords = len(layerRecords)
        for baseRec in table.BaseGlyphRecordArray.BaseGlyphRecord:
            baseGlyph = baseRec.BaseGlyph
            firstLayerIndex = baseRec.FirstLayerIndex
            numLayers = baseRec.NumLayers
            assert firstLayerIndex + numLayers <= numLayerRecords
            layers = []
            for i in range(firstLayerIndex, firstLayerIndex + numLayers):
                layerRec = layerRecords[i]
                layers.append(LayerRecord(layerRec.LayerGlyph, layerRec.PaletteIndex))
            colorLayerLists[baseGlyph] = layers
        return colorLayerLists

    def _toOTTable(self, ttFont):
        # Build an otTables COLR from the legacy ColorLayers dict.
        from . import otTables
        from fontTools.colorLib.builder import populateCOLRv0

        tableClass = getattr(otTables, self.tableTag)
        table = tableClass()
        table.Version = self.version

        populateCOLRv0(
            table,
            {
                baseGlyph: [(layer.name, layer.colorID) for layer in layers]
                for baseGlyph, layers in self.ColorLayers.items()
            },
            glyphMap=ttFont.getReverseGlyphMap(rebuild=True),
        )
        return table

    def decompile(self, data, ttFont):
        from .otBase import OTTableReader
        from . import otTables

        # We use otData to decompile, but we adapt the decompiled otTables to the
        # existing COLR v0 API for backward compatibility.
        reader = OTTableReader(data, tableTag=self.tableTag)
        tableClass = getattr(otTables, self.tableTag)
        table = tableClass()
        table.decompile(reader, ttFont)

        self.version = table.Version
        if self.version == 0:
            self.ColorLayers = self._decompileColorLayersV0(table)
        else:
            # for new versions, keep the raw otTables around
            self.table = table

    def compile(self, ttFont):
        from .otBase import OTTableWriter

        if hasattr(self, "table"):
            table = self.table
        else:
            table = self._toOTTable(ttFont)

        writer = OTTableWriter(tableTag=self.tableTag)
        table.compile(writer, ttFont)
        return writer.getAllData()

    def toXML(self, writer, ttFont):
        if hasattr(self, "table"):
            # COLR v1+: serialize the raw otTables structure.
            self.table.toXML2(writer, ttFont)
        else:
            # COLR v0: one <ColorGlyph> element per base glyph, in glyph
            # ID order for stable output.
            writer.simpletag("version", value=self.version)
            writer.newline()
            for baseGlyph in sorted(self.ColorLayers.keys(), key=ttFont.getGlyphID):
                writer.begintag("ColorGlyph", name=baseGlyph)
                writer.newline()
                for layer in self.ColorLayers[baseGlyph]:
                    layer.toXML(writer, ttFont)
                writer.endtag("ColorGlyph")
                writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":  # old COLR v0 API
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "ColorGlyph":
            if not hasattr(self, "ColorLayers"):
                self.ColorLayers = {}
            glyphName = attrs["name"]
            # Bug fix: removed a dead no-op loop over `content` that
            # preceded this one and had no effect.
            layers = []
            for element in content:
                if isinstance(element, str):
                    continue
                layer = LayerRecord()
                layer.fromXML(element[0], element[1], element[2], ttFont)
                layers.append(layer)
            self.ColorLayers[glyphName] = layers
        else:  # new COLR v1 API
            from . import otTables

            if not hasattr(self, "table"):
                tableClass = getattr(otTables, self.tableTag)
                self.table = tableClass()
            self.table.fromXML(name, attrs, content, ttFont)
            self.table.populateDefaults()
            self.version = self.table.Version

    def __getitem__(self, glyphName):
        if not isinstance(glyphName, str):
            raise TypeError(f"expected str, found {type(glyphName).__name__}")
        return self.ColorLayers[glyphName]

    def __setitem__(self, glyphName, value):
        if not isinstance(glyphName, str):
            raise TypeError(f"expected str, found {type(glyphName).__name__}")
        if value is not None:
            self.ColorLayers[glyphName] = value
        elif glyphName in self.ColorLayers:
            # Assigning None removes the entry (if present).
            del self.ColorLayers[glyphName]

    def __delitem__(self, glyphName):
        del self.ColorLayers[glyphName]
|
||||
|
||||
|
||||
class LayerRecord(object):
    """One layer of a COLR v0 color glyph: a glyph name plus an index
    into the CPAL color palette."""

    def __init__(self, name=None, colorID=None):
        self.name = name
        self.colorID = colorID

    def toXML(self, writer, ttFont):
        writer.simpletag("layer", name=self.name, colorID=self.colorID)
        writer.newline()

    def fromXML(self, eltname, attrs, content, ttFont):
        # "name" stays a string; everything else is parsed as a literal.
        for attrName, attrValue in attrs.items():
            parsed = attrValue if attrName == "name" else safeEval(attrValue)
            setattr(self, attrName, parsed)
|
||||
@ -0,0 +1,296 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod
|
||||
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from . import DefaultTable
|
||||
import array
|
||||
from collections import namedtuple
|
||||
import struct
|
||||
import sys
|
||||
|
||||
|
||||
class table_C_P_A_L_(DefaultTable.DefaultTable):
|
||||
NO_NAME_ID = 0xFFFF
|
||||
DEFAULT_PALETTE_TYPE = 0
|
||||
|
||||
def __init__(self, tag=None):
|
||||
DefaultTable.DefaultTable.__init__(self, tag)
|
||||
self.palettes = []
|
||||
self.paletteTypes = []
|
||||
self.paletteLabels = []
|
||||
self.paletteEntryLabels = []
|
||||
|
||||
def decompile(self, data, ttFont):
|
||||
(
|
||||
self.version,
|
||||
self.numPaletteEntries,
|
||||
numPalettes,
|
||||
numColorRecords,
|
||||
goffsetFirstColorRecord,
|
||||
) = struct.unpack(">HHHHL", data[:12])
|
||||
assert (
|
||||
self.version <= 1
|
||||
), "Version of CPAL table is higher than I know how to handle"
|
||||
self.palettes = []
|
||||
pos = 12
|
||||
for i in range(numPalettes):
|
||||
startIndex = struct.unpack(">H", data[pos : pos + 2])[0]
|
||||
assert startIndex + self.numPaletteEntries <= numColorRecords
|
||||
pos += 2
|
||||
palette = []
|
||||
ppos = goffsetFirstColorRecord + startIndex * 4
|
||||
for j in range(self.numPaletteEntries):
|
||||
palette.append(Color(*struct.unpack(">BBBB", data[ppos : ppos + 4])))
|
||||
ppos += 4
|
||||
self.palettes.append(palette)
|
||||
if self.version == 0:
|
||||
offsetToPaletteTypeArray = 0
|
||||
offsetToPaletteLabelArray = 0
|
||||
offsetToPaletteEntryLabelArray = 0
|
||||
else:
|
||||
pos = 12 + numPalettes * 2
|
||||
(
|
||||
offsetToPaletteTypeArray,
|
||||
offsetToPaletteLabelArray,
|
||||
offsetToPaletteEntryLabelArray,
|
||||
) = struct.unpack(">LLL", data[pos : pos + 12])
|
||||
self.paletteTypes = self._decompileUInt32Array(
|
||||
data,
|
||||
offsetToPaletteTypeArray,
|
||||
numPalettes,
|
||||
default=self.DEFAULT_PALETTE_TYPE,
|
||||
)
|
||||
self.paletteLabels = self._decompileUInt16Array(
|
||||
data, offsetToPaletteLabelArray, numPalettes, default=self.NO_NAME_ID
|
||||
)
|
||||
self.paletteEntryLabels = self._decompileUInt16Array(
|
||||
data,
|
||||
offsetToPaletteEntryLabelArray,
|
||||
self.numPaletteEntries,
|
||||
default=self.NO_NAME_ID,
|
||||
)
|
||||
|
||||
def _decompileUInt16Array(self, data, offset, numElements, default=0):
|
||||
if offset == 0:
|
||||
return [default] * numElements
|
||||
result = array.array("H", data[offset : offset + 2 * numElements])
|
||||
if sys.byteorder != "big":
|
||||
result.byteswap()
|
||||
assert len(result) == numElements, result
|
||||
return result.tolist()
|
||||
|
||||
def _decompileUInt32Array(self, data, offset, numElements, default=0):
|
||||
if offset == 0:
|
||||
return [default] * numElements
|
||||
result = array.array("I", data[offset : offset + 4 * numElements])
|
||||
if sys.byteorder != "big":
|
||||
result.byteswap()
|
||||
assert len(result) == numElements, result
|
||||
return result.tolist()
|
||||
|
||||
def compile(self, ttFont):
|
||||
colorRecordIndices, colorRecords = self._compileColorRecords()
|
||||
paletteTypes = self._compilePaletteTypes()
|
||||
paletteLabels = self._compilePaletteLabels()
|
||||
paletteEntryLabels = self._compilePaletteEntryLabels()
|
||||
numColorRecords = len(colorRecords) // 4
|
||||
offsetToFirstColorRecord = 12 + len(colorRecordIndices)
|
||||
if self.version >= 1:
|
||||
offsetToFirstColorRecord += 12
|
||||
header = struct.pack(
|
||||
">HHHHL",
|
||||
self.version,
|
||||
self.numPaletteEntries,
|
||||
len(self.palettes),
|
||||
numColorRecords,
|
||||
offsetToFirstColorRecord,
|
||||
)
|
||||
if self.version == 0:
|
||||
dataList = [header, colorRecordIndices, colorRecords]
|
||||
else:
|
||||
pos = offsetToFirstColorRecord + len(colorRecords)
|
||||
if len(paletteTypes) == 0:
|
||||
offsetToPaletteTypeArray = 0
|
||||
else:
|
||||
offsetToPaletteTypeArray = pos
|
||||
pos += len(paletteTypes)
|
||||
if len(paletteLabels) == 0:
|
||||
offsetToPaletteLabelArray = 0
|
||||
else:
|
||||
offsetToPaletteLabelArray = pos
|
||||
pos += len(paletteLabels)
|
||||
if len(paletteEntryLabels) == 0:
|
||||
offsetToPaletteEntryLabelArray = 0
|
||||
else:
|
||||
offsetToPaletteEntryLabelArray = pos
|
||||
pos += len(paletteLabels)
|
||||
header1 = struct.pack(
|
||||
">LLL",
|
||||
offsetToPaletteTypeArray,
|
||||
offsetToPaletteLabelArray,
|
||||
offsetToPaletteEntryLabelArray,
|
||||
)
|
||||
dataList = [
|
||||
header,
|
||||
colorRecordIndices,
|
||||
header1,
|
||||
colorRecords,
|
||||
paletteTypes,
|
||||
paletteLabels,
|
||||
paletteEntryLabels,
|
||||
]
|
||||
return bytesjoin(dataList)
|
||||
|
||||
def _compilePalette(self, palette):
|
||||
assert len(palette) == self.numPaletteEntries
|
||||
pack = lambda c: struct.pack(">BBBB", c.blue, c.green, c.red, c.alpha)
|
||||
return bytesjoin([pack(color) for color in palette])
|
||||
|
||||
def _compileColorRecords(self):
|
||||
colorRecords, colorRecordIndices, pool = [], [], {}
|
||||
for palette in self.palettes:
|
||||
packedPalette = self._compilePalette(palette)
|
||||
if packedPalette in pool:
|
||||
index = pool[packedPalette]
|
||||
else:
|
||||
index = len(colorRecords)
|
||||
colorRecords.append(packedPalette)
|
||||
pool[packedPalette] = index
|
||||
colorRecordIndices.append(struct.pack(">H", index * self.numPaletteEntries))
|
||||
return bytesjoin(colorRecordIndices), bytesjoin(colorRecords)
|
||||
|
||||
def _compilePaletteTypes(self):
|
||||
if self.version == 0 or not any(self.paletteTypes):
|
||||
return b""
|
||||
assert len(self.paletteTypes) == len(self.palettes)
|
||||
result = bytesjoin([struct.pack(">I", ptype) for ptype in self.paletteTypes])
|
||||
assert len(result) == 4 * len(self.palettes)
|
||||
return result
|
||||
|
||||
def _compilePaletteLabels(self):
|
||||
if self.version == 0 or all(l == self.NO_NAME_ID for l in self.paletteLabels):
|
||||
return b""
|
||||
assert len(self.paletteLabels) == len(self.palettes)
|
||||
result = bytesjoin([struct.pack(">H", label) for label in self.paletteLabels])
|
||||
assert len(result) == 2 * len(self.palettes)
|
||||
return result
|
||||
|
||||
def _compilePaletteEntryLabels(self):
|
||||
if self.version == 0 or all(
|
||||
l == self.NO_NAME_ID for l in self.paletteEntryLabels
|
||||
):
|
||||
return b""
|
||||
assert len(self.paletteEntryLabels) == self.numPaletteEntries
|
||||
result = bytesjoin(
|
||||
[struct.pack(">H", label) for label in self.paletteEntryLabels]
|
||||
)
|
||||
assert len(result) == 2 * self.numPaletteEntries
|
||||
return result
|
||||
|
||||
def toXML(self, writer, ttFont):
|
||||
numPalettes = len(self.palettes)
|
||||
paletteLabels = {i: nameID for (i, nameID) in enumerate(self.paletteLabels)}
|
||||
paletteTypes = {i: typ for (i, typ) in enumerate(self.paletteTypes)}
|
||||
writer.simpletag("version", value=self.version)
|
||||
writer.newline()
|
||||
writer.simpletag("numPaletteEntries", value=self.numPaletteEntries)
|
||||
writer.newline()
|
||||
for index, palette in enumerate(self.palettes):
|
||||
attrs = {"index": index}
|
||||
paletteType = paletteTypes.get(index, self.DEFAULT_PALETTE_TYPE)
|
||||
paletteLabel = paletteLabels.get(index, self.NO_NAME_ID)
|
||||
if self.version > 0 and paletteLabel != self.NO_NAME_ID:
|
||||
attrs["label"] = paletteLabel
|
||||
if self.version > 0 and paletteType != self.DEFAULT_PALETTE_TYPE:
|
||||
attrs["type"] = paletteType
|
||||
writer.begintag("palette", **attrs)
|
||||
writer.newline()
|
||||
if (
|
||||
self.version > 0
|
||||
and paletteLabel != self.NO_NAME_ID
|
||||
and ttFont
|
||||
and "name" in ttFont
|
||||
):
|
||||
name = ttFont["name"].getDebugName(paletteLabel)
|
||||
if name is not None:
|
||||
writer.comment(name)
|
||||
writer.newline()
|
||||
assert len(palette) == self.numPaletteEntries
|
||||
for cindex, color in enumerate(palette):
|
||||
color.toXML(writer, ttFont, cindex)
|
||||
writer.endtag("palette")
|
||||
writer.newline()
|
||||
if self.version > 0 and not all(
|
||||
l == self.NO_NAME_ID for l in self.paletteEntryLabels
|
||||
):
|
||||
writer.begintag("paletteEntryLabels")
|
||||
writer.newline()
|
||||
for index, label in enumerate(self.paletteEntryLabels):
|
||||
if label != self.NO_NAME_ID:
|
||||
writer.simpletag("label", index=index, value=label)
|
||||
if self.version > 0 and label and ttFont and "name" in ttFont:
|
||||
name = ttFont["name"].getDebugName(label)
|
||||
if name is not None:
|
||||
writer.comment(name)
|
||||
writer.newline()
|
||||
writer.endtag("paletteEntryLabels")
|
||||
writer.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
    """Populate the CPAL table from TTX XML (one call per top-level element)."""
    if name == "palette":
        # Missing 'label'/'type' attributes fall back to their defaults.
        self.paletteLabels.append(int(attrs.get("label", self.NO_NAME_ID)))
        self.paletteTypes.append(int(attrs.get("type", self.DEFAULT_PALETTE_TYPE)))
        palette = []
        for element in content:
            if isinstance(element, str):
                # Skip whitespace/text nodes between child elements.
                continue
            attrs = element[1]
            color = Color.fromHex(attrs["value"])
            palette.append(color)
        self.palettes.append(palette)
    elif name == "paletteEntryLabels":
        colorLabels = {}
        for element in content:
            if isinstance(element, str):
                continue
            elementName, elementAttr, _ = element
            if elementName == "label":
                labelIndex = safeEval(elementAttr["index"])
                nameID = safeEval(elementAttr["value"])
                colorLabels[labelIndex] = nameID
        # Expand the sparse index -> nameID map to a dense list.
        self.paletteEntryLabels = [
            colorLabels.get(i, self.NO_NAME_ID)
            for i in range(self.numPaletteEntries)
        ]
    elif "value" in attrs:
        # Simple scalar fields: version, numPaletteEntries, ...
        value = safeEval(attrs["value"])
        setattr(self, name, value)
        if name == "numPaletteEntries":
            # Reset entry labels to the correct length, all unnamed.
            self.paletteEntryLabels = [self.NO_NAME_ID] * self.numPaletteEntries
|
||||
|
||||
|
||||
class Color(namedtuple("Color", "blue green red alpha")):
    """A CPAL color record; fields are stored blue-first, as in the binary table."""

    def hex(self):
        """Return the color formatted as '#RRGGBBAA'."""
        return f"#{self.red:02X}{self.green:02X}{self.blue:02X}{self.alpha:02X}"

    def __repr__(self):
        return self.hex()

    def toXML(self, writer, ttFont, index=None):
        """Write this color as a <color> element."""
        writer.simpletag("color", value=self.hex(), index=index)
        writer.newline()

    @classmethod
    def fromHex(cls, value):
        """Parse '#RRGGBB[AA]' (leading '#' optional); alpha defaults to 0xFF."""
        if value.startswith("#"):
            value = value[1:]
        red, green, blue = (int(value[pos : pos + 2], 16) for pos in (0, 2, 4))
        alpha = int(value[6:8], 16) if len(value) >= 8 else 0xFF
        return cls(red=red, green=green, blue=blue, alpha=alpha)

    @classmethod
    def fromRGBA(cls, red, green, blue, alpha):
        """Alternate constructor taking channels in RGBA order."""
        return cls(red=red, green=green, blue=blue, alpha=alpha)
|
||||
@ -0,0 +1,151 @@
|
||||
from fontTools.misc.textTools import bytesjoin, strjoin, tobytes, tostr, safeEval
|
||||
from fontTools.misc import sstruct
|
||||
from . import DefaultTable
|
||||
import base64
|
||||
|
||||
# sstruct layout of the fixed-size DSIG table header.
DSIG_HeaderFormat = """
> # big endian
ulVersion: L
usNumSigs: H
usFlag: H
"""
# followed by an array of usNumSigs DSIG_Signature records
DSIG_SignatureFormat = """
> # big endian
ulFormat: L
ulLength: L # length includes DSIG_SignatureBlock header
ulOffset: L
"""
# followed by an array of usNumSigs DSIG_SignatureBlock records,
# each followed immediately by the pkcs7 bytes
DSIG_SignatureBlockFormat = """
> # big endian
usReserved1: H
usReserved2: H
cbSignature: l # length of following raw pkcs7 data
"""

#
# NOTE
# the DSIG table format allows for SignatureBlocks residing
# anywhere in the table and possibly in a different order as
# listed in the array after the first table header
#
# this implementation does not keep track of any gaps and/or data
# before or after the actual signature blocks while decompiling,
# and puts them in the same physical order as listed in the header
# on compilation with no padding whatsoever.
#
|
||||
|
||||
|
||||
class table_D_S_I_G_(DefaultTable.DefaultTable):
    """The digital signature (DSIG) table.

    Only ulFormat 1 (PKCS#7) signature records are supported.  Signature
    blocks are located via each record's ulOffset and re-packed densely
    on compile (see the NOTE above this class).
    """

    def decompile(self, data, ttFont):
        """Parse the header, the signature record array, and each block."""
        dummy, newData = sstruct.unpack2(DSIG_HeaderFormat, data, self)
        assert self.ulVersion == 1, "DSIG ulVersion must be 1"
        assert self.usFlag & ~1 == 0, "DSIG usFlag must be 0x1 or 0x0"
        self.signatureRecords = sigrecs = []
        for n in range(self.usNumSigs):
            sigrec, newData = sstruct.unpack2(
                DSIG_SignatureFormat, newData, SignatureRecord()
            )
            assert sigrec.ulFormat == 1, (
                "DSIG signature record #%d ulFormat must be 1" % n
            )
            sigrecs.append(sigrec)
        # Blocks may live anywhere in the table, so slice from each record's
        # ulOffset rather than reading sequentially.  BUGFIX: enumerate here;
        # previously this loop reused the stale 'n' left over from the loop
        # above, so every assertion message reported the last record's index.
        for n, sigrec in enumerate(sigrecs):
            dummy, newData = sstruct.unpack2(
                DSIG_SignatureBlockFormat, data[sigrec.ulOffset :], sigrec
            )
            assert sigrec.usReserved1 == 0, (
                "DSIG signature record #%d usReserved1 must be 0" % n
            )
            assert sigrec.usReserved2 == 0, (
                "DSIG signature record #%d usReserved2 must be 0" % n
            )
            sigrec.pkcs7 = newData[: sigrec.cbSignature]

    def compile(self, ttFont):
        """Serialize: header, record array, then the raw signature blocks."""
        packed = sstruct.pack(DSIG_HeaderFormat, self)
        headers = [packed]
        # Blocks begin immediately after the header plus the record array.
        offset = len(packed) + self.usNumSigs * sstruct.calcsize(DSIG_SignatureFormat)
        data = []
        for sigrec in self.signatureRecords:
            # first pack signature block
            sigrec.cbSignature = len(sigrec.pkcs7)
            packed = sstruct.pack(DSIG_SignatureBlockFormat, sigrec) + sigrec.pkcs7
            data.append(packed)
            # update redundant length field
            sigrec.ulLength = len(packed)
            # update running table offset
            sigrec.ulOffset = offset
            headers.append(sstruct.pack(DSIG_SignatureFormat, sigrec))
            offset += sigrec.ulLength
        if offset % 2:
            # Pad to even bytes
            data.append(b"\0")
        return bytesjoin(headers + data)

    def toXML(self, xmlWriter, ttFont):
        """Dump the table header and each record as a PEM-armored block."""
        xmlWriter.comment(
            "note that the Digital Signature will be invalid after recompilation!"
        )
        xmlWriter.newline()
        xmlWriter.simpletag(
            "tableHeader",
            version=self.ulVersion,
            numSigs=self.usNumSigs,
            flag="0x%X" % self.usFlag,
        )
        for sigrec in self.signatureRecords:
            xmlWriter.newline()
            sigrec.toXML(xmlWriter, ttFont)
        xmlWriter.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild from 'tableHeader' and 'SignatureRecord' elements."""
        if name == "tableHeader":
            self.signatureRecords = []
            self.ulVersion = safeEval(attrs["version"])
            self.usNumSigs = safeEval(attrs["numSigs"])
            self.usFlag = safeEval(attrs["flag"])
            return
        if name == "SignatureRecord":
            sigrec = SignatureRecord()
            sigrec.fromXML(name, attrs, content, ttFont)
            self.signatureRecords.append(sigrec)
|
||||
|
||||
|
||||
def pem_spam(l):
    """Return True for lines carrying base64 payload, False for PEM armor.

    Used to filter out the BEGIN/END PKCS7 delimiter lines and blank
    lines when reassembling a signature from its XML representation.
    """
    return l.strip() not in ("-----BEGIN PKCS7-----", "-----END PKCS7-----", "")
|
||||
|
||||
|
||||
def b64encode(b):
    """Base64-encode *b* and return it as text wrapped at 76 characters.

    Every emitted line, including the final one, ends with a newline;
    an empty input yields an empty string.
    """
    encoded = base64.b64encode(b)
    pieces = []
    # Line-break at 76 chars.
    for start in range(0, len(encoded), 76):
        pieces.append(encoded[start : start + 76].decode("ascii"))
        pieces.append("\n")
    return "".join(pieces)
|
||||
|
||||
|
||||
class SignatureRecord(object):
    """One DSIG signature: header fields plus the raw pkcs7 payload bytes."""

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.__dict__)

    def toXML(self, writer, ttFont):
        """Write the record as PEM-armored base64 inside a SignatureRecord tag."""
        writer.begintag(self.__class__.__name__, format=self.ulFormat)
        writer.newline()
        # PEM armor is written unindented so the base64 lines stay flush left.
        writer.write_noindent("-----BEGIN PKCS7-----\n")
        writer.write_noindent(b64encode(self.pkcs7))
        writer.write_noindent("-----END PKCS7-----\n")
        writer.endtag(self.__class__.__name__)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the record: strip the PEM armor and decode the base64 body."""
        self.ulFormat = safeEval(attrs["format"])
        # Reserved fields default to 0 when absent from older TTX dumps.
        self.usReserved1 = safeEval(attrs.get("reserved1", "0"))
        self.usReserved2 = safeEval(attrs.get("reserved2", "0"))
        # pem_spam drops the BEGIN/END delimiters and blank lines.
        self.pkcs7 = base64.b64decode(tobytes(strjoin(filter(pem_spam, content))))
|
||||
@ -0,0 +1,17 @@
|
||||
import json
|
||||
|
||||
from . import DefaultTable
|
||||
|
||||
|
||||
class table_D__e_b_g(DefaultTable.DefaultTable):
    """The 'Debg' table: arbitrary debugging information stored as JSON."""

    def decompile(self, data, ttFont):
        """Parse the binary payload, which is a UTF-8 encoded JSON document."""
        self.data = json.loads(data)

    def compile(self, ttFont):
        """Serialize self.data back to UTF-8 encoded JSON bytes."""
        return json.dumps(self.data).encode("utf-8")

    def toXML(self, writer, ttFont):
        """Embed the pretty-printed JSON in a CDATA section."""
        writer.writecdata(json.dumps(self.data, indent=2))

    def fromXML(self, name, attrs, content, ttFont):
        # BUGFIX: 'content' is the XML parser's list of text chunks, not a
        # single string -- json.loads(content) raised TypeError.  Join the
        # chunks before parsing.
        self.data = json.loads("".join(content))
|
||||
@ -0,0 +1,49 @@
|
||||
from fontTools.misc.textTools import Tag
|
||||
from fontTools.ttLib import getClassTag
|
||||
|
||||
|
||||
class DefaultTable(object):
    """Fallback handler for SFNT tables: keeps the binary data untouched."""

    dependencies = []

    def __init__(self, tag=None):
        # Derive the table tag from the class name when not given explicitly.
        self.tableTag = Tag(tag if tag is not None else getClassTag(self.__class__))

    def decompile(self, data, ttFont):
        # No parsing: the raw bytes are the model.
        self.data = data

    def compile(self, ttFont):
        return self.data

    def toXML(self, writer, ttFont, **kwargs):
        """Dump the raw table bytes as a <hexdata> block."""
        if hasattr(self, "ERROR"):
            # Surface decompilation failures in the dump so they aren't silent.
            for note in (
                "An error occurred during the decompilation of this table",
                self.ERROR,
            ):
                writer.comment(note)
                writer.newline()
        writer.begintag("hexdata")
        writer.newline()
        writer.dumphex(self.compile(ttFont))
        writer.endtag("hexdata")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Load raw table bytes back from a <hexdata> element."""
        from fontTools.misc.textTools import readHex
        from fontTools import ttLib

        if name != "hexdata":
            raise ttLib.TTLibError("can't handle '%s' element" % name)
        self.decompile(readHex(content), ttFont)

    def __repr__(self):
        return "<'%s' table at %x>" % (self.tableTag, id(self))

    def __eq__(self, other):
        if type(self) != type(other):
            return NotImplemented
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        eq = self.__eq__(other)
        if eq is NotImplemented:
            return eq
        return not eq
|
||||
@ -0,0 +1,827 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import (
|
||||
bytechr,
|
||||
byteord,
|
||||
bytesjoin,
|
||||
strjoin,
|
||||
safeEval,
|
||||
readHex,
|
||||
hexStr,
|
||||
deHexStr,
|
||||
)
|
||||
from .BitmapGlyphMetrics import (
|
||||
BigGlyphMetrics,
|
||||
bigGlyphMetricsFormat,
|
||||
SmallGlyphMetrics,
|
||||
smallGlyphMetricsFormat,
|
||||
)
|
||||
from . import DefaultTable
|
||||
import itertools
|
||||
import os
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)

# sstruct layout of the EBDT header: a single 16.16 fixed-point version.
ebdtTableVersionFormat = """
> # big endian
version: 16.16F
"""

# sstruct layout of one component entry of a composite bitmap glyph
# (see EbdtComponent below).
ebdtComponentFormat = """
> # big endian
glyphCode: H
xOffset: b
yOffset: b
"""
|
||||
|
||||
|
||||
class table_E_B_D_T_(DefaultTable.DefaultTable):
    """Embedded bitmap data table.

    Holds the raw bitmap images; the per-strike/per-glyph lookup
    information lives in the companion locator table (EBLC by default).
    """

    # Keep a reference to the name of the data locator table.
    locatorName = "EBLC"

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for converting a font file to an alternative format.
    def getImageFormatClass(self, imageFormat):
        return ebdt_bitmap_classes[imageFormat]

    def decompile(self, data, ttFont):
        """Build per-strike glyph dicts by slicing *data* at locator offsets."""
        # Get the version but don't advance the slice.
        # Most of the lookup for this table is done relative
        # to the beginning so slice by the offsets provided
        # in the EBLC table.
        sstruct.unpack2(ebdtTableVersionFormat, data, self)

        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps intervals of data to the BitmapGlyph.
        glyphDict = {}

        # Pull out the EBLC table and loop through glyphs.
        # A strike is a concept that spans both tables.
        # The actual bitmap data is stored in the EBDT.
        locator = ttFont[self.__class__.locatorName]
        self.strikeData = []
        for curStrike in locator.strikes:
            bitmapGlyphDict = {}
            self.strikeData.append(bitmapGlyphDict)
            for indexSubTable in curStrike.indexSubTables:
                dataIter = zip(indexSubTable.names, indexSubTable.locations)
                for curName, curLoc in dataIter:
                    # Don't create duplicate data entries for the same glyphs.
                    # Instead just use the structures that already exist if they exist.
                    if curLoc in glyphDict:
                        curGlyph = glyphDict[curLoc]
                    else:
                        curGlyphData = data[slice(*curLoc)]
                        imageFormatClass = self.getImageFormatClass(
                            indexSubTable.imageFormat
                        )
                        curGlyph = imageFormatClass(curGlyphData, ttFont)
                        glyphDict[curLoc] = curGlyph
                    bitmapGlyphDict[curName] = curGlyph

    def compile(self, ttFont):
        """Pack all bitmap glyphs; feeds the new data offsets to the locator."""
        dataList = []
        dataList.append(sstruct.pack(ebdtTableVersionFormat, self))
        dataSize = len(dataList[0])

        # Keep a dict of glyphs that have been seen so they aren't remade.
        # This dict maps the id of the BitmapGlyph to the interval
        # in the data.
        glyphDict = {}

        # Go through the bitmap glyph data. Just in case the data for a glyph
        # changed the size metrics should be recalculated. There are a variety
        # of formats and they get stored in the EBLC table. That is why
        # recalculation is deferred to the EblcIndexSubTable class and just
        # pass what is known about bitmap glyphs from this particular table.
        locator = ttFont[self.__class__.locatorName]
        for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
            for curIndexSubTable in curStrike.indexSubTables:
                dataLocations = []
                for curName in curIndexSubTable.names:
                    # Handle the data placement based on seeing the glyph or not.
                    # Just save a reference to the location if the glyph has already
                    # been saved in compile. This code assumes that glyphs will only
                    # be referenced multiple times from indexFormat5. By luck the
                    # code may still work when referencing poorly ordered fonts with
                    # duplicate references. If there is a font that is unlucky the
                    # respective compile methods for the indexSubTables will fail
                    # their assertions. All fonts seem to follow this assumption.
                    # More complicated packing may be needed if a counter-font exists.
                    glyph = curGlyphDict[curName]
                    objectId = id(glyph)
                    if objectId not in glyphDict:
                        data = glyph.compile(ttFont)
                        data = curIndexSubTable.padBitmapData(data)
                        startByte = dataSize
                        dataSize += len(data)
                        endByte = dataSize
                        dataList.append(data)
                        dataLoc = (startByte, endByte)
                        glyphDict[objectId] = dataLoc
                    else:
                        dataLoc = glyphDict[objectId]
                    dataLocations.append(dataLoc)
                # Just use the new data locations in the indexSubTable.
                # The respective compile implementations will take care
                # of any of the problems in the conversion that may arise.
                curIndexSubTable.locations = dataLocations

        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        """Dump a header element plus one <strikedata> element per strike."""
        # When exporting to XML if one of the data export formats
        # requires metrics then those metrics may be in the locator.
        # In this case populate the bitmaps with "export metrics".
        if ttFont.bitmapGlyphDataFormat in ("row", "bitwise"):
            locator = ttFont[self.__class__.locatorName]
            for curStrike, curGlyphDict in zip(locator.strikes, self.strikeData):
                for curIndexSubTable in curStrike.indexSubTables:
                    for curName in curIndexSubTable.names:
                        glyph = curGlyphDict[curName]
                        # I'm not sure which metrics have priority here.
                        # For now if both metrics exist go with glyph metrics.
                        if hasattr(glyph, "metrics"):
                            glyph.exportMetrics = glyph.metrics
                        else:
                            glyph.exportMetrics = curIndexSubTable.metrics
                        glyph.exportBitDepth = curStrike.bitmapSizeTable.bitDepth

        writer.simpletag("header", [("version", self.version)])
        writer.newline()
        locator = ttFont[self.__class__.locatorName]
        for strikeIndex, bitmapGlyphDict in enumerate(self.strikeData):
            writer.begintag("strikedata", [("index", strikeIndex)])
            writer.newline()
            for curName, curBitmap in bitmapGlyphDict.items():
                curBitmap.toXML(strikeIndex, curName, writer, ttFont)
            writer.endtag("strikedata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild strike data from 'header' and 'strikedata' elements."""
        if name == "header":
            self.version = safeEval(attrs["version"])
        elif name == "strikedata":
            if not hasattr(self, "strikeData"):
                self.strikeData = []
            strikeIndex = safeEval(attrs["index"])

            bitmapGlyphDict = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                # BUGFIX: match the full subclass prefix.  The previous check
                # compared name[4:] against the prefix minus its first four
                # characters, which also accepted tags with an arbitrary
                # four-character head and would then mis-parse the format
                # number out of them.
                if name.startswith(_bitmapGlyphSubclassPrefix):
                    imageFormat = safeEval(name[len(_bitmapGlyphSubclassPrefix) :])
                    glyphName = attrs["name"]
                    imageFormatClass = self.getImageFormatClass(imageFormat)
                    curGlyph = imageFormatClass(None, None)
                    curGlyph.fromXML(name, attrs, content, ttFont)
                    assert glyphName not in bitmapGlyphDict, (
                        "Duplicate glyphs with the same name '%s' in the same strike."
                        % glyphName
                    )
                    bitmapGlyphDict[glyphName] = curGlyph
                else:
                    log.warning("%s being ignored by %s", name, self.__class__.__name__)

            # Grow the strike data array to the appropriate size. The XML
            # format allows the strike index value to be out of order.
            if strikeIndex >= len(self.strikeData):
                self.strikeData += [None] * (strikeIndex + 1 - len(self.strikeData))
            assert (
                self.strikeData[strikeIndex] is None
            ), "Duplicate strike EBDT indices."
            self.strikeData[strikeIndex] = bitmapGlyphDict
|
||||
|
||||
|
||||
class EbdtComponent(object):
    """One component reference inside a composite bitmap glyph."""

    def toXML(self, writer, ttFont):
        writer.begintag("ebdtComponent", [("name", self.name)])
        writer.newline()
        # Emit each field of ebdtComponentFormat; the first entry of the
        # name list is skipped (mirrors the set built in fromXML below).
        for componentName in sstruct.getformat(ebdtComponentFormat)[1][1:]:
            writer.simpletag(componentName, value=getattr(self, componentName))
            writer.newline()
        writer.endtag("ebdtComponent")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.name = attrs["name"]
        # Accept only the struct's own field names as child elements.
        componentNames = set(sstruct.getformat(ebdtComponentFormat)[1][1:])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name in componentNames:
                vars(self)[name] = safeEval(attrs["value"])
            else:
                log.warning("unknown name '%s' being ignored by EbdtComponent.", name)
|
||||
|
||||
|
||||
# Helper functions for dealing with binary.
|
||||
|
||||
|
||||
def _data2binary(data, numBits):
|
||||
binaryList = []
|
||||
for curByte in data:
|
||||
value = byteord(curByte)
|
||||
numBitsCut = min(8, numBits)
|
||||
for i in range(numBitsCut):
|
||||
if value & 0x1:
|
||||
binaryList.append("1")
|
||||
else:
|
||||
binaryList.append("0")
|
||||
value = value >> 1
|
||||
numBits -= numBitsCut
|
||||
return strjoin(binaryList)
|
||||
|
||||
|
||||
def _binary2data(binary):
|
||||
byteList = []
|
||||
for bitLoc in range(0, len(binary), 8):
|
||||
byteString = binary[bitLoc : bitLoc + 8]
|
||||
curByte = 0
|
||||
for curBit in reversed(byteString):
|
||||
curByte = curByte << 1
|
||||
if curBit == "1":
|
||||
curByte |= 1
|
||||
byteList.append(bytechr(curByte))
|
||||
return bytesjoin(byteList)
|
||||
|
||||
|
||||
def _memoize(f):
|
||||
class memodict(dict):
|
||||
def __missing__(self, key):
|
||||
ret = f(key)
|
||||
if isinstance(key, int) or len(key) == 1:
|
||||
self[key] = ret
|
||||
return ret
|
||||
|
||||
return memodict().__getitem__
|
||||
|
||||
|
||||
# 00100111 -> 11100100 per byte, not to be confused with little/big endian.
|
||||
# Bitmap data per byte is in the order that binary is written on the page
|
||||
# with the least significant bit as far right as possible. This is the
|
||||
# opposite of what makes sense algorithmically and hence this function.
|
||||
@_memoize
def _reverseBytes(data):
    r"""Reverse the bit order within each byte of *data*.

    Accepts an int, a single byte, or a bytes string (handled bytewise).

    >>> bin(ord(_reverseBytes(0b00100111)))
    '0b11100100'
    >>> _reverseBytes(b'\x00\xf0')
    b'\x00\x0f'
    """
    # Multi-byte input: reverse each byte independently (memoized per byte).
    if isinstance(data, bytes) and len(data) != 1:
        return bytesjoin(map(_reverseBytes, data))
    byte = byteord(data)
    result = 0
    # Shift bits out of 'byte' LSB-first and into 'result' MSB-first.
    for i in range(8):
        result = result << 1
        result |= byte & 1
        byte = byte >> 1
    return bytechr(result)
|
||||
|
||||
|
||||
# This section of code is for reading and writing image data to/from XML.
|
||||
|
||||
|
||||
def _writeRawImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Dump the glyph's image bytes as a hex block ('raw' export format)."""
    writer.begintag("rawimagedata")
    writer.newline()
    writer.dumphex(bitmapObject.imageData)
    writer.endtag("rawimagedata")
    writer.newline()
|
||||
|
||||
|
||||
def _readRawImageData(bitmapObject, name, attrs, content, ttFont):
    """Inverse of _writeRawImageData: parse the hex text back into bytes."""
    bitmapObject.imageData = readHex(content)
|
||||
|
||||
|
||||
def _writeRowImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Export image data one hex-encoded row per XML element."""
    # exportMetrics/exportBitDepth are one-shot attributes planted by the
    # table's toXML; consume and remove them so they don't leak.
    metrics = bitmapObject.exportMetrics
    del bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    del bitmapObject.exportBitDepth

    writer.begintag(
        "rowimagedata", bitDepth=bitDepth, width=metrics.width, height=metrics.height
    )
    writer.newline()
    for curRow in range(metrics.height):
        rowData = bitmapObject.getRow(curRow, bitDepth=bitDepth, metrics=metrics)
        writer.simpletag("row", value=hexStr(rowData))
        writer.newline()
    writer.endtag("rowimagedata")
    writer.newline()
|
||||
|
||||
|
||||
def _readRowImageData(bitmapObject, name, attrs, content, ttFont):
    """Rebuild image data from per-row hex strings (inverse of the writer)."""
    bitDepth = safeEval(attrs["bitDepth"])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs["width"])
    metrics.height = safeEval(attrs["height"])

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        name, attr, content = element
        # Only <row> children are meaningful; anything else is ignored.
        if name == "row":
            dataRows.append(deHexStr(attr["value"]))
    bitmapObject.setRows(dataRows, bitDepth=bitDepth, metrics=metrics)
|
||||
|
||||
|
||||
def _writeBitwiseImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Export image data as ASCII art, one row per element ('.' = 0, '@' = 1)."""
    # exportMetrics/exportBitDepth are one-shot attributes planted by the
    # table's toXML; consume and remove them so they don't leak.
    metrics = bitmapObject.exportMetrics
    del bitmapObject.exportMetrics
    bitDepth = bitmapObject.exportBitDepth
    del bitmapObject.exportBitDepth

    # A dict for mapping binary to more readable/artistic ASCII characters.
    binaryConv = {"0": ".", "1": "@"}

    writer.begintag(
        "bitwiseimagedata",
        bitDepth=bitDepth,
        width=metrics.width,
        height=metrics.height,
    )
    writer.newline()
    for curRow in range(metrics.height):
        # Rows are fetched at bitDepth=1 with reversed byte order so the
        # bits come out in on-page reading order.
        rowData = bitmapObject.getRow(
            curRow, bitDepth=1, metrics=metrics, reverseBytes=True
        )
        rowData = _data2binary(rowData, metrics.width)
        # Make the output a readable ASCII art form.
        rowData = strjoin(map(binaryConv.get, rowData))
        writer.simpletag("row", value=rowData)
        writer.newline()
    writer.endtag("bitwiseimagedata")
    writer.newline()
|
||||
|
||||
|
||||
def _readBitwiseImageData(bitmapObject, name, attrs, content, ttFont):
    """Rebuild image data from the ASCII-art rows written by the exporter."""
    bitDepth = safeEval(attrs["bitDepth"])
    metrics = SmallGlyphMetrics()
    metrics.width = safeEval(attrs["width"])
    metrics.height = safeEval(attrs["height"])

    # A dict for mapping from ASCII to binary. All characters are considered
    # a '1' except space, period and '0' which maps to '0'.
    binaryConv = {" ": "0", ".": "0", "0": "0"}

    dataRows = []
    for element in content:
        if not isinstance(element, tuple):
            continue
        name, attr, content = element
        if name == "row":
            # starmap feeds (char, "1") pairs: dict.get's default makes any
            # unlisted character count as a set bit.
            mapParams = zip(attr["value"], itertools.repeat("1"))
            rowData = strjoin(itertools.starmap(binaryConv.get, mapParams))
            dataRows.append(_binary2data(rowData))

    bitmapObject.setRows(
        dataRows, bitDepth=bitDepth, metrics=metrics, reverseBytes=True
    )
|
||||
|
||||
|
||||
def _writeExtFileImageData(strikeIndex, glyphName, bitmapObject, writer, ttFont):
    """Write the glyph's image bytes to an external file, referenced from XML.

    Files land in <output dir>/bitmaps/strike<N>/<glyphName><ext>, where
    the extension comes from the bitmap class's fileExtension attribute.
    """
    try:
        folder = os.path.dirname(writer.file.name)
    except AttributeError:
        # fall back to current directory if output file's directory isn't found
        folder = "."
    folder = os.path.join(folder, "bitmaps", "strike%d" % strikeIndex)
    # exist_ok avoids the race between a separate isdir() check and
    # makedirs() (the previous check-then-create could still raise).
    os.makedirs(folder, exist_ok=True)

    fullPath = os.path.join(folder, glyphName + bitmapObject.fileExtension)
    writer.simpletag("extfileimagedata", value=fullPath)
    writer.newline()

    with open(fullPath, "wb") as file:
        file.write(bitmapObject.imageData)
|
||||
|
||||
|
||||
def _readExtFileImageData(bitmapObject, name, attrs, content, ttFont):
    """Load image bytes back from the external file referenced in the XML."""
    fullPath = attrs["value"]
    with open(fullPath, "rb") as file:
        bitmapObject.imageData = file.read()
|
||||
|
||||
|
||||
# End of XML writing code.
|
||||
|
||||
# Important information about the naming scheme. Used for identifying formats
|
||||
# in XML.
|
||||
_bitmapGlyphSubclassPrefix = "ebdt_bitmap_format_"
|
||||
|
||||
|
||||
class BitmapGlyph(object):
    """Base class for one glyph's bitmap data in any EBDT image format.

    Decompilation is lazy: the raw bytes sit in self.data until an
    attribute access triggers decompile() via __getattr__.
    """

    # For the external file format. This can be changed in subclasses. This way
    # when the extfile option is turned on files have the form: glyphName.ext
    # The default is just a flat binary file with no meaning.
    fileExtension = ".bin"

    # Keep track of reading and writing of various forms.
    xmlDataFunctions = {
        "raw": (_writeRawImageData, _readRawImageData),
        "row": (_writeRowImageData, _readRowImageData),
        "bitwise": (_writeBitwiseImageData, _readBitwiseImageData),
        "extfile": (_writeExtFileImageData, _readExtFileImageData),
    }

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompilation is untested here...
        # if not ttFont.lazy:
        # 	self.decompile()
        # 	del self.data

    def __getattr__(self, attr):
        # Allow lazy decompile.
        if attr[:2] == "__":
            # Never trigger decompile for dunder lookups (pickling, etc.).
            raise AttributeError(attr)
        if attr == "data":
            # 'data' is gone once decompiled; don't recurse into decompile().
            raise AttributeError(attr)
        self.decompile()
        del self.data
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        # Idempotent: only decompiles while the raw data is still present.
        if hasattr(self, "data"):
            self.decompile()
            del self.data

    # Not a fan of this but it is needed for safer safety checking.
    def getFormat(self):
        # The numeric image format is encoded in the subclass name.
        return safeEval(self.__class__.__name__[len(_bitmapGlyphSubclassPrefix) :])

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [("name", glyphName)])
        writer.newline()

        self.writeMetrics(writer, ttFont)
        # Use the internal write method to write using the correct output format.
        self.writeData(strikeIndex, glyphName, writer, ttFont)

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if not name.endswith("imagedata"):
                continue
            # Chop off 'imagedata' from the tag to get just the option.
            option = name[: -len("imagedata")]
            assert option in self.__class__.xmlDataFunctions
            self.readData(name, attr, content, ttFont)

    # Some of the glyphs have the metrics. This allows for metrics to be
    # added if the glyph format has them. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # The opposite of write metrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    def writeData(self, strikeIndex, glyphName, writer, ttFont):
        try:
            writeFunc, readFunc = self.__class__.xmlDataFunctions[
                ttFont.bitmapGlyphDataFormat
            ]
        except KeyError:
            # Unknown export format requested: fall back to raw hex output.
            writeFunc = _writeRawImageData
        writeFunc(strikeIndex, glyphName, self, writer, ttFont)

    def readData(self, name, attrs, content, ttFont):
        # Chop off 'imagedata' from the tag to get just the option.
        option = name[: -len("imagedata")]
        writeFunc, readFunc = self.__class__.xmlDataFunctions[option]
        readFunc(self, name, attrs, content, ttFont)
|
||||
|
||||
|
||||
# A closure for creating a mixin for the two types of metrics handling.
|
||||
# Most of the code is very similar so its easier to deal with here.
|
||||
# Everything works just by passing the class that the mixin is for.
|
||||
def _createBitmapPlusMetricsMixin(metricsClass):
    """Return a mixin that reads/writes *metricsClass* metrics in XML.

    Most of the code for big and small metrics is identical, so both
    mixins are generated from this single closure.
    """
    # Both metrics names are listed here to make meaningful error messages.
    metricStrings = [BigGlyphMetrics.__name__, SmallGlyphMetrics.__name__]
    curMetricsName = metricsClass.__name__
    # Find which metrics this is for and determine the opposite name.
    metricsId = metricStrings.index(curMetricsName)
    oppositeMetricsName = metricStrings[1 - metricsId]

    class BitmapPlusMetricsMixin(object):
        def writeMetrics(self, writer, ttFont):
            self.metrics.toXML(writer, ttFont)

        def readMetrics(self, name, attrs, content, ttFont):
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == curMetricsName:
                    self.metrics = metricsClass()
                    self.metrics.fromXML(name, attrs, content, ttFont)
                elif name == oppositeMetricsName:
                    # Wrong metrics flavor for this format: warn, don't fail.
                    log.warning(
                        "Warning: %s being ignored in format %d.",
                        oppositeMetricsName,
                        self.getFormat(),
                    )

    return BitmapPlusMetricsMixin
|
||||
|
||||
|
||||
# Since there are only two types of mixins, just create them both here.
BitmapPlusBigMetricsMixin = _createBitmapPlusMetricsMixin(BigGlyphMetrics)
BitmapPlusSmallMetricsMixin = _createBitmapPlusMetricsMixin(SmallGlyphMetrics)
|
||||
|
||||
|
||||
# Data that is bit aligned can be tricky to deal with. These classes implement
# helper functionality for dealing with the data and getting a particular row
# of bitwise data. Also helps implement fancy data export/import in XML.
class BitAlignedBitmapMixin(object):
    def _getBitRange(self, row, bitDepth, metrics):
        # Rows are packed back-to-back with no per-row byte padding, so a
        # row's bit range is simply row * (bitDepth * width).
        rowBits = bitDepth * metrics.width
        bitOffset = row * rowBits
        return (bitOffset, bitOffset + rowBits)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Return one bitmap row as bytes, zero-padded in the final byte."""
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"

        # Loop through each byte. This can cover two bytes in the original data or
        # a single byte if things happen to be aligned. The very last entry might
        # not be aligned so take care to trim the binary data to size and pad with
        # zeros in the row data. Bit aligned data is somewhat tricky.
        #
        # Example of data cut. Data cut represented in x's.
        # '|' represents byte boundary.
        # data = ...0XX|XXXXXX00|000... => XXXXXXXX
        # or
        # data = ...0XX|XXXX0000|000... => XXXXXX00
        # or
        # data = ...000|XXXXXXXX|000... => XXXXXXXX
        # or
        # data = ...000|00XXXX00|000... => XXXX0000
        #
        dataList = []
        bitRange = self._getBitRange(row, bitDepth, metrics)
        stepRange = bitRange + (8,)
        for curBit in range(*stepRange):
            endBit = min(curBit + 8, bitRange[1])
            numBits = endBit - curBit
            cutPoint = curBit % 8
            firstByteLoc = curBit // 8
            secondByteLoc = endBit // 8
            if firstByteLoc < secondByteLoc:
                # Span straddles a byte boundary: take the tail of this byte.
                numBitsCut = 8 - cutPoint
            else:
                numBitsCut = endBit - curBit
            # Each stored byte is bit-reversed relative to how the algorithm
            # consumes it (see the final _reverseBytes below).
            curByte = _reverseBytes(self.imageData[firstByteLoc])
            firstHalf = byteord(curByte) >> cutPoint
            firstHalf = ((1 << numBitsCut) - 1) & firstHalf
            newByte = firstHalf
            if firstByteLoc < secondByteLoc and secondByteLoc < len(self.imageData):
                curByte = _reverseBytes(self.imageData[secondByteLoc])
                secondHalf = byteord(curByte) << numBitsCut
                newByte = (firstHalf | secondHalf) & ((1 << numBits) - 1)
            dataList.append(bytechr(newByte))

        # The way the data is kept is opposite the algorithm used.
        data = bytesjoin(dataList)
        if not reverseBytes:
            data = _reverseBytes(data)
        return data

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Pack the given per-row byte strings into self.imageData, bit-aligned."""
        if metrics is None:
            metrics = self.metrics
        if not reverseBytes:
            dataRows = list(map(_reverseBytes, dataRows))

        # Keep track of a list of ordinal values as they are easier to modify
        # than a list of strings. Map to actual strings later.
        numBytes = (self._getBitRange(len(dataRows), bitDepth, metrics)[0] + 7) // 8
        ordDataList = [0] * numBytes
        for row, data in enumerate(dataRows):
            bitRange = self._getBitRange(row, bitDepth, metrics)
            stepRange = bitRange + (8,)
            for curBit, curByte in zip(range(*stepRange), data):
                endBit = min(curBit + 8, bitRange[1])
                cutPoint = curBit % 8
                firstByteLoc = curBit // 8
                secondByteLoc = endBit // 8
                if firstByteLoc < secondByteLoc:
                    numBitsCut = 8 - cutPoint
                else:
                    numBitsCut = endBit - curBit
                curByte = byteord(curByte)
                firstByte = curByte & ((1 << numBitsCut) - 1)
                ordDataList[firstByteLoc] |= firstByte << cutPoint
                if firstByteLoc < secondByteLoc and secondByteLoc < numBytes:
                    secondByte = (curByte >> numBitsCut) & ((1 << 8 - numBitsCut) - 1)
                    ordDataList[secondByteLoc] |= secondByte

        # Save the image data with the bits going the correct way.
        self.imageData = _reverseBytes(bytesjoin(map(bytechr, ordDataList)))
|
||||
|
||||
|
||||
class ByteAlignedBitmapMixin(object):
    """Mixin for bitmap glyphs whose rows are padded out to whole bytes."""

    def _getByteRange(self, row, bitDepth, metrics):
        # Each row occupies ceil(bitDepth * width / 8) bytes.
        bytesPerRow = (bitDepth * metrics.width + 7) // 8
        start = row * bytesPerRow
        return (start, start + bytesPerRow)

    def getRow(self, row, bitDepth=1, metrics=None, reverseBytes=False):
        """Return the raw bytes making up one bitmap row."""
        if metrics is None:
            metrics = self.metrics
        assert 0 <= row and row < metrics.height, "Illegal row access in bitmap"
        start, end = self._getByteRange(row, bitDepth, metrics)
        rowData = self.imageData[start:end]
        return _reverseBytes(rowData) if reverseBytes else rowData

    def setRows(self, dataRows, bitDepth=1, metrics=None, reverseBytes=False):
        """Concatenate the given rows into this glyph's image data."""
        if metrics is None:
            metrics = self.metrics
        if reverseBytes:
            dataRows = map(_reverseBytes, dataRows)
        self.imageData = bytesjoin(dataRows)
|
||||
|
||||
|
||||
class ebdt_bitmap_format_1(
    ByteAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
):
    """Byte-aligned bitmap data preceded by small glyph metrics."""

    def decompile(self):
        # The small-metrics header is unpacked first; whatever remains
        # after the header is the byte-aligned image data.
        metrics = SmallGlyphMetrics()
        dummy, rest = sstruct.unpack2(smallGlyphMetricsFormat, self.data, metrics)
        self.metrics = metrics
        self.imageData = rest

    def compile(self, ttFont):
        header = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
        return header + self.imageData
|
||||
|
||||
|
||||
class ebdt_bitmap_format_2(
    BitAlignedBitmapMixin, BitmapPlusSmallMetricsMixin, BitmapGlyph
):
    """Bit-aligned bitmap data preceded by small glyph metrics."""

    def decompile(self):
        # Header first, then the bit-aligned image payload.
        metrics = SmallGlyphMetrics()
        dummy, rest = sstruct.unpack2(smallGlyphMetricsFormat, self.data, metrics)
        self.metrics = metrics
        self.imageData = rest

    def compile(self, ttFont):
        header = sstruct.pack(smallGlyphMetricsFormat, self.metrics)
        return header + self.imageData
|
||||
|
||||
|
||||
class ebdt_bitmap_format_5(BitAlignedBitmapMixin, BitmapGlyph):
    """Bit-aligned bitmap data with no metrics header in the glyph record."""

    def decompile(self):
        # Format 5 carries no per-glyph header: the payload is the image.
        self.imageData = self.data

    def compile(self, ttFont):
        return self.imageData
|
||||
|
||||
|
||||
class ebdt_bitmap_format_6(
    ByteAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
):
    """Byte-aligned bitmap data preceded by big glyph metrics."""

    def decompile(self):
        # Header first, then the byte-aligned image payload.
        metrics = BigGlyphMetrics()
        dummy, rest = sstruct.unpack2(bigGlyphMetricsFormat, self.data, metrics)
        self.metrics = metrics
        self.imageData = rest

    def compile(self, ttFont):
        header = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
        return header + self.imageData
|
||||
|
||||
|
||||
class ebdt_bitmap_format_7(
    BitAlignedBitmapMixin, BitmapPlusBigMetricsMixin, BitmapGlyph
):
    """Bit-aligned bitmap data preceded by big glyph metrics."""

    def decompile(self):
        # Header first, then the bit-aligned image payload.
        metrics = BigGlyphMetrics()
        dummy, rest = sstruct.unpack2(bigGlyphMetricsFormat, self.data, metrics)
        self.metrics = metrics
        self.imageData = rest

    def compile(self, ttFont):
        header = sstruct.pack(bigGlyphMetricsFormat, self.metrics)
        return header + self.imageData
|
||||
|
||||
|
||||
class ComponentBitmapGlyph(BitmapGlyph):
    """Base for composite bitmap glyphs built from EbdtComponent entries."""

    def toXML(self, strikeIndex, glyphName, writer, ttFont):
        writer.begintag(self.__class__.__name__, [("name", glyphName)])
        writer.newline()

        # Metrics serialization is supplied by the metrics mixin.
        self.writeMetrics(writer, ttFont)

        writer.begintag("components")
        writer.newline()
        for curComponent in self.componentArray:
            curComponent.toXML(writer, ttFont)
        writer.endtag("components")
        writer.newline()

        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        self.readMetrics(name, attrs, content, ttFont)
        # NOTE: the loops below rebind name/attrs/content, shadowing the
        # arguments — intentional, the originals are not needed afterwards.
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attr, content = element
            if name == "components":
                self.componentArray = []
                for compElement in content:
                    if not isinstance(compElement, tuple):
                        continue
                    name, attrs, content = compElement
                    if name == "ebdtComponent":
                        curComponent = EbdtComponent()
                        curComponent.fromXML(name, attrs, content, ttFont)
                        self.componentArray.append(curComponent)
                    else:
                        # Unknown child elements are skipped, not fatal.
                        log.warning("'%s' being ignored in component array.", name)
|
||||
|
||||
|
||||
class ebdt_bitmap_format_8(BitmapPlusSmallMetricsMixin, ComponentBitmapGlyph):
    """Composite glyph: small metrics, one pad byte, then component records."""

    def decompile(self):
        self.metrics = SmallGlyphMetrics()
        dummy, data = sstruct.unpack2(smallGlyphMetricsFormat, self.data, self.metrics)
        # Skip the single pad byte that follows the small metrics
        # (mirrored by the b"\0" appended in compile()).
        data = data[1:]

        (numComponents,) = struct.unpack(">H", data[:2])
        data = data[2:]
        self.componentArray = []
        for i in range(numComponents):
            curComponent = EbdtComponent()
            dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
            # Store the glyph name so compile() can map back to a glyph id.
            curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
            self.componentArray.append(curComponent)

    def compile(self, ttFont):
        dataList = []
        dataList.append(sstruct.pack(smallGlyphMetricsFormat, self.metrics))
        # Pad byte matching the one skipped in decompile().
        dataList.append(b"\0")
        dataList.append(struct.pack(">H", len(self.componentArray)))
        for curComponent in self.componentArray:
            curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
            dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
        return bytesjoin(dataList)
|
||||
|
||||
|
||||
class ebdt_bitmap_format_9(BitmapPlusBigMetricsMixin, ComponentBitmapGlyph):
    """Composite glyph: big metrics followed directly by component records."""

    def decompile(self):
        self.metrics = BigGlyphMetrics()
        dummy, data = sstruct.unpack2(bigGlyphMetricsFormat, self.data, self.metrics)
        # Unlike format 8 there is no pad byte before the component count.
        (numComponents,) = struct.unpack(">H", data[:2])
        data = data[2:]
        self.componentArray = []
        for i in range(numComponents):
            curComponent = EbdtComponent()
            dummy, data = sstruct.unpack2(ebdtComponentFormat, data, curComponent)
            # Store the glyph name so compile() can map back to a glyph id.
            curComponent.name = self.ttFont.getGlyphName(curComponent.glyphCode)
            self.componentArray.append(curComponent)

    def compile(self, ttFont):
        dataList = []
        dataList.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        dataList.append(struct.pack(">H", len(self.componentArray)))
        for curComponent in self.componentArray:
            curComponent.glyphCode = ttFont.getGlyphID(curComponent.name)
            dataList.append(sstruct.pack(ebdtComponentFormat, curComponent))
        return bytesjoin(dataList)
|
||||
|
||||
|
||||
# Dictionary of bitmap formats to the class representing that format
# currently only the ones listed in this map are the ones supported.
# Keys are EBDT image format numbers.
ebdt_bitmap_classes = {
    1: ebdt_bitmap_format_1,
    2: ebdt_bitmap_format_2,
    5: ebdt_bitmap_format_5,
    6: ebdt_bitmap_format_6,
    7: ebdt_bitmap_format_7,
    8: ebdt_bitmap_format_8,
    9: ebdt_bitmap_format_9,
}
|
||||
@ -0,0 +1,710 @@
|
||||
from fontTools.misc import sstruct
|
||||
from . import DefaultTable
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from .BitmapGlyphMetrics import (
|
||||
BigGlyphMetrics,
|
||||
bigGlyphMetricsFormat,
|
||||
SmallGlyphMetrics,
|
||||
smallGlyphMetricsFormat,
|
||||
)
|
||||
import struct
|
||||
import itertools
|
||||
from collections import deque
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# sstruct layout of the EBLC header: version and the number of strikes.
eblcHeaderFormat = """
> # big endian
version:  16.16F
numSizes: I
"""
# The table format string is split to handle sbitLineMetrics simply.
bitmapSizeTableFormatPart1 = """
> # big endian
indexSubTableArrayOffset: I
indexTablesSize: I
numberOfIndexSubTables: I
colorRef: I
"""
# The compound type for hori and vert.
sbitLineMetricsFormat = """
> # big endian
ascender: b
descender: b
widthMax: B
caretSlopeNumerator: b
caretSlopeDenominator: b
caretOffset: b
minOriginSB: b
minAdvanceSB: b
maxBeforeBL: b
minAfterBL: b
pad1: b
pad2: b
"""
# hori and vert go between the two parts.
bitmapSizeTableFormatPart2 = """
> # big endian
startGlyphIndex: H
endGlyphIndex: H
ppemX: B
ppemY: B
bitDepth: B
flags: b
"""

# Fixed-size binary record layouts used when walking the index subtables.
indexSubTableArrayFormat = ">HHL"
indexSubTableArraySize = struct.calcsize(indexSubTableArrayFormat)

indexSubHeaderFormat = ">HHL"
indexSubHeaderSize = struct.calcsize(indexSubHeaderFormat)

codeOffsetPairFormat = ">HH"
codeOffsetPairSize = struct.calcsize(codeOffsetPairFormat)
|
||||
|
||||
|
||||
class table_E_B_L_C_(DefaultTable.DefaultTable):
    """Embedded Bitmap Location table: indexes bitmap glyph data by strike."""

    # EBLC offsets refer into EBDT, so EBDT must be handled alongside.
    dependencies = ["EBDT"]

    # This method can be overridden in subclasses to support new formats
    # without changing the other implementation. Also can be used as a
    # convenience method for coverting a font file to an alternative format.
    def getIndexFormatClass(self, indexFormat):
        return eblc_sub_table_classes[indexFormat]

    def decompile(self, data, ttFont):
        # Save the original data because offsets are from the start of the table.
        origData = data
        i = 0

        dummy = sstruct.unpack(eblcHeaderFormat, data[:8], self)
        i += 8

        # First pass: read each strike's bitmapSizeTable (fixed-size records).
        self.strikes = []
        for curStrikeIndex in range(self.numSizes):
            curStrike = Strike()
            self.strikes.append(curStrike)
            curTable = curStrike.bitmapSizeTable
            dummy = sstruct.unpack2(
                bitmapSizeTableFormatPart1, data[i : i + 16], curTable
            )
            i += 16
            for metric in ("hori", "vert"):
                metricObj = SbitLineMetrics()
                vars(curTable)[metric] = metricObj
                dummy = sstruct.unpack2(
                    sbitLineMetricsFormat, data[i : i + 12], metricObj
                )
                i += 12
            dummy = sstruct.unpack(
                bitmapSizeTableFormatPart2, data[i : i + 8], curTable
            )
            i += 8

        # Second pass: follow each strike's index subtable array.
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            for subtableIndex in range(curTable.numberOfIndexSubTables):
                i = (
                    curTable.indexSubTableArrayOffset
                    + subtableIndex * indexSubTableArraySize
                )

                tup = struct.unpack(
                    indexSubTableArrayFormat, data[i : i + indexSubTableArraySize]
                )
                (firstGlyphIndex, lastGlyphIndex, additionalOffsetToIndexSubtable) = tup
                # The subtable offset is relative to the array's start.
                i = curTable.indexSubTableArrayOffset + additionalOffsetToIndexSubtable

                tup = struct.unpack(
                    indexSubHeaderFormat, data[i : i + indexSubHeaderSize]
                )
                (indexFormat, imageFormat, imageDataOffset) = tup

                indexFormatClass = self.getIndexFormatClass(indexFormat)
                indexSubTable = indexFormatClass(data[i + indexSubHeaderSize :], ttFont)
                indexSubTable.firstGlyphIndex = firstGlyphIndex
                indexSubTable.lastGlyphIndex = lastGlyphIndex
                indexSubTable.additionalOffsetToIndexSubtable = (
                    additionalOffsetToIndexSubtable
                )
                indexSubTable.indexFormat = indexFormat
                indexSubTable.imageFormat = imageFormat
                indexSubTable.imageDataOffset = imageDataOffset
                indexSubTable.decompile()  # https://github.com/fonttools/fonttools/issues/317
                curStrike.indexSubTables.append(indexSubTable)

    def compile(self, ttFont):
        dataList = []
        self.numSizes = len(self.strikes)
        dataList.append(sstruct.pack(eblcHeaderFormat, self))

        # Data size of the header + bitmapSizeTable needs to be calculated
        # in order to form offsets. This value will hold the size of the data
        # in dataList after all the data is consolidated in dataList.
        dataSize = len(dataList[0])

        # The table will be structured in the following order:
        # (0) header
        # (1) Each bitmapSizeTable [1 ... self.numSizes]
        # (2) Alternate between indexSubTableArray and indexSubTable
        #     for each bitmapSizeTable present.
        #
        # The issue is maintaining the proper offsets when table information
        # gets moved around. All offsets and size information must be recalculated
        # when building the table to allow editing within ttLib and also allow easy
        # import/export to and from XML. All of this offset information is lost
        # when exporting to XML so everything must be calculated fresh so importing
        # from XML will work cleanly. Only byte offset and size information is
        # calculated fresh. Count information like numberOfIndexSubTables is
        # checked through assertions. If the information in this table was not
        # touched or was changed properly then these types of values should match.
        #
        # The table will be rebuilt the following way:
        # (0) Precompute the size of all the bitmapSizeTables. This is needed to
        #     compute the offsets properly.
        # (1) For each bitmapSizeTable compute the indexSubTable and
        #     indexSubTableArray pair. The indexSubTable must be computed first
        #     so that the offset information in indexSubTableArray can be
        #     calculated. Update the data size after each pairing.
        # (2) Build each bitmapSizeTable.
        # (3) Consolidate all the data into the main dataList in the correct order.

        for _ in self.strikes:
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart1)
            dataSize += len(("hori", "vert")) * sstruct.calcsize(sbitLineMetricsFormat)
            dataSize += sstruct.calcsize(bitmapSizeTableFormatPart2)

        indexSubTablePairDataList = []
        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            curTable.numberOfIndexSubTables = len(curStrike.indexSubTables)
            curTable.indexSubTableArrayOffset = dataSize

            # Precompute the size of the indexSubTableArray. This information
            # is important for correctly calculating the new value for
            # additionalOffsetToIndexSubtable.
            sizeOfSubTableArray = (
                curTable.numberOfIndexSubTables * indexSubTableArraySize
            )
            lowerBound = dataSize
            dataSize += sizeOfSubTableArray
            upperBound = dataSize

            indexSubTableDataList = []
            for indexSubTable in curStrike.indexSubTables:
                indexSubTable.additionalOffsetToIndexSubtable = (
                    dataSize - curTable.indexSubTableArrayOffset
                )
                # Glyph index bounds are always recomputed from the names.
                glyphIds = list(map(ttFont.getGlyphID, indexSubTable.names))
                indexSubTable.firstGlyphIndex = min(glyphIds)
                indexSubTable.lastGlyphIndex = max(glyphIds)
                data = indexSubTable.compile(ttFont)
                indexSubTableDataList.append(data)
                dataSize += len(data)
            curTable.startGlyphIndex = min(
                ist.firstGlyphIndex for ist in curStrike.indexSubTables
            )
            curTable.endGlyphIndex = max(
                ist.lastGlyphIndex for ist in curStrike.indexSubTables
            )

            for i in curStrike.indexSubTables:
                data = struct.pack(
                    indexSubHeaderFormat,
                    i.firstGlyphIndex,
                    i.lastGlyphIndex,
                    i.additionalOffsetToIndexSubtable,
                )
                indexSubTablePairDataList.append(data)
            indexSubTablePairDataList.extend(indexSubTableDataList)
            curTable.indexTablesSize = dataSize - curTable.indexSubTableArrayOffset

        for curStrike in self.strikes:
            curTable = curStrike.bitmapSizeTable
            data = sstruct.pack(bitmapSizeTableFormatPart1, curTable)
            dataList.append(data)
            for metric in ("hori", "vert"):
                metricObj = vars(curTable)[metric]
                data = sstruct.pack(sbitLineMetricsFormat, metricObj)
                dataList.append(data)
            data = sstruct.pack(bitmapSizeTableFormatPart2, curTable)
            dataList.append(data)
        dataList.extend(indexSubTablePairDataList)

        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        writer.simpletag("header", [("version", self.version)])
        writer.newline()
        for curIndex, curStrike in enumerate(self.strikes):
            curStrike.toXML(curIndex, writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "header":
            self.version = safeEval(attrs["version"])
        elif name == "strike":
            if not hasattr(self, "strikes"):
                self.strikes = []
            strikeIndex = safeEval(attrs["index"])
            curStrike = Strike()
            curStrike.fromXML(name, attrs, content, ttFont, self)

            # Grow the strike array to the appropriate size. The XML format
            # allows for the strike index value to be out of order.
            if strikeIndex >= len(self.strikes):
                self.strikes += [None] * (strikeIndex + 1 - len(self.strikes))
            assert self.strikes[strikeIndex] is None, "Duplicate strike EBLC indices."
            self.strikes[strikeIndex] = curStrike
|
||||
|
||||
|
||||
class Strike(object):
    """One EBLC strike: a bitmapSizeTable plus its index subtables."""

    def __init__(self):
        self.bitmapSizeTable = BitmapSizeTable()
        self.indexSubTables = []

    def toXML(self, strikeIndex, writer, ttFont):
        writer.begintag("strike", [("index", strikeIndex)])
        writer.newline()
        self.bitmapSizeTable.toXML(writer, ttFont)
        writer.comment(
            "GlyphIds are written but not read. The firstGlyphIndex and\nlastGlyphIndex values will be recalculated by the compiler."
        )
        writer.newline()
        for indexSubTable in self.indexSubTables:
            indexSubTable.toXML(writer, ttFont)
        writer.endtag("strike")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont, locator):
        # ``locator`` supplies getIndexFormatClass() (the EBLC table object).
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "bitmapSizeTable":
                self.bitmapSizeTable.fromXML(name, attrs, content, ttFont)
            elif name.startswith(_indexSubTableSubclassPrefix):
                # The subtable's index format number is encoded in the
                # element name, after the common prefix.
                indexFormat = safeEval(name[len(_indexSubTableSubclassPrefix) :])
                indexFormatClass = locator.getIndexFormatClass(indexFormat)
                indexSubTable = indexFormatClass(None, None)
                indexSubTable.indexFormat = indexFormat
                indexSubTable.fromXML(name, attrs, content, ttFont)
                self.indexSubTables.append(indexSubTable)
|
||||
|
||||
|
||||
class BitmapSizeTable(object):
    """Strike-level metrics and bookkeeping read from the EBLC header area."""

    # Returns all the simple metric names that bitmap size table
    # cares about in terms of XML creation.
    def _getXMLMetricNames(self):
        dataNames = sstruct.getformat(bitmapSizeTableFormatPart1)[1]
        dataNames = {**dataNames, **sstruct.getformat(bitmapSizeTableFormatPart2)[1]}
        # Skip the first 3 data names because they are byte offsets and counts.
        return list(dataNames.keys())[3:]

    def toXML(self, writer, ttFont):
        writer.begintag("bitmapSizeTable")
        writer.newline()
        for metric in ("hori", "vert"):
            getattr(self, metric).toXML(metric, writer, ttFont)
        for metricName in self._getXMLMetricNames():
            writer.simpletag(metricName, value=getattr(self, metricName))
            writer.newline()
        writer.endtag("bitmapSizeTable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Create a lookup for all the simple names that make sense to
        # bitmap size table. Only read the information from these names.
        dataNames = set(self._getXMLMetricNames())
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "sbitLineMetrics":
                direction = attrs["direction"]
                assert direction in (
                    "hori",
                    "vert",
                ), "SbitLineMetrics direction specified invalid."
                metricObj = SbitLineMetrics()
                metricObj.fromXML(name, attrs, content, ttFont)
                vars(self)[direction] = metricObj
            elif name in dataNames:
                vars(self)[name] = safeEval(attrs["value"])
            else:
                # Anything else is skipped, not fatal.
                log.warning("unknown name '%s' being ignored in BitmapSizeTable.", name)
|
||||
|
||||
|
||||
class SbitLineMetrics(object):
    """Per-direction ('hori'/'vert') sbit line metrics record."""

    def toXML(self, name, writer, ttFont):
        # Serialize every field declared in sbitLineMetricsFormat.
        writer.begintag("sbitLineMetrics", [("direction", name)])
        writer.newline()
        for fieldName in sstruct.getformat(sbitLineMetricsFormat)[1]:
            writer.simpletag(fieldName, value=getattr(self, fieldName))
            writer.newline()
        writer.endtag("sbitLineMetrics")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Only accept field names declared in the sstruct format.
        knownFields = set(sstruct.getformat(sbitLineMetricsFormat)[1])
        for element in content:
            if not isinstance(element, tuple):
                continue
            elemName, elemAttrs, _ = element
            if elemName in knownFields:
                vars(self)[elemName] = safeEval(elemAttrs["value"])
|
||||
|
||||
|
||||
# Important information about the naming scheme. Used for identifying subtables.
# XML element names for index subtables are this prefix plus the format
# number, e.g. "eblc_index_sub_table_1".
_indexSubTableSubclassPrefix = "eblc_index_sub_table_"
|
||||
|
||||
|
||||
class EblcIndexSubTable(object):
    """Base class for EBLC index subtables with lazy decompilation."""

    def __init__(self, data, ttFont):
        self.data = data
        self.ttFont = ttFont
        # TODO Currently non-lazy decompiling doesn't work for this class...
        # if not ttFont.lazy:
        # self.decompile()
        # del self.data, self.ttFont

    def __getattr__(self, attr):
        # Allow lazy decompile.
        if attr[:2] == "__":
            raise AttributeError(attr)
        if attr == "data":
            # decompile() deletes self.data; asking for it again must not
            # trigger another decompile (would recurse).
            raise AttributeError(attr)
        self.decompile()
        return getattr(self, attr)

    def ensureDecompiled(self, recurse=False):
        # Still having a "data" attribute means decompile() has not run yet.
        if hasattr(self, "data"):
            self.decompile()

    # This method just takes care of the indexSubHeader. Implementing subclasses
    # should call it to compile the indexSubHeader and then continue compiling
    # the remainder of their unique format.
    def compile(self, ttFont):
        return struct.pack(
            indexSubHeaderFormat,
            self.indexFormat,
            self.imageFormat,
            self.imageDataOffset,
        )

    # Creates the XML for bitmap glyphs. Each index sub table basically makes
    # the same XML except for specific metric information that is written
    # out via a method call that a subclass implements optionally.
    def toXML(self, writer, ttFont):
        writer.begintag(
            self.__class__.__name__,
            [
                ("imageFormat", self.imageFormat),
                ("firstGlyphIndex", self.firstGlyphIndex),
                ("lastGlyphIndex", self.lastGlyphIndex),
            ],
        )
        writer.newline()
        self.writeMetrics(writer, ttFont)
        # Write out the names as thats all thats needed to rebuild etc.
        # For font debugging of consecutive formats the ids are also written.
        # The ids are not read when moving from the XML format.
        glyphIds = map(ttFont.getGlyphID, self.names)
        for glyphName, glyphId in zip(self.names, glyphIds):
            writer.simpletag("glyphLoc", name=glyphName, id=glyphId)
            writer.newline()
        writer.endtag(self.__class__.__name__)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Read all the attributes. Even though the glyph indices are
        # recalculated, they are still read in case there needs to
        # be an immediate export of the data.
        self.imageFormat = safeEval(attrs["imageFormat"])
        self.firstGlyphIndex = safeEval(attrs["firstGlyphIndex"])
        self.lastGlyphIndex = safeEval(attrs["lastGlyphIndex"])

        self.readMetrics(name, attrs, content, ttFont)

        self.names = []
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            if name == "glyphLoc":
                self.names.append(attrs["name"])

    # A helper method that writes the metrics for the index sub table. It also
    # is responsible for writing the image size for fixed size data since fixed
    # size is not recalculated on compile. Default behavior is to do nothing.
    def writeMetrics(self, writer, ttFont):
        pass

    # A helper method that is the inverse of writeMetrics.
    def readMetrics(self, name, attrs, content, ttFont):
        pass

    # This method is for fixed glyph data sizes. There are formats where
    # the glyph data is fixed but are actually composite glyphs. To handle
    # this the font spec in indexSubTable makes the data the size of the
    # fixed size by padding the component arrays. This function abstracts
    # out this padding process. Input is data unpadded. Output is data
    # padded only in fixed formats. Default behavior is to return the data.
    def padBitmapData(self, data):
        return data

    # Remove any of the glyph locations and names that are flagged as skipped.
    # This only occurs in formats {1,3}.
    def removeSkipGlyphs(self):
        # Determines if a name, location pair is a valid data location.
        # Skip glyphs are marked when the size is equal to zero.
        def isValidLocation(args):
            (name, (startByte, endByte)) = args
            return startByte < endByte

        # Remove all skip glyphs.
        dataPairs = list(filter(isValidLocation, zip(self.names, self.locations)))
        self.names, self.locations = list(map(list, zip(*dataPairs)))
|
||||
|
||||
|
||||
# A closure for creating a custom mixin. This is done because formats 1 and 3
# are very similar. The only difference between them is the size per offset
# value. Code put in here should handle both cases generally.
def _createOffsetArrayIndexSubTableMixin(formatStringForDataType):
    """Return a mixin handling offset-array index subtable formats.

    ``formatStringForDataType`` is a struct format code giving the size of
    each stored offset (e.g. "L" for index format 1).
    """
    # Prep the data size for the offset array data format.
    dataFormat = ">" + formatStringForDataType
    offsetDataSize = struct.calcsize(dataFormat)

    class OffsetArrayIndexSubTableMixin(object):
        def decompile(self):
            numGlyphs = self.lastGlyphIndex - self.firstGlyphIndex + 1
            indexingOffsets = [
                glyphIndex * offsetDataSize for glyphIndex in range(numGlyphs + 2)
            ]
            indexingLocations = zip(indexingOffsets, indexingOffsets[1:])
            offsetArray = [
                struct.unpack(dataFormat, self.data[slice(*loc)])[0]
                for loc in indexingLocations
            ]

            glyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            # Stored offsets are relative to imageDataOffset; pairing each
            # offset with its successor gives (start, end) data locations.
            modifiedOffsets = [offset + self.imageDataOffset for offset in offsetArray]
            self.locations = list(zip(modifiedOffsets, modifiedOffsets[1:]))

            self.names = list(map(self.ttFont.getGlyphName, glyphIds))
            self.removeSkipGlyphs()
            # Raw data no longer needed once decompiled (enables lazy mode).
            del self.data, self.ttFont

        def compile(self, ttFont):
            # First make sure that all the data lines up properly. Formats 1 and 3
            # must have all its data lined up consecutively. If not this will fail.
            for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
                assert (
                    curLoc[1] == nxtLoc[0]
                ), "Data must be consecutive in indexSubTable offset formats"

            glyphIds = list(map(ttFont.getGlyphID, self.names))
            # Make sure that all ids are sorted strictly increasing.
            assert all(glyphIds[i] < glyphIds[i + 1] for i in range(len(glyphIds) - 1))

            # Run a simple algorithm to add skip glyphs to the data locations at
            # the places where an id is not present.
            idQueue = deque(glyphIds)
            locQueue = deque(self.locations)
            allGlyphIds = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
            allLocations = []
            for curId in allGlyphIds:
                if curId != idQueue[0]:
                    # Missing id: emit a zero-length location (skip glyph).
                    allLocations.append((locQueue[0][0], locQueue[0][0]))
                else:
                    idQueue.popleft()
                    allLocations.append(locQueue.popleft())

            # Now that all the locations are collected, pack them appropriately into
            # offsets. This is the form where offset[i] is the location and
            # offset[i+1]-offset[i] is the size of the data location.
            offsets = list(allLocations[0]) + [loc[1] for loc in allLocations[1:]]
            # Image data offset must be less than or equal to the minimum of locations.
            # This offset may change the value for round tripping but is safer and
            # allows imageDataOffset to not be required to be in the XML version.
            self.imageDataOffset = min(offsets)
            offsetArray = [offset - self.imageDataOffset for offset in offsets]

            dataList = [EblcIndexSubTable.compile(self, ttFont)]
            dataList += [
                struct.pack(dataFormat, offsetValue) for offsetValue in offsetArray
            ]
            # Take care of any padding issues. Only occurs in format 3.
            if offsetDataSize * len(offsetArray) % 4 != 0:
                dataList.append(struct.pack(dataFormat, 0))
            return bytesjoin(dataList)

    return OffsetArrayIndexSubTableMixin
|
||||
|
||||
|
||||
# A Mixin for functionality shared between the different kinds
|
||||
# of fixed sized data handling. Both kinds have big metrics so
|
||||
# that kind of special processing is also handled in this mixin.
|
||||
class FixedSizeIndexSubTableMixin(object):
    """Shared XML and padding helpers for EBLC/EBDT index formats whose
    glyphs all occupy one fixed-size data slot (formats 2 and 5).

    Both formats carry a single shared imageSize and one BigGlyphMetrics
    record for the whole glyph range.
    """

    def writeMetrics(self, writer, ttFont):
        # Emit the shared slot size followed by the single metrics record.
        writer.simpletag("imageSize", value=self.imageSize)
        writer.newline()
        self.metrics.toXML(writer, ttFont)

    def readMetrics(self, name, attrs, content, ttFont):
        # Scan XML children for the imageSize tag and the metrics record.
        for element in content:
            if not isinstance(element, tuple):
                continue  # skip bare text between tags
            name, attrs, content = element
            if name == "imageSize":
                self.imageSize = safeEval(attrs["value"])
            elif name == BigGlyphMetrics.__name__:
                metrics = BigGlyphMetrics()
                metrics.fromXML(name, attrs, content, ttFont)
                self.metrics = metrics
            elif name == SmallGlyphMetrics.__name__:
                # Fixed-size formats only store big metrics.
                log.warning(
                    "SmallGlyphMetrics being ignored in format %d.", self.indexFormat
                )

    def padBitmapData(self, data):
        # The glyph bitmap may not exceed the fixed per-glyph slot size.
        assert len(data) <= self.imageSize, (
            "Data in indexSubTable format %d must be less than the fixed size."
            % self.indexFormat
        )
        # Zero-fill the remainder of the slot.
        return data + b"\0" * (self.imageSize - len(data))
|
||||
|
||||
|
||||
class eblc_index_sub_table_1(
    _createOffsetArrayIndexSubTableMixin("L"), EblcIndexSubTable
):
    """Index format 1: per-glyph offsets stored as ulongs ("L").
    All behaviour comes from the offset-array mixin."""
|
||||
|
||||
|
||||
class eblc_index_sub_table_2(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    """Index format 2: every glyph in a dense, consecutive range shares one
    fixed imageSize and one BigGlyphMetrics record; no offset array is
    stored, so data locations are derived arithmetically."""

    def decompile(self):
        # Header: 4-byte imageSize followed by one big-metrics record.
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        self.metrics = BigGlyphMetrics()
        sstruct.unpack2(bigGlyphMetricsFormat, self.data[4:], self.metrics)
        # Glyph range is implicit; synthesize evenly-spaced data locations
        # from the fixed slot size.
        gids = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        bounds = [
            self.imageDataOffset + self.imageSize * i for i in range(len(gids) + 1)
        ]
        self.locations = list(zip(bounds, bounds[1:]))
        self.names = [self.ttFont.getGlyphName(gid) for gid in gids]
        del self.data, self.ttFont

    def compile(self, ttFont):
        ids = [ttFont.getGlyphID(name) for name in self.names]
        # Make sure all the ids are consecutive. This is required by Format 2.
        expected = list(range(self.firstGlyphIndex, self.lastGlyphIndex + 1))
        assert ids == expected, "Format 2 ids must be consecutive."
        # Anchor the image data at the smallest start offset.
        self.imageDataOffset = min(next(iter(zip(*self.locations))))

        pieces = [EblcIndexSubTable.compile(self, ttFont)]
        pieces.append(struct.pack(">L", self.imageSize))
        pieces.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        return bytesjoin(pieces)
|
||||
|
||||
|
||||
class eblc_index_sub_table_3(
    _createOffsetArrayIndexSubTableMixin("H"), EblcIndexSubTable
):
    """Index format 3: per-glyph offsets stored as ushorts ("H").
    All behaviour comes from the offset-array mixin."""
|
||||
|
||||
|
||||
class eblc_index_sub_table_4(EblcIndexSubTable):
    """Index format 4: a sparse array of (glyphID, offset) pairs whose data
    must be stored back-to-back; a sentinel pair supplies the end offset of
    the final glyph."""

    def decompile(self):
        (numGlyphs,) = struct.unpack(">L", self.data[:4])
        data = self.data[4:]
        # numGlyphs + 1 pairs are stored (the extra one is the sentinel).
        pairs = []
        for i in range(numGlyphs + 1):
            start = i * codeOffsetPairSize
            pairs.append(
                struct.unpack(
                    codeOffsetPairFormat, data[start : start + codeOffsetPairSize]
                )
            )
        # Drop the sentinel's glyph id; keep every offset.
        glyphIds = [gid for gid, _ in pairs[:-1]]
        offsets = [self.imageDataOffset + off for _, off in pairs]
        self.locations = list(zip(offsets, offsets[1:]))
        self.names = [self.ttFont.getGlyphName(gid) for gid in glyphIds]
        del self.data, self.ttFont

    def compile(self, ttFont):
        # Format 4 requires the per-glyph data blocks to be back-to-back;
        # verify each location ends exactly where the next begins.
        for curLoc, nxtLoc in zip(self.locations, self.locations[1:]):
            assert (
                curLoc[1] == nxtLoc[0]
            ), "Data must be consecutive in indexSubTable format 4"

        offsets = list(self.locations[0]) + [loc[1] for loc in self.locations[1:]]
        # Image data offset must be less than or equal to the minimum of locations.
        # Resetting this offset may change the value for round tripping but is safer
        # and allows imageDataOffset to not be required to be in the XML version.
        self.imageDataOffset = min(offsets)
        relOffsets = [off - self.imageDataOffset for off in offsets]
        gids = [ttFont.getGlyphID(name) for name in self.names]
        # One extra id (0) pads the sentinel pair that closes the offset list.
        paddedIds = gids + [0]

        pieces = [EblcIndexSubTable.compile(self, ttFont)]
        pieces.append(struct.pack(">L", len(gids)))
        pieces.extend(
            struct.pack(codeOffsetPairFormat, gid, off)
            for gid, off in zip(paddedIds, relOffsets)
        )
        return bytesjoin(pieces)
|
||||
|
||||
|
||||
class eblc_index_sub_table_5(FixedSizeIndexSubTableMixin, EblcIndexSubTable):
    """Index format 5: fixed-size glyph data (like format 2) but for a
    sparse set of glyphs, listed as an explicit array of glyph ids."""

    def decompile(self):
        self.origDataLen = 0
        # Header: imageSize, one big-metrics record, then the id count.
        (self.imageSize,) = struct.unpack(">L", self.data[:4])
        rest = self.data[4:]
        self.metrics, rest = sstruct.unpack2(
            bigGlyphMetricsFormat, rest, BigGlyphMetrics()
        )
        (numGlyphs,) = struct.unpack(">L", rest[:4])
        rest = rest[4:]
        gids = [
            struct.unpack(">H", rest[2 * i : 2 * (i + 1)])[0] for i in range(numGlyphs)
        ]

        # Data slots are contiguous and all imageSize bytes long.
        bounds = [
            self.imageDataOffset + self.imageSize * i for i in range(len(gids) + 1)
        ]
        self.locations = list(zip(bounds, bounds[1:]))
        self.names = [self.ttFont.getGlyphName(gid) for gid in gids]
        del self.data, self.ttFont

    def compile(self, ttFont):
        # Anchor the image data at the smallest start offset.
        self.imageDataOffset = min(next(iter(zip(*self.locations))))
        pieces = [EblcIndexSubTable.compile(self, ttFont)]
        pieces.append(struct.pack(">L", self.imageSize))
        pieces.append(sstruct.pack(bigGlyphMetricsFormat, self.metrics))
        gids = [ttFont.getGlyphID(name) for name in self.names]
        pieces.append(struct.pack(">L", len(gids)))
        pieces.extend(struct.pack(">H", gid) for gid in gids)
        # Keep the ushort id array 32-bit aligned with a zero pad word.
        if len(gids) % 2 == 1:
            pieces.append(struct.pack(">H", 0))
        return bytesjoin(pieces)
|
||||
|
||||
|
||||
# Dictionary of indexFormat to the class representing that format.
|
||||
eblc_sub_table_classes = dict(
    enumerate(
        (
            eblc_index_sub_table_1,
            eblc_index_sub_table_2,
            eblc_index_sub_table_3,
            eblc_index_sub_table_4,
            eblc_index_sub_table_5,
        ),
        start=1,
    )
)
|
||||
@ -0,0 +1,42 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.misc.timeTools import timestampFromString, timestampToString
|
||||
from . import DefaultTable
|
||||
|
||||
FFTMFormat = """
|
||||
> # big endian
|
||||
version: I
|
||||
FFTimeStamp: Q
|
||||
sourceCreated: Q
|
||||
sourceModified: Q
|
||||
"""
|
||||
|
||||
|
||||
class table_F_F_T_M_(DefaultTable.DefaultTable):
    """FontForge's ``FFTM`` table: a version field plus three 64-bit
    timestamps (FontForge's own, and the source's creation and
    modification dates)."""

    def decompile(self, data, ttFont):
        # Unpack the fixed-size record straight onto this object.
        dummy, rest = sstruct.unpack2(FFTMFormat, data, self)

    def compile(self, ttFont):
        return sstruct.pack(FFTMFormat, self)

    def toXML(self, writer, ttFont):
        writer.comment(
            "FontForge's timestamp, font source creation and modification dates"
        )
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(FFTMFormat)
        for fieldName in names:
            fieldValue = getattr(self, fieldName)
            if fieldName in ("FFTimeStamp", "sourceCreated", "sourceModified"):
                # Timestamps are written as human-readable date strings.
                fieldValue = timestampToString(fieldValue)
            writer.simpletag(fieldName, value=fieldValue)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name in ("FFTimeStamp", "sourceCreated", "sourceModified"):
            value = timestampFromString(value)
        else:
            value = safeEval(value)
        setattr(self, name, value)
|
||||
@ -0,0 +1,144 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import floatToFixedToStr
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
from . import grUtils
|
||||
import struct
|
||||
|
||||
Feat_hdr_format = """
|
||||
>
|
||||
version: 16.16F
|
||||
"""
|
||||
|
||||
|
||||
class table_F__e_a_t(DefaultTable.DefaultTable):
    """The ``Feat`` table is used exclusively by the Graphite shaping engine
    to store features and possible settings specified in GDL. Graphite features
    determine what rules are applied to transform a glyph stream.

    Not to be confused with ``feat``, or the OpenType Layout tables
    ``GSUB``/``GPOS``."""

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # Maps a 4-char feature tag to its Feature object.
        self.features = {}

    def decompile(self, data, ttFont):
        (_, data) = sstruct.unpack2(Feat_hdr_format, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        (numFeats,) = struct.unpack(">H", data[:2])
        data = data[8:]

        # Pass 1: read the fixed-size feature records; byte offsets into the
        # settings array are converted to element indices.
        featRecords = []
        maxSettingIndex = 0
        for i in range(numFeats):
            if self.version >= 2.0:
                (fid, numSettings, _, offset, flags, lid) = struct.unpack(
                    ">LHHLHH", data[16 * i : 16 * (i + 1)]
                )
                offset = int((offset - 12 - 16 * numFeats) / 4)
            else:
                (fid, numSettings, offset, flags, lid) = struct.unpack(
                    ">HHLHH", data[12 * i : 12 * (i + 1)]
                )
                offset = int((offset - 12 - 12 * numFeats) / 4)
            featRecords.append((fid, numSettings, offset, flags, lid))
            maxSettingIndex = max(maxSettingIndex, offset + numSettings)
        data = data[16 * numFeats :]

        # Pass 2: read the (value, labelId) setting pairs, stopping early if
        # the table is truncated.
        settingPairs = []
        for i in range(maxSettingIndex):
            if len(data) >= 4 * (i + 1):
                settingPairs.append(struct.unpack(">HH", data[4 * i : 4 * (i + 1)]))

        # Pass 3: build Feature objects and attach their settings.
        for featIndex, rec in enumerate(featRecords):
            (fid, numSettings, offset, flags, lid) = rec
            fobj = Feature()
            fobj.flags = flags
            fobj.label = lid
            self.features[grUtils.num2tag(fid)] = fobj
            fobj.settings = {}
            fobj.default = None
            fobj.index = featIndex
            for settingIndex in range(offset, offset + numSettings):
                if settingIndex >= len(settingPairs):
                    continue
                (vid, vlid) = settingPairs[settingIndex]
                fobj.settings[vid] = vlid
                # The first listed setting acts as the feature's default.
                if fobj.default is None:
                    fobj.default = vid

    def compile(self, ttFont):
        featParts = []
        settingParts = []
        settingsSoFar = 0
        numFeatures = len(self.features)
        for tag, fobj in sorted(self.features.items(), key=lambda x: x[1].index):
            fnum = grUtils.tag2num(tag)
            if self.version >= 2.0:
                featParts.append(
                    struct.pack(
                        ">LHHLHH",
                        grUtils.tag2num(tag),
                        len(fobj.settings),
                        0,
                        settingsSoFar * 4 + 12 + 16 * numFeatures,
                        fobj.flags,
                        fobj.label,
                    )
                )
            elif fnum > 65535:  # self healing for alphabetic ids
                self.version = 2.0
                return self.compile(ttFont)
            else:
                featParts.append(
                    struct.pack(
                        ">HHLHH",
                        grUtils.tag2num(tag),
                        len(fobj.settings),
                        settingsSoFar * 4 + 12 + 12 * numFeatures,
                        fobj.flags,
                        fobj.label,
                    )
                )
            # The default setting sorts first; the rest sort naturally.
            for val, lid in sorted(
                fobj.settings.items(),
                key=lambda x: (-1, x[1]) if x[0] == fobj.default else x,
            ):
                settingParts.append(struct.pack(">HH", val, lid))
            settingsSoFar += len(fobj.settings)
        hdr = sstruct.pack(Feat_hdr_format, self)
        return (
            hdr
            + struct.pack(">HHL", numFeatures, 0, 0)
            + b"".join(featParts)
            + b"".join(settingParts)
        )

    def toXML(self, writer, ttFont):
        writer.simpletag("version", version=self.version)
        writer.newline()
        for tag, fobj in sorted(self.features.items(), key=lambda x: x[1].index):
            writer.begintag(
                "feature",
                fid=tag,
                label=fobj.label,
                flags=fobj.flags,
                default=(fobj.default if fobj.default else 0),
            )
            writer.newline()
            for val, lid in sorted(fobj.settings.items()):
                writer.simpletag("setting", value=val, label=lid)
                writer.newline()
            writer.endtag("feature")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
        elif name == "feature":
            fid = attrs["fid"]
            fobj = Feature()
            fobj.flags = int(safeEval(attrs["flags"]))
            fobj.label = int(safeEval(attrs["label"]))
            fobj.default = int(safeEval(attrs.get("default", "0")))
            fobj.index = len(self.features)
            self.features[fid] = fobj
            fobj.settings = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                tag, a, c = element
                if tag == "setting":
                    fobj.settings[int(safeEval(a["value"]))] = int(safeEval(a["label"]))
|
||||
|
||||
|
||||
class Feature(object):
    """Plain record for one Graphite feature; ``flags``, ``label``,
    ``settings``, ``default`` and ``index`` are assigned by the table."""
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_G_D_E_F_(BaseTTXConverter):
    """Glyph Definition table; all (de)compile and XML logic is inherited
    from BaseTTXConverter via the otData-generated converters."""
|
||||
@ -0,0 +1,141 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import tobytes, tostr, safeEval
|
||||
from . import DefaultTable
|
||||
|
||||
GMAPFormat = """
|
||||
> # big endian
|
||||
tableVersionMajor: H
|
||||
tableVersionMinor: H
|
||||
flags: H
|
||||
recordsCount: H
|
||||
recordsOffset: H
|
||||
fontNameLength: H
|
||||
"""
|
||||
# psFontName is a byte string which follows the record above. This is zero padded
# to the beginning of the records array. The recordsOffset is 32-bit aligned.
|
||||
|
||||
GMAPRecordFormat1 = """
|
||||
> # big endian
|
||||
UV: L
|
||||
cid: H
|
||||
gid: H
|
||||
ggid: H
|
||||
name: 32s
|
||||
"""
|
||||
|
||||
|
||||
class GMAPRecord(object):
    """One record of a ``GMAP`` table, mapping a Unicode value (UV) to a
    CID/GID pair plus the glyphlet's glyph id (ggid) and name."""

    def __init__(self, uv=0, cid=0, gid=0, ggid=0, name=""):
        self.UV = uv
        self.cid = cid
        self.gid = gid
        self.ggid = ggid
        self.name = name

    def toXML(self, writer, ttFont):
        writer.begintag("GMAPRecord")
        writer.newline()
        writer.simpletag("UV", value=self.UV)
        writer.newline()
        writer.simpletag("cid", value=self.cid)
        writer.newline()
        writer.simpletag("gid", value=self.gid)
        writer.newline()
        # Bug fix: this used to emit self.gid here too, silently dropping
        # the glyphlet glyph id from the XML dump.
        writer.simpletag("glyphletGid", value=self.ggid)
        writer.newline()
        writer.simpletag("GlyphletName", value=self.name)
        writer.newline()
        writer.endtag("GMAPRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        if name == "GlyphletName":
            self.name = value
        else:
            setattr(self, name, safeEval(value))

    def compile(self, ttFont):
        if self.UV is None:
            self.UV = 0
        # The name occupies a fixed 32-byte, NUL-padded slot.
        nameLen = len(self.name)
        if nameLen < 32:
            self.name = self.name + "\0" * (32 - nameLen)
        data = sstruct.pack(GMAPRecordFormat1, self)
        return data

    def __repr__(self):
        return (
            "GMAPRecord[ UV: "
            + str(self.UV)
            + ", cid: "
            + str(self.cid)
            + ", gid: "
            + str(self.gid)
            + ", ggid: "
            + str(self.ggid)
            + ", Glyphlet Name: "
            + str(self.name)
            + " ]"
        )
|
||||
|
||||
|
||||
class table_G_M_A_P_(DefaultTable.DefaultTable):
    """The ``GMAP`` table: a PostScript font name plus an array of
    GMAPRecord entries describing glyphlets."""

    dependencies = []

    def decompile(self, data, ttFont):
        dummy, newData = sstruct.unpack2(GMAPFormat, data, self)
        self.psFontName = tostr(newData[: self.fontNameLength])
        assert (
            self.recordsOffset % 4
        ) == 0, "GMAP error: recordsOffset is not 32 bit aligned."
        newData = data[self.recordsOffset :]
        self.gmapRecords = []
        for i in range(self.recordsCount):
            gmapRecord, newData = sstruct.unpack2(
                GMAPRecordFormat1, newData, GMAPRecord()
            )
            # Strip the NUL padding from the fixed 32-byte name slot.
            gmapRecord.name = gmapRecord.name.strip("\0")
            self.gmapRecords.append(gmapRecord)

    def compile(self, ttFont):
        self.recordsCount = len(self.gmapRecords)
        self.fontNameLength = len(self.psFontName)
        # Records start at the next 32-bit boundary after the 12-byte
        # header plus the font name.
        self.recordsOffset = 4 * (((self.fontNameLength + 12) + 3) // 4)
        pieces = [sstruct.pack(GMAPFormat, self)]
        pieces.append(tobytes(self.psFontName))
        pieces.append(b"\0" * (self.recordsOffset - sum(len(p) for p in pieces)))
        pieces.extend(record.compile(ttFont) for record in self.gmapRecords)
        return b"".join(pieces)

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(GMAPFormat)
        for fieldName in names:
            writer.simpletag(fieldName, value=getattr(self, fieldName))
            writer.newline()
        writer.simpletag("PSFontName", value=self.psFontName)
        writer.newline()
        for record in self.gmapRecords:
            record.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "GMAPRecord":
            if not hasattr(self, "gmapRecords"):
                self.gmapRecords = []
            record = GMAPRecord()
            self.gmapRecords.append(record)
            for element in content:
                if isinstance(element, str):
                    continue  # skip whitespace between child tags
                name, attrs, content = element
                record.fromXML(name, attrs, content, ttFont)
        else:
            value = attrs["value"]
            if name == "PSFontName":
                self.psFontName = value
            else:
                setattr(self, name, safeEval(value))
|
||||
@ -0,0 +1,126 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval, readHex
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import array
|
||||
|
||||
GPKGFormat = """
|
||||
> # big endian
|
||||
version: H
|
||||
flags: H
|
||||
numGMAPs: H
|
||||
numGlyplets: H
|
||||
"""
|
||||
# NOTE: comment copied from G_M_A_P_; GPKG itself stores offset arrays for
# GMAP and glyphlet blobs rather than a psFontName/recordsOffset layout.
|
||||
|
||||
|
||||
class table_G_P_K_G_(DefaultTable.DefaultTable):
    """The ``GPKG`` table: a container of raw GMAP blobs and raw glyphlet
    font blobs, each addressed by an array of absolute offsets."""

    def decompile(self, data, ttFont):
        dummy, newData = sstruct.unpack2(GPKGFormat, data, self)

        # Offsets are absolute (into `data`), stored big-endian; arrays are
        # byteswapped on little-endian hosts.
        GMAPoffsets = array.array("I")
        endPos = (self.numGMAPs + 1) * 4
        GMAPoffsets.frombytes(newData[:endPos])
        if sys.byteorder != "big":
            GMAPoffsets.byteswap()
        self.GMAPs = [
            data[GMAPoffsets[i] : GMAPoffsets[i + 1]] for i in range(self.numGMAPs)
        ]

        pos = endPos
        endPos = pos + (self.numGlyplets + 1) * 4
        glyphletOffsets = array.array("I")
        glyphletOffsets.frombytes(newData[pos:endPos])
        if sys.byteorder != "big":
            glyphletOffsets.byteswap()
        self.glyphlets = [
            data[glyphletOffsets[i] : glyphletOffsets[i + 1]]
            for i in range(self.numGlyplets)
        ]

    def compile(self, ttFont):
        self.numGMAPs = len(self.GMAPs)
        self.numGlyplets = len(self.glyphlets)
        GMAPoffsets = [0] * (self.numGMAPs + 1)
        glyphletOffsets = [0] * (self.numGlyplets + 1)

        pieces = [sstruct.pack(GPKGFormat, self)]

        # Both offset arrays precede the blob data; compute running offsets.
        pos = len(pieces[0]) + (self.numGMAPs + 1) * 4 + (self.numGlyplets + 1) * 4
        GMAPoffsets[0] = pos
        for i in range(1, self.numGMAPs + 1):
            pos += len(self.GMAPs[i - 1])
            GMAPoffsets[i] = pos
        gmapArray = array.array("I", GMAPoffsets)
        if sys.byteorder != "big":
            gmapArray.byteswap()
        pieces.append(gmapArray.tobytes())

        glyphletOffsets[0] = pos
        for i in range(1, self.numGlyplets + 1):
            pos += len(self.glyphlets[i - 1])
            glyphletOffsets[i] = pos
        glyphletArray = array.array("I", glyphletOffsets)
        if sys.byteorder != "big":
            glyphletArray.byteswap()
        pieces.append(glyphletArray.tobytes())
        pieces.extend(self.GMAPs)
        pieces.extend(self.glyphlets)
        return bytesjoin(pieces)

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(GPKGFormat)
        for fieldName in names:
            writer.simpletag(fieldName, value=getattr(self, fieldName))
            writer.newline()

        writer.begintag("GMAPs")
        writer.newline()
        for gmapData in self.GMAPs:
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(gmapData)
            writer.endtag("hexdata")
            writer.newline()
        writer.endtag("GMAPs")
        writer.newline()

        writer.begintag("glyphlets")
        writer.newline()
        for glyphletData in self.glyphlets:
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(glyphletData)
            writer.endtag("hexdata")
            writer.newline()
        writer.endtag("glyphlets")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "GMAPs":
            if not hasattr(self, "GMAPs"):
                self.GMAPs = []
            for element in content:
                if isinstance(element, str):
                    continue
                itemName, itemAttrs, itemContent = element
                if itemName == "hexdata":
                    self.GMAPs.append(readHex(itemContent))
        elif name == "glyphlets":
            if not hasattr(self, "glyphlets"):
                self.glyphlets = []
            for element in content:
                if isinstance(element, str):
                    continue
                itemName, itemAttrs, itemContent = element
                if itemName == "hexdata":
                    self.glyphlets.append(readHex(itemContent))
        else:
            setattr(self, name, safeEval(attrs["value"]))
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_G_P_O_S_(BaseTTXConverter):
    """Glyph Positioning table; all logic is inherited from
    BaseTTXConverter via the otData-generated converters."""
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_G_S_U_B_(BaseTTXConverter):
    """Glyph Substitution table; all logic is inherited from
    BaseTTXConverter via the otData-generated converters."""
|
||||
@ -0,0 +1,234 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import floatToFixedToStr
|
||||
from fontTools.misc.textTools import safeEval
|
||||
|
||||
# from itertools import *
|
||||
from functools import partial
|
||||
from . import DefaultTable
|
||||
from . import grUtils
|
||||
import struct
|
||||
|
||||
|
||||
Glat_format_0 = """
|
||||
> # big endian
|
||||
version: 16.16F
|
||||
"""
|
||||
|
||||
Glat_format_3 = """
|
||||
>
|
||||
version: 16.16F
|
||||
compression:L # compression scheme or reserved
|
||||
"""
|
||||
|
||||
Glat_format_1_entry = """
|
||||
>
|
||||
attNum: B # Attribute number of first attribute
|
||||
num: B # Number of attributes in this run
|
||||
"""
|
||||
Glat_format_23_entry = """
|
||||
>
|
||||
attNum: H # Attribute number of first attribute
|
||||
num: H # Number of attributes in this run
|
||||
"""
|
||||
|
||||
Glat_format_3_octabox_metrics = """
|
||||
>
|
||||
subboxBitmap: H # Which subboxes exist on 4x4 grid
|
||||
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
|
||||
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
|
||||
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
|
||||
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
|
||||
"""
|
||||
|
||||
Glat_format_3_subbox_entry = """
|
||||
>
|
||||
left: B # xi
|
||||
right: B # xa
|
||||
bottom: B # yi
|
||||
top: B # ya
|
||||
diagNegMin: B # Defines minimum negatively-sloped diagonal (si)
|
||||
diagNegMax: B # Defines maximum negatively-sloped diagonal (sa)
|
||||
diagPosMin: B # Defines minimum positively-sloped diagonal (di)
|
||||
diagPosMax: B # Defines maximum positively-sloped diagonal (da)
|
||||
"""
|
||||
|
||||
|
||||
class _Object:
|
||||
pass
|
||||
|
||||
|
||||
class _Dict(dict):
|
||||
pass
|
||||
|
||||
|
||||
class table_G__l_a_t(DefaultTable.DefaultTable):
    """
    Support Graphite Glat tables.

    Stores per-glyph attribute runs; version 1.x uses byte-sized run
    headers, 2.x uses short-sized headers, and 3.x adds optional
    compression and octabox metrics. Byte ranges per glyph come from the
    companion ``Gloc`` table.
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.scheme = 0  # compression scheme (3.x only)

    def decompile(self, data, ttFont):
        sstruct.unpack2(Glat_format_0, data, self)
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        if self.version <= 1.9:
            decoder = partial(self.decompileAttributes12, fmt=Glat_format_1_entry)
        elif self.version <= 2.9:
            decoder = partial(self.decompileAttributes12, fmt=Glat_format_23_entry)
        elif self.version >= 3.0:
            (data, self.scheme) = grUtils.decompress(data)
            sstruct.unpack2(Glat_format_3, data, self)
            self.hasOctaboxes = (self.compression & 1) == 1
            decoder = self.decompileAttributes3

        gloc = ttFont["Gloc"]
        self.attributes = {}
        count = 0
        # Gloc yields [start, end) byte offsets for each glyph's run.
        for s, e in zip(gloc, gloc[1:]):
            self.attributes[ttFont.getGlyphName(count)] = decoder(data[s:e])
            count += 1

    def decompileAttributes12(self, data, fmt):
        attributes = _Dict()
        while len(data) > 3:
            # Each run header gives the first attribute number and count,
            # followed by `num` signed 16-bit values.
            e, data = sstruct.unpack2(fmt, data, _Object())
            keys = range(e.attNum, e.attNum + e.num)
            if len(data) >= 2 * e.num:
                vals = struct.unpack_from((">%dh" % e.num), data)
                attributes.update(zip(keys, vals))
            data = data[2 * e.num :]
        return attributes

    def decompileAttributes3(self, data):
        if self.hasOctaboxes:
            o, data = sstruct.unpack2(Glat_format_3_octabox_metrics, data, _Object())
            numsub = bin(o.subboxBitmap).count("1")
            o.subboxes = []
            for b in range(numsub):
                if len(data) >= 8:
                    subbox, data = sstruct.unpack2(
                        Glat_format_3_subbox_entry, data, _Object()
                    )
                    o.subboxes.append(subbox)
        attrs = self.decompileAttributes12(data, Glat_format_23_entry)
        if self.hasOctaboxes:
            attrs.octabox = o
        return attrs

    def compile(self, ttFont):
        data = sstruct.pack(Glat_format_0, self)
        if self.version <= 1.9:
            encoder = partial(self.compileAttributes12, fmt=Glat_format_1_entry)
        elif self.version <= 2.9:
            # Bug fix: 2.x entries use the short (H) run headers, matching
            # decompile(); the byte (B) headers of format 1 would corrupt
            # runs whose attribute number or count exceeds 255.
            encoder = partial(self.compileAttributes12, fmt=Glat_format_23_entry)
        elif self.version >= 3.0:
            self.compression = (self.scheme << 27) + (1 if self.hasOctaboxes else 0)
            data = sstruct.pack(Glat_format_3, self)
            encoder = self.compileAttributes3

        glocs = []
        for n in range(len(self.attributes)):
            glocs.append(len(data))
            data += encoder(self.attributes[ttFont.getGlyphName(n)])
        glocs.append(len(data))
        # Hand the per-glyph offsets to the companion Gloc table.
        ttFont["Gloc"].set(glocs)

        if self.version >= 3.0:
            data = grUtils.compress(self.scheme, data)
        return data

    def compileAttributes12(self, attrs, fmt):
        data = b""
        for e in grUtils.entries(attrs):
            data += sstruct.pack(fmt, {"attNum": e[0], "num": e[1]}) + struct.pack(
                (">%dh" % len(e[2])), *e[2]
            )
        return data

    def compileAttributes3(self, attrs):
        if self.hasOctaboxes:
            o = attrs.octabox
            data = sstruct.pack(Glat_format_3_octabox_metrics, o)
            numsub = bin(o.subboxBitmap).count("1")
            for b in range(numsub):
                data += sstruct.pack(Glat_format_3_subbox_entry, o.subboxes[b])
        else:
            # Bug fix: must be bytes (was the str ""), otherwise the
            # concatenation below raises TypeError on Python 3.
            data = b""
        return data + self.compileAttributes12(attrs, Glat_format_23_entry)

    def toXML(self, writer, ttFont):
        writer.simpletag("version", version=self.version, compressionScheme=self.scheme)
        writer.newline()
        for n, a in sorted(
            self.attributes.items(), key=lambda x: ttFont.getGlyphID(x[0])
        ):
            writer.begintag("glyph", name=n)
            writer.newline()
            if hasattr(a, "octabox"):
                o = a.octabox
                formatstring, names, fixes = sstruct.getformat(
                    Glat_format_3_octabox_metrics
                )
                vals = {}
                for k in names:
                    if k == "subboxBitmap":
                        continue
                    # Stored byte values are scaled to percentages for XML.
                    vals[k] = "{:.3f}%".format(getattr(o, k) * 100.0 / 255)
                vals["bitmap"] = "{:0X}".format(o.subboxBitmap)
                writer.begintag("octaboxes", **vals)
                writer.newline()
                formatstring, names, fixes = sstruct.getformat(
                    Glat_format_3_subbox_entry
                )
                for s in o.subboxes:
                    vals = {}
                    for k in names:
                        vals[k] = "{:.3f}%".format(getattr(s, k) * 100.0 / 255)
                    writer.simpletag("octabox", **vals)
                    writer.newline()
                writer.endtag("octaboxes")
                writer.newline()
            for k, v in sorted(a.items()):
                writer.simpletag("attribute", index=k, value=v)
                writer.newline()
            writer.endtag("glyph")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
            self.scheme = int(safeEval(attrs["compressionScheme"]))
        if name != "glyph":
            return
        if not hasattr(self, "attributes"):
            self.attributes = {}
        gname = attrs["name"]
        attributes = _Dict()
        for element in content:
            if not isinstance(element, tuple):
                continue
            tag, attrs, subcontent = element
            if tag == "attribute":
                k = int(safeEval(attrs["index"]))
                v = int(safeEval(attrs["value"]))
                attributes[k] = v
            elif tag == "octaboxes":
                self.hasOctaboxes = True
                o = _Object()
                o.subboxBitmap = int(attrs["bitmap"], 16)
                o.subboxes = []
                del attrs["bitmap"]
                # Percent strings round-trip back to 0..255 byte values.
                for k, v in attrs.items():
                    setattr(o, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
                for element in subcontent:
                    if not isinstance(element, tuple):
                        continue
                    (tag, attrs, subcontent) = element
                    so = _Object()
                    for k, v in attrs.items():
                        setattr(so, k, int(float(v[:-1]) * 255.0 / 100.0 + 0.5))
                    o.subboxes.append(so)
                attributes.octabox = o
        self.attributes[gname] = attributes
|
||||
@ -0,0 +1,84 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import array
|
||||
import sys
|
||||
|
||||
|
||||
Gloc_header = """
|
||||
> # big endian
|
||||
version: 16.16F # Table version
|
||||
flags: H # bit 0: 1=long format, 0=short format
|
||||
# bit 1: 1=attribute names, 0=no names
|
||||
numAttribs: H # NUmber of attributes
|
||||
"""
|
||||
|
||||
|
||||
class table_G__l_o_c(DefaultTable.DefaultTable):
    """
    Support Graphite Gloc tables.

    Holds the byte offsets into ``Glat`` for each glyph's attribute run,
    plus optionally an array of attribute-name label ids.
    """

    dependencies = ["Glat"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.attribIds = None
        self.numAttribs = 0

    def decompile(self, data, ttFont):
        _, data = sstruct.unpack2(Gloc_header, data, self)
        flags = self.flags
        del self.flags
        # Bit 0 selects 32-bit vs 16-bit offsets; bit 1 means attribute
        # name ids (2 bytes each) trail the offset array.
        typecode = "I" if flags & 1 else "H"
        nameBytes = self.numAttribs * (flags & 2)
        self.locations = array.array(typecode)
        self.locations.frombytes(data[: len(data) - nameBytes])
        if sys.byteorder != "big":
            self.locations.byteswap()
        self.attribIds = array.array("H")
        if flags & 2:
            self.attribIds.frombytes(data[-self.numAttribs * 2 :])
            if sys.byteorder != "big":
                self.attribIds.byteswap()

    def compile(self, ttFont):
        headerFlags = (bool(self.attribIds) << 1) + (self.locations.typecode == "I")
        data = sstruct.pack(
            Gloc_header,
            dict(version=1.0, flags=headerFlags, numAttribs=self.numAttribs),
        )
        # Arrays are byteswapped in place for writing, then restored.
        if sys.byteorder != "big":
            self.locations.byteswap()
        data += self.locations.tobytes()
        if sys.byteorder != "big":
            self.locations.byteswap()
        if self.attribIds:
            if sys.byteorder != "big":
                self.attribIds.byteswap()
            data += self.attribIds.tobytes()
            if sys.byteorder != "big":
                self.attribIds.byteswap()
        return data

    def set(self, locations):
        # Switch to the long format as soon as any offset needs >16 bits.
        long_format = max(locations) >= 65536
        self.locations = array.array("I" if long_format else "H", locations)

    def toXML(self, writer, ttFont):
        writer.simpletag("attributes", number=self.numAttribs)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "attributes":
            self.numAttribs = int(safeEval(attrs["number"]))

    def __getitem__(self, index):
        return self.locations[index]

    def __len__(self):
        return len(self.locations)

    def __iter__(self):
        return iter(self.locations)
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_H_V_A_R_(BaseTTXConverter):
|
||||
pass
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_J_S_T_F_(BaseTTXConverter):
|
||||
pass
|
||||
@ -0,0 +1,48 @@
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
import array
|
||||
|
||||
# XXX I've lowered the strictness, to make sure Apple's own Chicago
|
||||
# XXX gets through. They're looking into it, I hope to raise the standards
|
||||
# XXX back to normal eventually.
|
||||
|
||||
|
||||
class table_L_T_S_H_(DefaultTable.DefaultTable):
|
||||
def decompile(self, data, ttFont):
|
||||
version, numGlyphs = struct.unpack(">HH", data[:4])
|
||||
data = data[4:]
|
||||
assert version == 0, "unknown version: %s" % version
|
||||
assert (len(data) % numGlyphs) < 4, "numGlyphs doesn't match data length"
|
||||
# ouch: the assertion is not true in Chicago!
|
||||
# assert numGlyphs == ttFont['maxp'].numGlyphs
|
||||
yPels = array.array("B")
|
||||
yPels.frombytes(data)
|
||||
self.yPels = {}
|
||||
for i in range(numGlyphs):
|
||||
self.yPels[ttFont.getGlyphName(i)] = yPels[i]
|
||||
|
||||
def compile(self, ttFont):
|
||||
version = 0
|
||||
names = list(self.yPels.keys())
|
||||
numGlyphs = len(names)
|
||||
yPels = [0] * numGlyphs
|
||||
# ouch: the assertion is not true in Chicago!
|
||||
# assert len(self.yPels) == ttFont['maxp'].numGlyphs == numGlyphs
|
||||
for name in names:
|
||||
yPels[ttFont.getGlyphID(name)] = self.yPels[name]
|
||||
yPels = array.array("B", yPels)
|
||||
return struct.pack(">HH", version, numGlyphs) + yPels.tobytes()
|
||||
|
||||
def toXML(self, writer, ttFont):
|
||||
names = sorted(self.yPels.keys())
|
||||
for name in names:
|
||||
writer.simpletag("yPel", name=name, value=self.yPels[name])
|
||||
writer.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
|
||||
if not hasattr(self, "yPels"):
|
||||
self.yPels = {}
|
||||
if name != "yPel":
|
||||
return # ignore unknown tags
|
||||
self.yPels[attrs["name"]] = safeEval(attrs["value"])
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_M_A_T_H_(BaseTTXConverter):
|
||||
pass
|
||||
@ -0,0 +1,345 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import byteord, safeEval
|
||||
from . import DefaultTable
|
||||
import pdb
|
||||
import struct
|
||||
|
||||
|
||||
METAHeaderFormat = """
|
||||
> # big endian
|
||||
tableVersionMajor: H
|
||||
tableVersionMinor: H
|
||||
metaEntriesVersionMajor: H
|
||||
metaEntriesVersionMinor: H
|
||||
unicodeVersion: L
|
||||
metaFlags: H
|
||||
nMetaRecs: H
|
||||
"""
|
||||
# This record is followed by nMetaRecs of METAGlyphRecordFormat.
|
||||
# This in turn is followd by as many METAStringRecordFormat entries
|
||||
# as specified by the METAGlyphRecordFormat entries
|
||||
# this is followed by the strings specifried in the METAStringRecordFormat
|
||||
METAGlyphRecordFormat = """
|
||||
> # big endian
|
||||
glyphID: H
|
||||
nMetaEntry: H
|
||||
"""
|
||||
# This record is followd by a variable data length field:
|
||||
# USHORT or ULONG hdrOffset
|
||||
# Offset from start of META table to the beginning
|
||||
# of this glyphs array of ns Metadata string entries.
|
||||
# Size determined by metaFlags field
|
||||
# METAGlyphRecordFormat entries must be sorted by glyph ID
|
||||
|
||||
METAStringRecordFormat = """
|
||||
> # big endian
|
||||
labelID: H
|
||||
stringLen: H
|
||||
"""
|
||||
# This record is followd by a variable data length field:
|
||||
# USHORT or ULONG stringOffset
|
||||
# METAStringRecordFormat entries must be sorted in order of labelID
|
||||
# There may be more than one entry with the same labelID
|
||||
# There may be more than one strign with the same content.
|
||||
|
||||
# Strings shall be Unicode UTF-8 encoded, and null-terminated.
|
||||
|
||||
METALabelDict = {
|
||||
0: "MojikumiX4051", # An integer in the range 1-20
|
||||
1: "UNIUnifiedBaseChars",
|
||||
2: "BaseFontName",
|
||||
3: "Language",
|
||||
4: "CreationDate",
|
||||
5: "FoundryName",
|
||||
6: "FoundryCopyright",
|
||||
7: "OwnerURI",
|
||||
8: "WritingScript",
|
||||
10: "StrokeCount",
|
||||
11: "IndexingRadical",
|
||||
}
|
||||
|
||||
|
||||
def getLabelString(labelID):
|
||||
try:
|
||||
label = METALabelDict[labelID]
|
||||
except KeyError:
|
||||
label = "Unknown label"
|
||||
return str(label)
|
||||
|
||||
|
||||
class table_M_E_T_A_(DefaultTable.DefaultTable):
|
||||
dependencies = []
|
||||
|
||||
def decompile(self, data, ttFont):
|
||||
dummy, newData = sstruct.unpack2(METAHeaderFormat, data, self)
|
||||
self.glyphRecords = []
|
||||
for i in range(self.nMetaRecs):
|
||||
glyphRecord, newData = sstruct.unpack2(
|
||||
METAGlyphRecordFormat, newData, GlyphRecord()
|
||||
)
|
||||
if self.metaFlags == 0:
|
||||
[glyphRecord.offset] = struct.unpack(">H", newData[:2])
|
||||
newData = newData[2:]
|
||||
elif self.metaFlags == 1:
|
||||
[glyphRecord.offset] = struct.unpack(">H", newData[:4])
|
||||
newData = newData[4:]
|
||||
else:
|
||||
assert 0, (
|
||||
"The metaFlags field in the META table header has a value other than 0 or 1 :"
|
||||
+ str(self.metaFlags)
|
||||
)
|
||||
glyphRecord.stringRecs = []
|
||||
newData = data[glyphRecord.offset :]
|
||||
for j in range(glyphRecord.nMetaEntry):
|
||||
stringRec, newData = sstruct.unpack2(
|
||||
METAStringRecordFormat, newData, StringRecord()
|
||||
)
|
||||
if self.metaFlags == 0:
|
||||
[stringRec.offset] = struct.unpack(">H", newData[:2])
|
||||
newData = newData[2:]
|
||||
else:
|
||||
[stringRec.offset] = struct.unpack(">H", newData[:4])
|
||||
newData = newData[4:]
|
||||
stringRec.string = data[
|
||||
stringRec.offset : stringRec.offset + stringRec.stringLen
|
||||
]
|
||||
glyphRecord.stringRecs.append(stringRec)
|
||||
self.glyphRecords.append(glyphRecord)
|
||||
|
||||
def compile(self, ttFont):
|
||||
offsetOK = 0
|
||||
self.nMetaRecs = len(self.glyphRecords)
|
||||
count = 0
|
||||
while offsetOK != 1:
|
||||
count = count + 1
|
||||
if count > 4:
|
||||
pdb.set_trace()
|
||||
metaData = sstruct.pack(METAHeaderFormat, self)
|
||||
stringRecsOffset = len(metaData) + self.nMetaRecs * (
|
||||
6 + 2 * (self.metaFlags & 1)
|
||||
)
|
||||
stringRecSize = 6 + 2 * (self.metaFlags & 1)
|
||||
for glyphRec in self.glyphRecords:
|
||||
glyphRec.offset = stringRecsOffset
|
||||
if (glyphRec.offset > 65535) and ((self.metaFlags & 1) == 0):
|
||||
self.metaFlags = self.metaFlags + 1
|
||||
offsetOK = -1
|
||||
break
|
||||
metaData = metaData + glyphRec.compile(self)
|
||||
stringRecsOffset = stringRecsOffset + (
|
||||
glyphRec.nMetaEntry * stringRecSize
|
||||
)
|
||||
# this will be the String Record offset for the next GlyphRecord.
|
||||
if offsetOK == -1:
|
||||
offsetOK = 0
|
||||
continue
|
||||
|
||||
# metaData now contains the header and all of the GlyphRecords. Its length should bw
|
||||
# the offset to the first StringRecord.
|
||||
stringOffset = stringRecsOffset
|
||||
for glyphRec in self.glyphRecords:
|
||||
assert glyphRec.offset == len(
|
||||
metaData
|
||||
), "Glyph record offset did not compile correctly! for rec:" + str(
|
||||
glyphRec
|
||||
)
|
||||
for stringRec in glyphRec.stringRecs:
|
||||
stringRec.offset = stringOffset
|
||||
if (stringRec.offset > 65535) and ((self.metaFlags & 1) == 0):
|
||||
self.metaFlags = self.metaFlags + 1
|
||||
offsetOK = -1
|
||||
break
|
||||
metaData = metaData + stringRec.compile(self)
|
||||
stringOffset = stringOffset + stringRec.stringLen
|
||||
if offsetOK == -1:
|
||||
offsetOK = 0
|
||||
continue
|
||||
|
||||
if ((self.metaFlags & 1) == 1) and (stringOffset < 65536):
|
||||
self.metaFlags = self.metaFlags - 1
|
||||
continue
|
||||
else:
|
||||
offsetOK = 1
|
||||
|
||||
# metaData now contains the header and all of the GlyphRecords and all of the String Records.
|
||||
# Its length should be the offset to the first string datum.
|
||||
for glyphRec in self.glyphRecords:
|
||||
for stringRec in glyphRec.stringRecs:
|
||||
assert stringRec.offset == len(
|
||||
metaData
|
||||
), "String offset did not compile correctly! for string:" + str(
|
||||
stringRec.string
|
||||
)
|
||||
metaData = metaData + stringRec.string
|
||||
|
||||
return metaData
|
||||
|
||||
def toXML(self, writer, ttFont):
|
||||
writer.comment(
|
||||
"Lengths and number of entries in this table will be recalculated by the compiler"
|
||||
)
|
||||
writer.newline()
|
||||
formatstring, names, fixes = sstruct.getformat(METAHeaderFormat)
|
||||
for name in names:
|
||||
value = getattr(self, name)
|
||||
writer.simpletag(name, value=value)
|
||||
writer.newline()
|
||||
for glyphRec in self.glyphRecords:
|
||||
glyphRec.toXML(writer, ttFont)
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
|
||||
if name == "GlyphRecord":
|
||||
if not hasattr(self, "glyphRecords"):
|
||||
self.glyphRecords = []
|
||||
glyphRec = GlyphRecord()
|
||||
self.glyphRecords.append(glyphRec)
|
||||
for element in content:
|
||||
if isinstance(element, str):
|
||||
continue
|
||||
name, attrs, content = element
|
||||
glyphRec.fromXML(name, attrs, content, ttFont)
|
||||
glyphRec.offset = -1
|
||||
glyphRec.nMetaEntry = len(glyphRec.stringRecs)
|
||||
else:
|
||||
setattr(self, name, safeEval(attrs["value"]))
|
||||
|
||||
|
||||
class GlyphRecord(object):
|
||||
def __init__(self):
|
||||
self.glyphID = -1
|
||||
self.nMetaEntry = -1
|
||||
self.offset = -1
|
||||
self.stringRecs = []
|
||||
|
||||
def toXML(self, writer, ttFont):
|
||||
writer.begintag("GlyphRecord")
|
||||
writer.newline()
|
||||
writer.simpletag("glyphID", value=self.glyphID)
|
||||
writer.newline()
|
||||
writer.simpletag("nMetaEntry", value=self.nMetaEntry)
|
||||
writer.newline()
|
||||
for stringRec in self.stringRecs:
|
||||
stringRec.toXML(writer, ttFont)
|
||||
writer.endtag("GlyphRecord")
|
||||
writer.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
|
||||
if name == "StringRecord":
|
||||
stringRec = StringRecord()
|
||||
self.stringRecs.append(stringRec)
|
||||
for element in content:
|
||||
if isinstance(element, str):
|
||||
continue
|
||||
stringRec.fromXML(name, attrs, content, ttFont)
|
||||
stringRec.stringLen = len(stringRec.string)
|
||||
else:
|
||||
setattr(self, name, safeEval(attrs["value"]))
|
||||
|
||||
def compile(self, parentTable):
|
||||
data = sstruct.pack(METAGlyphRecordFormat, self)
|
||||
if parentTable.metaFlags == 0:
|
||||
datum = struct.pack(">H", self.offset)
|
||||
elif parentTable.metaFlags == 1:
|
||||
datum = struct.pack(">L", self.offset)
|
||||
data = data + datum
|
||||
return data
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"GlyphRecord[ glyphID: "
|
||||
+ str(self.glyphID)
|
||||
+ ", nMetaEntry: "
|
||||
+ str(self.nMetaEntry)
|
||||
+ ", offset: "
|
||||
+ str(self.offset)
|
||||
+ " ]"
|
||||
)
|
||||
|
||||
|
||||
# XXX The following two functions are really broken around UTF-8 vs Unicode
|
||||
|
||||
|
||||
def mapXMLToUTF8(string):
|
||||
uString = str()
|
||||
strLen = len(string)
|
||||
i = 0
|
||||
while i < strLen:
|
||||
prefixLen = 0
|
||||
if string[i : i + 3] == "&#x":
|
||||
prefixLen = 3
|
||||
elif string[i : i + 7] == "&#x":
|
||||
prefixLen = 7
|
||||
if prefixLen:
|
||||
i = i + prefixLen
|
||||
j = i
|
||||
while string[i] != ";":
|
||||
i = i + 1
|
||||
valStr = string[j:i]
|
||||
|
||||
uString = uString + chr(eval("0x" + valStr))
|
||||
else:
|
||||
uString = uString + chr(byteord(string[i]))
|
||||
i = i + 1
|
||||
|
||||
return uString.encode("utf_8")
|
||||
|
||||
|
||||
def mapUTF8toXML(string):
|
||||
uString = string.decode("utf_8")
|
||||
string = ""
|
||||
for uChar in uString:
|
||||
i = ord(uChar)
|
||||
if (i < 0x80) and (i > 0x1F):
|
||||
string = string + uChar
|
||||
else:
|
||||
string = string + "&#x" + hex(i)[2:] + ";"
|
||||
return string
|
||||
|
||||
|
||||
class StringRecord(object):
|
||||
def toXML(self, writer, ttFont):
|
||||
writer.begintag("StringRecord")
|
||||
writer.newline()
|
||||
writer.simpletag("labelID", value=self.labelID)
|
||||
writer.comment(getLabelString(self.labelID))
|
||||
writer.newline()
|
||||
writer.newline()
|
||||
writer.simpletag("string", value=mapUTF8toXML(self.string))
|
||||
writer.newline()
|
||||
writer.endtag("StringRecord")
|
||||
writer.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
|
||||
for element in content:
|
||||
if isinstance(element, str):
|
||||
continue
|
||||
name, attrs, content = element
|
||||
value = attrs["value"]
|
||||
if name == "string":
|
||||
self.string = mapXMLToUTF8(value)
|
||||
else:
|
||||
setattr(self, name, safeEval(value))
|
||||
|
||||
def compile(self, parentTable):
|
||||
data = sstruct.pack(METAStringRecordFormat, self)
|
||||
if parentTable.metaFlags == 0:
|
||||
datum = struct.pack(">H", self.offset)
|
||||
elif parentTable.metaFlags == 1:
|
||||
datum = struct.pack(">L", self.offset)
|
||||
data = data + datum
|
||||
return data
|
||||
|
||||
def __repr__(self):
|
||||
return (
|
||||
"StringRecord [ labelID: "
|
||||
+ str(self.labelID)
|
||||
+ " aka "
|
||||
+ getLabelString(self.labelID)
|
||||
+ ", offset: "
|
||||
+ str(self.offset)
|
||||
+ ", length: "
|
||||
+ str(self.stringLen)
|
||||
+ ", string: "
|
||||
+ self.string
|
||||
+ " ]"
|
||||
)
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_M_V_A_R_(BaseTTXConverter):
|
||||
pass
|
||||
@ -0,0 +1,745 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools.misc.textTools import safeEval, num2binary, binary2num
|
||||
from fontTools.ttLib.tables import DefaultTable
|
||||
import bisect
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
# panose classification
|
||||
|
||||
panoseFormat = """
|
||||
bFamilyType: B
|
||||
bSerifStyle: B
|
||||
bWeight: B
|
||||
bProportion: B
|
||||
bContrast: B
|
||||
bStrokeVariation: B
|
||||
bArmStyle: B
|
||||
bLetterForm: B
|
||||
bMidline: B
|
||||
bXHeight: B
|
||||
"""
|
||||
|
||||
|
||||
class Panose(object):
|
||||
def __init__(self, **kwargs):
|
||||
_, names, _ = sstruct.getformat(panoseFormat)
|
||||
for name in names:
|
||||
setattr(self, name, kwargs.pop(name, 0))
|
||||
for k in kwargs:
|
||||
raise TypeError(f"Panose() got an unexpected keyword argument {k!r}")
|
||||
|
||||
def toXML(self, writer, ttFont):
|
||||
formatstring, names, fixes = sstruct.getformat(panoseFormat)
|
||||
for name in names:
|
||||
writer.simpletag(name, value=getattr(self, name))
|
||||
writer.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
|
||||
setattr(self, name, safeEval(attrs["value"]))
|
||||
|
||||
|
||||
# 'sfnt' OS/2 and Windows Metrics table - 'OS/2'
|
||||
|
||||
OS2_format_0 = """
|
||||
> # big endian
|
||||
version: H # version
|
||||
xAvgCharWidth: h # average character width
|
||||
usWeightClass: H # degree of thickness of strokes
|
||||
usWidthClass: H # aspect ratio
|
||||
fsType: H # type flags
|
||||
ySubscriptXSize: h # subscript horizontal font size
|
||||
ySubscriptYSize: h # subscript vertical font size
|
||||
ySubscriptXOffset: h # subscript x offset
|
||||
ySubscriptYOffset: h # subscript y offset
|
||||
ySuperscriptXSize: h # superscript horizontal font size
|
||||
ySuperscriptYSize: h # superscript vertical font size
|
||||
ySuperscriptXOffset: h # superscript x offset
|
||||
ySuperscriptYOffset: h # superscript y offset
|
||||
yStrikeoutSize: h # strikeout size
|
||||
yStrikeoutPosition: h # strikeout position
|
||||
sFamilyClass: h # font family class and subclass
|
||||
panose: 10s # panose classification number
|
||||
ulUnicodeRange1: L # character range
|
||||
ulUnicodeRange2: L # character range
|
||||
ulUnicodeRange3: L # character range
|
||||
ulUnicodeRange4: L # character range
|
||||
achVendID: 4s # font vendor identification
|
||||
fsSelection: H # font selection flags
|
||||
usFirstCharIndex: H # first unicode character index
|
||||
usLastCharIndex: H # last unicode character index
|
||||
sTypoAscender: h # typographic ascender
|
||||
sTypoDescender: h # typographic descender
|
||||
sTypoLineGap: h # typographic line gap
|
||||
usWinAscent: H # Windows ascender
|
||||
usWinDescent: H # Windows descender
|
||||
"""
|
||||
|
||||
OS2_format_1_addition = """
|
||||
ulCodePageRange1: L
|
||||
ulCodePageRange2: L
|
||||
"""
|
||||
|
||||
OS2_format_2_addition = (
|
||||
OS2_format_1_addition
|
||||
+ """
|
||||
sxHeight: h
|
||||
sCapHeight: h
|
||||
usDefaultChar: H
|
||||
usBreakChar: H
|
||||
usMaxContext: H
|
||||
"""
|
||||
)
|
||||
|
||||
OS2_format_5_addition = (
|
||||
OS2_format_2_addition
|
||||
+ """
|
||||
usLowerOpticalPointSize: H
|
||||
usUpperOpticalPointSize: H
|
||||
"""
|
||||
)
|
||||
|
||||
bigendian = " > # big endian\n"
|
||||
|
||||
OS2_format_1 = OS2_format_0 + OS2_format_1_addition
|
||||
OS2_format_2 = OS2_format_0 + OS2_format_2_addition
|
||||
OS2_format_5 = OS2_format_0 + OS2_format_5_addition
|
||||
OS2_format_1_addition = bigendian + OS2_format_1_addition
|
||||
OS2_format_2_addition = bigendian + OS2_format_2_addition
|
||||
OS2_format_5_addition = bigendian + OS2_format_5_addition
|
||||
|
||||
|
||||
class table_O_S_2f_2(DefaultTable.DefaultTable):
|
||||
"""the OS/2 table"""
|
||||
|
||||
dependencies = ["head"]
|
||||
|
||||
def decompile(self, data, ttFont):
|
||||
dummy, data = sstruct.unpack2(OS2_format_0, data, self)
|
||||
|
||||
if self.version == 1:
|
||||
dummy, data = sstruct.unpack2(OS2_format_1_addition, data, self)
|
||||
elif self.version in (2, 3, 4):
|
||||
dummy, data = sstruct.unpack2(OS2_format_2_addition, data, self)
|
||||
elif self.version == 5:
|
||||
dummy, data = sstruct.unpack2(OS2_format_5_addition, data, self)
|
||||
self.usLowerOpticalPointSize /= 20
|
||||
self.usUpperOpticalPointSize /= 20
|
||||
elif self.version != 0:
|
||||
from fontTools import ttLib
|
||||
|
||||
raise ttLib.TTLibError(
|
||||
"unknown format for OS/2 table: version %s" % self.version
|
||||
)
|
||||
if len(data):
|
||||
log.warning("too much 'OS/2' table data")
|
||||
|
||||
self.panose = sstruct.unpack(panoseFormat, self.panose, Panose())
|
||||
|
||||
def compile(self, ttFont):
|
||||
self.updateFirstAndLastCharIndex(ttFont)
|
||||
panose = self.panose
|
||||
head = ttFont["head"]
|
||||
if (self.fsSelection & 1) and not (head.macStyle & 1 << 1):
|
||||
log.warning(
|
||||
"fsSelection bit 0 (italic) and "
|
||||
"head table macStyle bit 1 (italic) should match"
|
||||
)
|
||||
if (self.fsSelection & 1 << 5) and not (head.macStyle & 1):
|
||||
log.warning(
|
||||
"fsSelection bit 5 (bold) and "
|
||||
"head table macStyle bit 0 (bold) should match"
|
||||
)
|
||||
if (self.fsSelection & 1 << 6) and (self.fsSelection & 1 + (1 << 5)):
|
||||
log.warning(
|
||||
"fsSelection bit 6 (regular) is set, "
|
||||
"bits 0 (italic) and 5 (bold) must be clear"
|
||||
)
|
||||
if self.version < 4 and self.fsSelection & 0b1110000000:
|
||||
log.warning(
|
||||
"fsSelection bits 7, 8 and 9 are only defined in "
|
||||
"OS/2 table version 4 and up: version %s",
|
||||
self.version,
|
||||
)
|
||||
self.panose = sstruct.pack(panoseFormat, self.panose)
|
||||
if self.version == 0:
|
||||
data = sstruct.pack(OS2_format_0, self)
|
||||
elif self.version == 1:
|
||||
data = sstruct.pack(OS2_format_1, self)
|
||||
elif self.version in (2, 3, 4):
|
||||
data = sstruct.pack(OS2_format_2, self)
|
||||
elif self.version == 5:
|
||||
d = self.__dict__.copy()
|
||||
d["usLowerOpticalPointSize"] = round(self.usLowerOpticalPointSize * 20)
|
||||
d["usUpperOpticalPointSize"] = round(self.usUpperOpticalPointSize * 20)
|
||||
data = sstruct.pack(OS2_format_5, d)
|
||||
else:
|
||||
from fontTools import ttLib
|
||||
|
||||
raise ttLib.TTLibError(
|
||||
"unknown format for OS/2 table: version %s" % self.version
|
||||
)
|
||||
self.panose = panose
|
||||
return data
|
||||
|
||||
def toXML(self, writer, ttFont):
|
||||
writer.comment(
|
||||
"The fields 'usFirstCharIndex' and 'usLastCharIndex'\n"
|
||||
"will be recalculated by the compiler"
|
||||
)
|
||||
writer.newline()
|
||||
if self.version == 1:
|
||||
format = OS2_format_1
|
||||
elif self.version in (2, 3, 4):
|
||||
format = OS2_format_2
|
||||
elif self.version == 5:
|
||||
format = OS2_format_5
|
||||
else:
|
||||
format = OS2_format_0
|
||||
formatstring, names, fixes = sstruct.getformat(format)
|
||||
for name in names:
|
||||
value = getattr(self, name)
|
||||
if name == "panose":
|
||||
writer.begintag("panose")
|
||||
writer.newline()
|
||||
value.toXML(writer, ttFont)
|
||||
writer.endtag("panose")
|
||||
elif name in (
|
||||
"ulUnicodeRange1",
|
||||
"ulUnicodeRange2",
|
||||
"ulUnicodeRange3",
|
||||
"ulUnicodeRange4",
|
||||
"ulCodePageRange1",
|
||||
"ulCodePageRange2",
|
||||
):
|
||||
writer.simpletag(name, value=num2binary(value))
|
||||
elif name in ("fsType", "fsSelection"):
|
||||
writer.simpletag(name, value=num2binary(value, 16))
|
||||
elif name == "achVendID":
|
||||
writer.simpletag(name, value=repr(value)[1:-1])
|
||||
else:
|
||||
writer.simpletag(name, value=value)
|
||||
writer.newline()
|
||||
|
||||
def fromXML(self, name, attrs, content, ttFont):
|
||||
if name == "panose":
|
||||
self.panose = panose = Panose()
|
||||
for element in content:
|
||||
if isinstance(element, tuple):
|
||||
name, attrs, content = element
|
||||
panose.fromXML(name, attrs, content, ttFont)
|
||||
elif name in (
|
||||
"ulUnicodeRange1",
|
||||
"ulUnicodeRange2",
|
||||
"ulUnicodeRange3",
|
||||
"ulUnicodeRange4",
|
||||
"ulCodePageRange1",
|
||||
"ulCodePageRange2",
|
||||
"fsType",
|
||||
"fsSelection",
|
||||
):
|
||||
setattr(self, name, binary2num(attrs["value"]))
|
||||
elif name == "achVendID":
|
||||
setattr(self, name, safeEval("'''" + attrs["value"] + "'''"))
|
||||
else:
|
||||
setattr(self, name, safeEval(attrs["value"]))
|
||||
|
||||
def updateFirstAndLastCharIndex(self, ttFont):
|
||||
if "cmap" not in ttFont:
|
||||
return
|
||||
codes = set()
|
||||
for table in getattr(ttFont["cmap"], "tables", []):
|
||||
if table.isUnicode():
|
||||
codes.update(table.cmap.keys())
|
||||
if codes:
|
||||
minCode = min(codes)
|
||||
maxCode = max(codes)
|
||||
# USHORT cannot hold codepoints greater than 0xFFFF
|
||||
self.usFirstCharIndex = min(0xFFFF, minCode)
|
||||
self.usLastCharIndex = min(0xFFFF, maxCode)
|
||||
|
||||
# misspelled attributes kept for legacy reasons
|
||||
|
||||
@property
|
||||
def usMaxContex(self):
|
||||
return self.usMaxContext
|
||||
|
||||
@usMaxContex.setter
|
||||
def usMaxContex(self, value):
|
||||
self.usMaxContext = value
|
||||
|
||||
@property
|
||||
def fsFirstCharIndex(self):
|
||||
return self.usFirstCharIndex
|
||||
|
||||
@fsFirstCharIndex.setter
|
||||
def fsFirstCharIndex(self, value):
|
||||
self.usFirstCharIndex = value
|
||||
|
||||
@property
|
||||
def fsLastCharIndex(self):
|
||||
return self.usLastCharIndex
|
||||
|
||||
@fsLastCharIndex.setter
|
||||
def fsLastCharIndex(self, value):
|
||||
self.usLastCharIndex = value
|
||||
|
||||
def getUnicodeRanges(self):
|
||||
"""Return the set of 'ulUnicodeRange*' bits currently enabled."""
|
||||
bits = set()
|
||||
ul1, ul2 = self.ulUnicodeRange1, self.ulUnicodeRange2
|
||||
ul3, ul4 = self.ulUnicodeRange3, self.ulUnicodeRange4
|
||||
for i in range(32):
|
||||
if ul1 & (1 << i):
|
||||
bits.add(i)
|
||||
if ul2 & (1 << i):
|
||||
bits.add(i + 32)
|
||||
if ul3 & (1 << i):
|
||||
bits.add(i + 64)
|
||||
if ul4 & (1 << i):
|
||||
bits.add(i + 96)
|
||||
return bits
|
||||
|
||||
def setUnicodeRanges(self, bits):
|
||||
"""Set the 'ulUnicodeRange*' fields to the specified 'bits'."""
|
||||
ul1, ul2, ul3, ul4 = 0, 0, 0, 0
|
||||
for bit in bits:
|
||||
if 0 <= bit < 32:
|
||||
ul1 |= 1 << bit
|
||||
elif 32 <= bit < 64:
|
||||
ul2 |= 1 << (bit - 32)
|
||||
elif 64 <= bit < 96:
|
||||
ul3 |= 1 << (bit - 64)
|
||||
elif 96 <= bit < 123:
|
||||
ul4 |= 1 << (bit - 96)
|
||||
else:
|
||||
raise ValueError("expected 0 <= int <= 122, found: %r" % bit)
|
||||
self.ulUnicodeRange1, self.ulUnicodeRange2 = ul1, ul2
|
||||
self.ulUnicodeRange3, self.ulUnicodeRange4 = ul3, ul4
|
||||
|
||||
def recalcUnicodeRanges(self, ttFont, pruneOnly=False):
|
||||
"""Intersect the codepoints in the font's Unicode cmap subtables with
|
||||
the Unicode block ranges defined in the OpenType specification (v1.7),
|
||||
and set the respective 'ulUnicodeRange*' bits if there is at least ONE
|
||||
intersection.
|
||||
If 'pruneOnly' is True, only clear unused bits with NO intersection.
|
||||
"""
|
||||
unicodes = set()
|
||||
for table in ttFont["cmap"].tables:
|
||||
if table.isUnicode():
|
||||
unicodes.update(table.cmap.keys())
|
||||
if pruneOnly:
|
||||
empty = intersectUnicodeRanges(unicodes, inverse=True)
|
||||
bits = self.getUnicodeRanges() - empty
|
||||
else:
|
||||
bits = intersectUnicodeRanges(unicodes)
|
||||
self.setUnicodeRanges(bits)
|
||||
return bits
|
||||
|
||||
def getCodePageRanges(self):
|
||||
"""Return the set of 'ulCodePageRange*' bits currently enabled."""
|
||||
bits = set()
|
||||
if self.version < 1:
|
||||
return bits
|
||||
ul1, ul2 = self.ulCodePageRange1, self.ulCodePageRange2
|
||||
for i in range(32):
|
||||
if ul1 & (1 << i):
|
||||
bits.add(i)
|
||||
if ul2 & (1 << i):
|
||||
bits.add(i + 32)
|
||||
return bits
|
||||
|
||||
def setCodePageRanges(self, bits):
|
||||
"""Set the 'ulCodePageRange*' fields to the specified 'bits'."""
|
||||
ul1, ul2 = 0, 0
|
||||
for bit in bits:
|
||||
if 0 <= bit < 32:
|
||||
ul1 |= 1 << bit
|
||||
elif 32 <= bit < 64:
|
||||
ul2 |= 1 << (bit - 32)
|
||||
else:
|
||||
raise ValueError(f"expected 0 <= int <= 63, found: {bit:r}")
|
||||
if self.version < 1:
|
||||
self.version = 1
|
||||
self.ulCodePageRange1, self.ulCodePageRange2 = ul1, ul2
|
||||
|
||||
def recalcCodePageRanges(self, ttFont, pruneOnly=False):
|
||||
unicodes = set()
|
||||
for table in ttFont["cmap"].tables:
|
||||
if table.isUnicode():
|
||||
unicodes.update(table.cmap.keys())
|
||||
bits = calcCodePageRanges(unicodes)
|
||||
if pruneOnly:
|
||||
bits &= self.getCodePageRanges()
|
||||
# when no codepage ranges can be enabled, fall back to enabling bit 0
|
||||
# (Latin 1) so that the font works in MS Word:
|
||||
# https://github.com/googlei18n/fontmake/issues/468
|
||||
if not bits:
|
||||
bits = {0}
|
||||
self.setCodePageRanges(bits)
|
||||
return bits
|
||||
|
||||
def recalcAvgCharWidth(self, ttFont):
    """Recalculate xAvgCharWidth from the advance widths in the 'hmtx' table.

    Only strictly positive advances contribute to the average, which is
    rounded with otRound. The result is stored in self.xAvgCharWidth and
    returned; it falls back to 0 in the unlikely event that 'hmtx' is
    missing or contains no positive widths.
    """
    result = 0
    hmtx = ttFont.get("hmtx")
    if hmtx is not None:
        positive = [adv for adv, _lsb in hmtx.metrics.values() if adv > 0]
        if positive:
            result = otRound(sum(positive) / len(positive))
    self.xAvgCharWidth = result
    return result
|
||||
|
||||
|
||||
# Unicode ranges data from the OpenType OS/2 table specification v1.7
|
||||
|
||||
# Each entry below corresponds (by position) to one 'ulUnicodeRange*' bit and
# lists the (block name, (first codepoint, last codepoint)) Unicode block(s)
# covered by that bit; a single bit may span several non-contiguous blocks.
OS2_UNICODE_RANGES = (
    (("Basic Latin", (0x0000, 0x007F)),),
    (("Latin-1 Supplement", (0x0080, 0x00FF)),),
    (("Latin Extended-A", (0x0100, 0x017F)),),
    (("Latin Extended-B", (0x0180, 0x024F)),),
    (
        ("IPA Extensions", (0x0250, 0x02AF)),
        ("Phonetic Extensions", (0x1D00, 0x1D7F)),
        ("Phonetic Extensions Supplement", (0x1D80, 0x1DBF)),
    ),
    (
        ("Spacing Modifier Letters", (0x02B0, 0x02FF)),
        ("Modifier Tone Letters", (0xA700, 0xA71F)),
    ),
    (
        ("Combining Diacritical Marks", (0x0300, 0x036F)),
        ("Combining Diacritical Marks Supplement", (0x1DC0, 0x1DFF)),
    ),
    (("Greek and Coptic", (0x0370, 0x03FF)),),
    (("Coptic", (0x2C80, 0x2CFF)),),
    (
        ("Cyrillic", (0x0400, 0x04FF)),
        ("Cyrillic Supplement", (0x0500, 0x052F)),
        ("Cyrillic Extended-A", (0x2DE0, 0x2DFF)),
        ("Cyrillic Extended-B", (0xA640, 0xA69F)),
    ),
    (("Armenian", (0x0530, 0x058F)),),
    (("Hebrew", (0x0590, 0x05FF)),),
    (("Vai", (0xA500, 0xA63F)),),
    (("Arabic", (0x0600, 0x06FF)), ("Arabic Supplement", (0x0750, 0x077F))),
    (("NKo", (0x07C0, 0x07FF)),),
    (("Devanagari", (0x0900, 0x097F)),),
    (("Bengali", (0x0980, 0x09FF)),),
    (("Gurmukhi", (0x0A00, 0x0A7F)),),
    (("Gujarati", (0x0A80, 0x0AFF)),),
    (("Oriya", (0x0B00, 0x0B7F)),),
    (("Tamil", (0x0B80, 0x0BFF)),),
    (("Telugu", (0x0C00, 0x0C7F)),),
    (("Kannada", (0x0C80, 0x0CFF)),),
    (("Malayalam", (0x0D00, 0x0D7F)),),
    (("Thai", (0x0E00, 0x0E7F)),),
    (("Lao", (0x0E80, 0x0EFF)),),
    (("Georgian", (0x10A0, 0x10FF)), ("Georgian Supplement", (0x2D00, 0x2D2F))),
    (("Balinese", (0x1B00, 0x1B7F)),),
    (("Hangul Jamo", (0x1100, 0x11FF)),),
    (
        ("Latin Extended Additional", (0x1E00, 0x1EFF)),
        ("Latin Extended-C", (0x2C60, 0x2C7F)),
        ("Latin Extended-D", (0xA720, 0xA7FF)),
    ),
    (("Greek Extended", (0x1F00, 0x1FFF)),),
    (
        ("General Punctuation", (0x2000, 0x206F)),
        ("Supplemental Punctuation", (0x2E00, 0x2E7F)),
    ),
    (("Superscripts And Subscripts", (0x2070, 0x209F)),),
    (("Currency Symbols", (0x20A0, 0x20CF)),),
    (("Combining Diacritical Marks For Symbols", (0x20D0, 0x20FF)),),
    (("Letterlike Symbols", (0x2100, 0x214F)),),
    (("Number Forms", (0x2150, 0x218F)),),
    (
        ("Arrows", (0x2190, 0x21FF)),
        ("Supplemental Arrows-A", (0x27F0, 0x27FF)),
        ("Supplemental Arrows-B", (0x2900, 0x297F)),
        ("Miscellaneous Symbols and Arrows", (0x2B00, 0x2BFF)),
    ),
    (
        ("Mathematical Operators", (0x2200, 0x22FF)),
        ("Supplemental Mathematical Operators", (0x2A00, 0x2AFF)),
        ("Miscellaneous Mathematical Symbols-A", (0x27C0, 0x27EF)),
        ("Miscellaneous Mathematical Symbols-B", (0x2980, 0x29FF)),
    ),
    (("Miscellaneous Technical", (0x2300, 0x23FF)),),
    (("Control Pictures", (0x2400, 0x243F)),),
    (("Optical Character Recognition", (0x2440, 0x245F)),),
    (("Enclosed Alphanumerics", (0x2460, 0x24FF)),),
    (("Box Drawing", (0x2500, 0x257F)),),
    (("Block Elements", (0x2580, 0x259F)),),
    (("Geometric Shapes", (0x25A0, 0x25FF)),),
    (("Miscellaneous Symbols", (0x2600, 0x26FF)),),
    (("Dingbats", (0x2700, 0x27BF)),),
    (("CJK Symbols And Punctuation", (0x3000, 0x303F)),),
    (("Hiragana", (0x3040, 0x309F)),),
    (
        ("Katakana", (0x30A0, 0x30FF)),
        ("Katakana Phonetic Extensions", (0x31F0, 0x31FF)),
    ),
    (("Bopomofo", (0x3100, 0x312F)), ("Bopomofo Extended", (0x31A0, 0x31BF))),
    (("Hangul Compatibility Jamo", (0x3130, 0x318F)),),
    (("Phags-pa", (0xA840, 0xA87F)),),
    (("Enclosed CJK Letters And Months", (0x3200, 0x32FF)),),
    (("CJK Compatibility", (0x3300, 0x33FF)),),
    (("Hangul Syllables", (0xAC00, 0xD7AF)),),
    (("Non-Plane 0 *", (0xD800, 0xDFFF)),),
    (("Phoenician", (0x10900, 0x1091F)),),
    (
        ("CJK Unified Ideographs", (0x4E00, 0x9FFF)),
        ("CJK Radicals Supplement", (0x2E80, 0x2EFF)),
        ("Kangxi Radicals", (0x2F00, 0x2FDF)),
        ("Ideographic Description Characters", (0x2FF0, 0x2FFF)),
        ("CJK Unified Ideographs Extension A", (0x3400, 0x4DBF)),
        ("CJK Unified Ideographs Extension B", (0x20000, 0x2A6DF)),
        ("Kanbun", (0x3190, 0x319F)),
    ),
    (("Private Use Area (plane 0)", (0xE000, 0xF8FF)),),
    (
        ("CJK Strokes", (0x31C0, 0x31EF)),
        ("CJK Compatibility Ideographs", (0xF900, 0xFAFF)),
        ("CJK Compatibility Ideographs Supplement", (0x2F800, 0x2FA1F)),
    ),
    (("Alphabetic Presentation Forms", (0xFB00, 0xFB4F)),),
    (("Arabic Presentation Forms-A", (0xFB50, 0xFDFF)),),
    (("Combining Half Marks", (0xFE20, 0xFE2F)),),
    (
        ("Vertical Forms", (0xFE10, 0xFE1F)),
        ("CJK Compatibility Forms", (0xFE30, 0xFE4F)),
    ),
    (("Small Form Variants", (0xFE50, 0xFE6F)),),
    (("Arabic Presentation Forms-B", (0xFE70, 0xFEFF)),),
    (("Halfwidth And Fullwidth Forms", (0xFF00, 0xFFEF)),),
    (("Specials", (0xFFF0, 0xFFFF)),),
    (("Tibetan", (0x0F00, 0x0FFF)),),
    (("Syriac", (0x0700, 0x074F)),),
    (("Thaana", (0x0780, 0x07BF)),),
    (("Sinhala", (0x0D80, 0x0DFF)),),
    (("Myanmar", (0x1000, 0x109F)),),
    (
        ("Ethiopic", (0x1200, 0x137F)),
        ("Ethiopic Supplement", (0x1380, 0x139F)),
        ("Ethiopic Extended", (0x2D80, 0x2DDF)),
    ),
    (("Cherokee", (0x13A0, 0x13FF)),),
    (("Unified Canadian Aboriginal Syllabics", (0x1400, 0x167F)),),
    (("Ogham", (0x1680, 0x169F)),),
    (("Runic", (0x16A0, 0x16FF)),),
    (("Khmer", (0x1780, 0x17FF)), ("Khmer Symbols", (0x19E0, 0x19FF))),
    (("Mongolian", (0x1800, 0x18AF)),),
    (("Braille Patterns", (0x2800, 0x28FF)),),
    (("Yi Syllables", (0xA000, 0xA48F)), ("Yi Radicals", (0xA490, 0xA4CF))),
    (
        ("Tagalog", (0x1700, 0x171F)),
        ("Hanunoo", (0x1720, 0x173F)),
        ("Buhid", (0x1740, 0x175F)),
        ("Tagbanwa", (0x1760, 0x177F)),
    ),
    (("Old Italic", (0x10300, 0x1032F)),),
    (("Gothic", (0x10330, 0x1034F)),),
    (("Deseret", (0x10400, 0x1044F)),),
    (
        ("Byzantine Musical Symbols", (0x1D000, 0x1D0FF)),
        ("Musical Symbols", (0x1D100, 0x1D1FF)),
        ("Ancient Greek Musical Notation", (0x1D200, 0x1D24F)),
    ),
    (("Mathematical Alphanumeric Symbols", (0x1D400, 0x1D7FF)),),
    (
        ("Private Use (plane 15)", (0xF0000, 0xFFFFD)),
        ("Private Use (plane 16)", (0x100000, 0x10FFFD)),
    ),
    (
        ("Variation Selectors", (0xFE00, 0xFE0F)),
        ("Variation Selectors Supplement", (0xE0100, 0xE01EF)),
    ),
    (("Tags", (0xE0000, 0xE007F)),),
    (("Limbu", (0x1900, 0x194F)),),
    (("Tai Le", (0x1950, 0x197F)),),
    (("New Tai Lue", (0x1980, 0x19DF)),),
    (("Buginese", (0x1A00, 0x1A1F)),),
    (("Glagolitic", (0x2C00, 0x2C5F)),),
    (("Tifinagh", (0x2D30, 0x2D7F)),),
    (("Yijing Hexagram Symbols", (0x4DC0, 0x4DFF)),),
    (("Syloti Nagri", (0xA800, 0xA82F)),),
    (
        ("Linear B Syllabary", (0x10000, 0x1007F)),
        ("Linear B Ideograms", (0x10080, 0x100FF)),
        ("Aegean Numbers", (0x10100, 0x1013F)),
    ),
    (("Ancient Greek Numbers", (0x10140, 0x1018F)),),
    (("Ugaritic", (0x10380, 0x1039F)),),
    (("Old Persian", (0x103A0, 0x103DF)),),
    (("Shavian", (0x10450, 0x1047F)),),
    (("Osmanya", (0x10480, 0x104AF)),),
    (("Cypriot Syllabary", (0x10800, 0x1083F)),),
    (("Kharoshthi", (0x10A00, 0x10A5F)),),
    (("Tai Xuan Jing Symbols", (0x1D300, 0x1D35F)),),
    (
        ("Cuneiform", (0x12000, 0x123FF)),
        ("Cuneiform Numbers and Punctuation", (0x12400, 0x1247F)),
    ),
    (("Counting Rod Numerals", (0x1D360, 0x1D37F)),),
    (("Sundanese", (0x1B80, 0x1BBF)),),
    (("Lepcha", (0x1C00, 0x1C4F)),),
    (("Ol Chiki", (0x1C50, 0x1C7F)),),
    (("Saurashtra", (0xA880, 0xA8DF)),),
    (("Kayah Li", (0xA900, 0xA92F)),),
    (("Rejang", (0xA930, 0xA95F)),),
    (("Cham", (0xAA00, 0xAA5F)),),
    (("Ancient Symbols", (0x10190, 0x101CF)),),
    (("Phaistos Disc", (0x101D0, 0x101FF)),),
    (
        ("Carian", (0x102A0, 0x102DF)),
        ("Lycian", (0x10280, 0x1029F)),
        ("Lydian", (0x10920, 0x1093F)),
    ),
    (("Domino Tiles", (0x1F030, 0x1F09F)), ("Mahjong Tiles", (0x1F000, 0x1F02F))),
)
|
||||
|
||||
|
||||
# Lazily-built parallel arrays used by intersectUnicodeRanges() for binary
# search: _unicodeStarts[i] holds a block's first codepoint, and
# _unicodeValues[i + 1] the matching (last codepoint, bit number) pair.
# _unicodeValues is offset by one (index 0 is a None sentinel) so it can be
# indexed directly with the insertion point returned by bisect.bisect().
_unicodeStarts = []
_unicodeValues = [None]


def _getUnicodeRanges():
    # build the ranges of codepoints for each unicode range bit, and cache result
    if not _unicodeStarts:
        # Flatten OS2_UNICODE_RANGES into (start, (stop, bit)) tuples; the
        # same bit may appear more than once, since one OS/2 bit can cover
        # several non-contiguous Unicode blocks.
        unicodeRanges = [
            (start, (stop, bit))
            for bit, blocks in enumerate(OS2_UNICODE_RANGES)
            for _, (start, stop) in blocks
        ]
        # Sorted by start codepoint so the starts list is bisect-able.
        for start, (stop, bit) in sorted(unicodeRanges):
            _unicodeStarts.append(start)
            _unicodeValues.append((stop, bit))
    return _unicodeStarts, _unicodeValues
|
||||
|
||||
|
||||
def intersectUnicodeRanges(unicodes, inverse=False):
    """Intersect a sequence of (int) Unicode codepoints with the Unicode block
    ranges defined in the OpenType specification v1.7, and return the set of
    'ulUnicodeRanges' bits for which there is at least ONE intersection.
    If 'inverse' is True, return the bits for which there is NO intersection.

    >>> intersectUnicodeRanges([0x0410]) == {9}
    True
    >>> intersectUnicodeRanges([0x0410, 0x1F000]) == {9, 57, 122}
    True
    >>> intersectUnicodeRanges([0x0410, 0x1F000], inverse=True) == (
    ...     set(range(len(OS2_UNICODE_RANGES))) - {9, 57, 122})
    True
    """
    unicodes = set(unicodes)
    unicodestarts, unicodevalues = _getUnicodeRanges()
    bits = set()
    for code in unicodes:
        # bisect finds the last block whose start is <= code; the values
        # list is offset by one for exactly this lookup (see
        # _getUnicodeRanges). The block only matches if code also lies at
        # or before its end.
        stop, bit = unicodevalues[bisect.bisect(unicodestarts, code)]
        if code <= stop:
            bits.add(bit)
    # The spec says that bit 57 ("Non Plane 0") implies that there's
    # at least one codepoint beyond the BMP; so I also include all
    # the non-BMP codepoints here
    if any(0x10000 <= code < 0x110000 for code in unicodes):
        bits.add(57)
    return set(range(len(OS2_UNICODE_RANGES))) - bits if inverse else bits
|
||||
|
||||
|
||||
def calcCodePageRanges(unicodes):
    """Compute the OS/2 'ulCodePageRange*' bit numbers for a set of Unicode
    codepoints (integers), returned as a set of ints.

    This is a direct translation of the FontForge implementation:
    https://github.com/fontforge/fontforge/blob/7b2c074/fontforge/tottf.c#L3158
    """
    ranges = set()
    # Full printable-ASCII coverage (U+0020..U+007D) gates the Latin-ish bits.
    has_ascii = set(range(0x20, 0x7E)).issubset(unicodes)
    # A box-drawing character acts as a proxy for the DOS "lineart" codepages.
    has_lineart = ord("┤") in unicodes

    for cp in unicodes:
        if cp == ord("Þ") and has_ascii:
            ranges.add(0)  # Latin 1
        elif cp == ord("Ľ") and has_ascii:
            ranges.add(1)  # Latin 2: Eastern Europe
            if has_lineart:
                ranges.add(58)  # Latin 2
        elif cp == ord("Б"):
            ranges.add(2)  # Cyrillic
            if ord("Ѕ") in unicodes and has_lineart:
                ranges.add(57)  # IBM Cyrillic
            if ord("╜") in unicodes and has_lineart:
                ranges.add(49)  # MS-DOS Russian
        elif cp == ord("Ά"):
            ranges.add(3)  # Greek
            if has_lineart and ord("½") in unicodes:
                ranges.add(48)  # IBM Greek
            if has_lineart and ord("√") in unicodes:
                ranges.add(60)  # Greek, former 437 G
        elif cp == ord("İ") and has_ascii:
            ranges.add(4)  # Turkish
            if has_lineart:
                ranges.add(56)  # IBM turkish
        elif cp == ord("א"):
            ranges.add(5)  # Hebrew
            if has_lineart and ord("√") in unicodes:
                ranges.add(53)  # Hebrew
        elif cp == ord("ر"):
            ranges.add(6)  # Arabic
            if ord("√") in unicodes:
                ranges.add(51)  # Arabic
            if has_lineart:
                ranges.add(61)  # Arabic; ASMO 708
        elif cp == ord("ŗ") and has_ascii:
            ranges.add(7)  # Windows Baltic
            if has_lineart:
                ranges.add(59)  # MS-DOS Baltic
        elif cp == ord("₫") and has_ascii:
            ranges.add(8)  # Vietnamese
        elif cp == ord("ๅ"):
            ranges.add(16)  # Thai
        elif cp == ord("エ"):
            ranges.add(17)  # JIS/Japan
        elif cp == ord("ㄅ"):
            ranges.add(18)  # Chinese: Simplified
        elif cp == ord("ㄱ"):
            ranges.add(19)  # Korean wansung
        elif cp == ord("央"):
            ranges.add(20)  # Chinese: Traditional
        elif cp == ord("곴"):
            ranges.add(21)  # Korean Johab
        elif cp == ord("♥") and has_ascii:
            ranges.add(30)  # OEM Character Set
        # TODO: Symbol bit has a special meaning (check the spec), we need
        # to confirm if this is wanted by default.
        # elif chr(0xF000) <= char <= chr(0xF0FF):
        #     codepageRanges.add(31)  # Symbol Character Set
        elif cp == ord("þ") and has_ascii and has_lineart:
            ranges.add(54)  # MS-DOS Icelandic
        elif cp == ord("╚") and has_ascii:
            ranges.add(62)  # WE/Latin 1
            ranges.add(63)  # US
        elif has_ascii and has_lineart and ord("√") in unicodes:
            if cp == ord("Å"):
                ranges.add(50)  # MS-DOS Nordic
            elif cp == ord("é"):
                ranges.add(52)  # MS-DOS Canadian French
            elif cp == ord("õ"):
                ranges.add(55)  # MS-DOS Portuguese

    if has_ascii and ord("‰") in unicodes and ord("∑") in unicodes:
        ranges.add(29)  # Macintosh Character Set (US Roman)

    return ranges
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run the module's doctests when executed directly; the process exit
    # status is the number of failing examples (0 means success).
    import doctest
    import sys

    sys.exit(doctest.testmod().failed)
|
||||
@ -0,0 +1,92 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval
|
||||
from . import DefaultTable
|
||||
|
||||
SINGFormat = """
|
||||
> # big endian
|
||||
tableVersionMajor: H
|
||||
tableVersionMinor: H
|
||||
glyphletVersion: H
|
||||
permissions: h
|
||||
mainGID: H
|
||||
unitsPerEm: H
|
||||
vertAdvance: h
|
||||
vertOrigin: h
|
||||
uniqueName: 28s
|
||||
METAMD5: 16s
|
||||
nameLength: 1s
|
||||
"""
|
||||
# baseGlyphName is a byte string which follows the record above.
|
||||
|
||||
|
||||
class table_S_I_N_G_(DefaultTable.DefaultTable):
    """The 'SING' glyphlet table.

    A fixed-size header (SINGFormat) is followed by the base glyph name as
    a plain byte string of 'nameLength' bytes.
    """

    dependencies = []

    def decompile(self, data, ttFont):
        """Unpack the binary table: fixed header, then the base glyph name."""
        dummy, rest = sstruct.unpack2(SINGFormat, data, self)
        self.uniqueName = self.decompileUniqueName(self.uniqueName)
        self.nameLength = byteord(self.nameLength)
        assert len(rest) == self.nameLength
        self.baseGlyphName = tostr(rest)

        # Render the 16-byte MD5 digest as a textual list of hex values,
        # e.g. "[0x12, 0x34, ...]", for the XML round-trip.
        rawMETAMD5 = self.METAMD5
        self.METAMD5 = "[" + hex(byteord(self.METAMD5[0]))
        for char in rawMETAMD5[1:]:
            self.METAMD5 = self.METAMD5 + ", " + hex(byteord(char))
        self.METAMD5 = self.METAMD5 + "]"

    def decompileUniqueName(self, data):
        """Decode the NUL-terminated unique name, escaping non-printable
        bytes as backslashed octal (e.g. "\\042")."""
        name = ""
        for char in data:
            val = byteord(char)
            if val == 0:
                # NUL terminates the name; the rest of the field is padding.
                break
            if 31 < val < 128:
                # Printable ASCII passes through unchanged. Both bounds must
                # hold ('and' semantics): the previous 'or' version was
                # always true, which made the escape branch unreachable.
                name += chr(val)
            else:
                octString = oct(val)
                if len(octString) > 3:
                    octString = octString[1:]  # chop off that leading zero.
                elif len(octString) < 3:
                    # zfill returns a new string; the result must be
                    # assigned back (it was previously discarded).
                    octString = octString.zfill(3)
                name += "\\" + octString
        return name

    def compile(self, ttFont):
        """Pack the table back to binary; inverse of decompile()."""
        from ast import literal_eval  # stdlib; only needed here

        d = self.__dict__.copy()
        d["nameLength"] = bytechr(len(self.baseGlyphName))
        d["uniqueName"] = self.compilecompileUniqueName(self.uniqueName, 28)
        # METAMD5 holds a textual "[0x.., ...]" list (see decompile). Parse
        # it back with literal_eval rather than eval() so that malicious
        # TTX input cannot execute arbitrary code.
        METAMD5List = literal_eval(self.METAMD5)
        d["METAMD5"] = b""
        for val in METAMD5List:
            d["METAMD5"] += bytechr(val)
        assert len(d["METAMD5"]) == 16, "Failed to pack 16 byte MD5 hash in SING table"
        data = sstruct.pack(SINGFormat, d)
        data = data + tobytes(self.baseGlyphName)
        return data

    def compilecompileUniqueName(self, name, length):
        """Pad or truncate 'name' to exactly 'length' characters,
        NUL-terminating when truncated."""
        nameLen = len(name)
        if length <= nameLen:
            # Truncate, keeping room for a terminating NUL.
            name = name[: length - 1] + "\000"
        else:
            # Pad with 'length - nameLen' NULs. The reversed subtraction
            # used previously produced a negative repeat count, i.e. no
            # padding at all.
            name += (length - nameLen) * "\000"
        return name

    def toXML(self, writer, ttFont):
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        formatstring, names, fixes = sstruct.getformat(SINGFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        writer.simpletag("baseGlyphName", value=self.baseGlyphName)
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        value = attrs["value"]
        # String-valued fields are stored verbatim; numeric ones go through
        # safeEval.
        if name in ["uniqueName", "METAMD5", "baseGlyphName"]:
            setattr(self, name, value)
        else:
            setattr(self, name, safeEval(value))
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_S_T_A_T_(BaseTTXConverter):
    # 'STAT' (style attributes) table. All (de)compilation is handled by the
    # generic otBase/BaseTTXConverter machinery; no custom logic is needed.
    pass
|
||||
@ -0,0 +1,215 @@
|
||||
"""Compiles/decompiles SVG table.
|
||||
|
||||
https://docs.microsoft.com/en-us/typography/opentype/spec/svg
|
||||
|
||||
The XML format is:
|
||||
|
||||
.. code-block:: xml
|
||||
|
||||
<SVG>
|
||||
<svgDoc endGlyphID="1" startGlyphID="1">
|
||||
<![CDATA[ <complete SVG doc> ]]
|
||||
</svgDoc>
|
||||
...
|
||||
<svgDoc endGlyphID="n" startGlyphID="m">
|
||||
<![CDATA[ <complete SVG doc> ]]
|
||||
</svgDoc>
|
||||
</SVG>
|
||||
"""
|
||||
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval, strjoin, tobytes, tostr
|
||||
from fontTools.misc import sstruct
|
||||
from . import DefaultTable
|
||||
from collections.abc import Sequence
|
||||
from dataclasses import dataclass, astuple
|
||||
from io import BytesIO
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
SVG_format_0 = """
|
||||
> # big endian
|
||||
version: H
|
||||
offsetToSVGDocIndex: L
|
||||
reserved: L
|
||||
"""
|
||||
|
||||
SVG_format_0Size = sstruct.calcsize(SVG_format_0)
|
||||
|
||||
doc_index_entry_format_0 = """
|
||||
> # big endian
|
||||
startGlyphID: H
|
||||
endGlyphID: H
|
||||
svgDocOffset: L
|
||||
svgDocLength: L
|
||||
"""
|
||||
|
||||
doc_index_entry_format_0Size = sstruct.calcsize(doc_index_entry_format_0)
|
||||
|
||||
|
||||
class table_S_V_G_(DefaultTable.DefaultTable):
    # The 'SVG ' table: maps ranges of glyph IDs to SVG documents, kept in
    # self.docList as a list of SVGDocument instances.

    def decompile(self, data, ttFont):
        """Parse the binary 'SVG ' table data into self.docList."""
        self.docList = []
        # Version 0 is the standardized version of the table; and current.
        # https://www.microsoft.com/typography/otspec/svg.htm
        sstruct.unpack(SVG_format_0, data[:SVG_format_0Size], self)
        if self.version != 0:
            log.warning(
                "Unknown SVG table version '%s'. Decompiling as version 0.",
                self.version,
            )
        # read in SVG Documents Index
        # data starts with the first entry of the entry list.
        pos = subTableStart = self.offsetToSVGDocIndex
        self.numEntries = struct.unpack(">H", data[pos : pos + 2])[0]
        pos += 2
        if self.numEntries > 0:
            data2 = data[pos:]
            entries = []
            for i in range(self.numEntries):
                # Each index record is a fixed-size (startGID, endGID,
                # offset, length) entry.
                record_data = data2[
                    i
                    * doc_index_entry_format_0Size : (i + 1)
                    * doc_index_entry_format_0Size
                ]
                docIndexEntry = sstruct.unpack(
                    doc_index_entry_format_0, record_data, DocumentIndexEntry()
                )
                entries.append(docIndexEntry)

            for entry in entries:
                # Document offsets are relative to the start of the index.
                start = entry.svgDocOffset + subTableStart
                end = start + entry.svgDocLength
                doc = data[start:end]
                compressed = False
                if doc.startswith(b"\x1f\x8b"):
                    # gzip magic number: the document is stored compressed.
                    import gzip

                    bytesIO = BytesIO(doc)
                    with gzip.GzipFile(None, "r", fileobj=bytesIO) as gunzipper:
                        doc = gunzipper.read()
                    del bytesIO
                    compressed = True
                doc = tostr(doc, "utf_8")
                self.docList.append(
                    SVGDocument(doc, entry.startGlyphID, entry.endGlyphID, compressed)
                )

    def compile(self, ttFont):
        """Serialize self.docList back into binary 'SVG ' table data."""
        version = 0
        offsetToSVGDocIndex = (
            SVG_format_0Size  # I start the SVGDocIndex right after the header.
        )
        # get SGVDoc info.
        docList = []
        entryList = []
        numEntries = len(self.docList)
        datum = struct.pack(">H", numEntries)
        entryList.append(datum)
        # Document payloads start after the entry count and all index records.
        curOffset = len(datum) + doc_index_entry_format_0Size * numEntries
        seenDocs = {}
        allCompressed = getattr(self, "compressed", False)
        for i, doc in enumerate(self.docList):
            if isinstance(doc, (list, tuple)):
                # Legacy 3-item representation: upgrade in place.
                doc = SVGDocument(*doc)
                self.docList[i] = doc
            docBytes = tobytes(doc.data, encoding="utf_8")
            if (allCompressed or doc.compressed) and not docBytes.startswith(
                b"\x1f\x8b"
            ):
                import gzip

                bytesIO = BytesIO()
                # mtime=0 strips the useless timestamp and makes gzip output reproducible;
                # equivalent to `gzip -n`
                with gzip.GzipFile(None, "w", fileobj=bytesIO, mtime=0) as gzipper:
                    gzipper.write(docBytes)
                gzipped = bytesIO.getvalue()
                if len(gzipped) < len(docBytes):
                    # Only keep the compressed form when it is smaller.
                    docBytes = gzipped
                del gzipped, bytesIO
            docLength = len(docBytes)
            if docBytes in seenDocs:
                # Identical documents are stored once and shared by offset.
                docOffset = seenDocs[docBytes]
            else:
                docOffset = curOffset
                curOffset += docLength
                seenDocs[docBytes] = docOffset
                docList.append(docBytes)
            entry = struct.pack(
                ">HHLL", doc.startGlyphID, doc.endGlyphID, docOffset, docLength
            )
            entryList.append(entry)
        entryList.extend(docList)
        svgDocData = bytesjoin(entryList)

        reserved = 0
        header = struct.pack(">HLL", version, offsetToSVGDocIndex, reserved)
        data = [header, svgDocData]
        data = bytesjoin(data)
        return data

    def toXML(self, writer, ttFont):
        """Write each document as an <svgDoc> element wrapping a CDATA block."""
        for i, doc in enumerate(self.docList):
            if isinstance(doc, (list, tuple)):
                # Legacy 3-item representation: upgrade in place.
                doc = SVGDocument(*doc)
                self.docList[i] = doc
            attrs = {"startGlyphID": doc.startGlyphID, "endGlyphID": doc.endGlyphID}
            if doc.compressed:
                attrs["compressed"] = 1
            writer.begintag("svgDoc", **attrs)
            writer.newline()
            writer.writecdata(doc.data)
            writer.newline()
            writer.endtag("svgDoc")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild self.docList from <svgDoc> elements; inverse of toXML."""
        if name == "svgDoc":
            if not hasattr(self, "docList"):
                self.docList = []
            doc = strjoin(content)
            doc = doc.strip()
            startGID = int(attrs["startGlyphID"])
            endGID = int(attrs["endGlyphID"])
            compressed = bool(safeEval(attrs.get("compressed", "0")))
            self.docList.append(SVGDocument(doc, startGID, endGID, compressed))
        else:
            log.warning("Unknown %s %s", name, content)
|
||||
|
||||
|
||||
class DocumentIndexEntry(object):
    """One record of the SVG Documents Index: a glyph-ID range plus the
    offset and length of its SVG document within the table."""

    def __init__(self):
        # All four fields are filled in later by sstruct.unpack().
        self.startGlyphID = None  # USHORT
        self.endGlyphID = None  # USHORT
        self.svgDocOffset = None  # ULONG
        self.svgDocLength = None  # ULONG

    def __repr__(self):
        return (
            f"startGlyphID: {self.startGlyphID}, "
            f"endGlyphID: {self.endGlyphID}, "
            f"svgDocOffset: {self.svgDocOffset}, "
            f"svgDocLength: {self.svgDocLength}"
        )
|
||||
|
||||
|
||||
@dataclass
class SVGDocument(Sequence):
    # The SVG source text, the inclusive glyph-ID range it covers, and
    # whether it should be stored gzip-compressed when the table is compiled.
    data: str
    startGlyphID: int
    endGlyphID: int
    compressed: bool = False

    # Previously, the SVG table's docList attribute contained lists of 3 items:
    # [doc, startGlyphID, endGlyphID]; later, we added a `compressed` attribute.
    # For backward compatibility with code that depends on them being sequences
    # of fixed length=3, we subclass the Sequence abstract base class and pretend
    # only the first three items are present. 'compressed' is only accessible via
    # named attribute lookup like regular dataclasses: i.e. `doc.compressed`,
    # not `doc[3]`
    def __getitem__(self, index):
        return astuple(self)[:3][index]

    def __len__(self):
        return 3
|
||||
1037
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/S__i_l_f.py
Normal file
1037
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/S__i_l_f.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,87 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import floatToFixedToStr
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
from . import grUtils
|
||||
import struct
|
||||
|
||||
Sill_hdr = """
|
||||
>
|
||||
version: 16.16F
|
||||
"""
|
||||
|
||||
|
||||
class table_S__i_l_l(DefaultTable.DefaultTable):
    # Graphite 'Sill' table: per-language default feature settings.
    # self.langs maps a language code (str) to a list of
    # (featureId, value) pairs.

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.langs = {}

    def decompile(self, data, ttFont):
        """Parse the binary 'Sill' table into self.langs."""
        (_, data) = sstruct.unpack2(Sill_hdr, data, self)
        # Store the 16.16 fixed-point version as a plain float (e.g. 1.0).
        self.version = float(floatToFixedToStr(self.version, precisionBits=16))
        (numLangs,) = struct.unpack(">H", data[:2])
        # Skip 8 bytes: numLangs plus the binary-search helper fields.
        data = data[8:]
        maxsetting = 0
        langinfo = []
        for i in range(numLangs):
            # Each language record: 4-byte code, setting count, byte offset.
            (langcode, numsettings, offset) = struct.unpack(
                ">4sHH", data[i * 8 : (i + 1) * 8]
            )
            # Convert the byte offset into an index into the settings array
            # (records and settings are 8 bytes each; the settings follow
            # the numLangs records plus the header record).
            offset = int(offset / 8) - (numLangs + 1)
            langcode = langcode.replace(b"\000", b"")
            langinfo.append((langcode.decode("utf-8"), numsettings, offset))
            maxsetting = max(maxsetting, offset + numsettings)
        data = data[numLangs * 8 :]
        finfo = []
        for i in range(maxsetting):
            # Each setting: 4-byte feature id, value, 2 bytes of padding.
            (fid, val, _) = struct.unpack(">LHH", data[i * 8 : (i + 1) * 8])
            finfo.append((fid, val))
        self.langs = {}
        for c, n, o in langinfo:
            self.langs[c] = []
            for i in range(o, o + n):
                self.langs[c].append(finfo[i])

    def compile(self, ttFont):
        """Serialize self.langs back to binary; inverse of decompile()."""
        ldat = b""
        fdat = b""
        # Settings start after one record per language plus the sentinel;
        # 'offset' counts 8-byte units.
        offset = len(self.langs)
        for c, inf in sorted(self.langs.items()):
            # NOTE(review): the '+ 20' appears to account for the header and
            # bininfo bytes preceding the records -- confirm against the
            # Graphite Sill specification.
            ldat += struct.pack(">4sHH", c.encode("utf8"), len(inf), 8 * offset + 20)
            for fid, val in inf:
                fdat += struct.pack(">LHH", fid, val, 0)
            offset += len(inf)
        # Sentinel record terminating the language list.
        ldat += struct.pack(">LHH", 0x80808080, 0, 8 * offset + 20)
        return (
            sstruct.pack(Sill_hdr, self)
            + grUtils.bininfo(len(self.langs))
            + ldat
            + fdat
        )

    def toXML(self, writer, ttFont):
        """Write one <lang> element per language, each with its features."""
        writer.simpletag("version", version=self.version)
        writer.newline()
        for c, inf in sorted(self.langs.items()):
            writer.begintag("lang", name=c)
            writer.newline()
            for fid, val in inf:
                writer.simpletag("feature", fid=grUtils.num2tag(fid), val=val)
                writer.newline()
            writer.endtag("lang")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild self.langs from XML; inverse of toXML."""
        if name == "version":
            self.version = float(safeEval(attrs["version"]))
        elif name == "lang":
            c = attrs["name"]
            self.langs[c] = []
            for element in content:
                # Skip whitespace/character data between child elements.
                if not isinstance(element, tuple):
                    continue
                tag, a, subcontent = element
                if tag == "feature":
                    self.langs[c].append(
                        (grUtils.tag2num(a["fid"]), int(safeEval(a["val"])))
                    )
|
||||
@ -0,0 +1,5 @@
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_B_(table_T_S_I_V_):
    # VTT private table stored as plain text; all behavior comes from the
    # shared table_T_S_I_V_ base class.
    pass
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_T_S_I_C_(BaseTTXConverter):
    # VTT private table handled entirely by the generic otBase/otData
    # (de)compilation machinery; no custom logic is needed.
    pass
|
||||
@ -0,0 +1,5 @@
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_D_(table_T_S_I_V_):
    # VTT private table stored as plain text; all behavior comes from the
    # shared table_T_S_I_V_ base class.
    pass
|
||||
@ -0,0 +1,5 @@
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_J_(table_T_S_I_V_):
    # VTT private table stored as plain text; all behavior comes from the
    # shared table_T_S_I_V_ base class.
    pass
|
||||
@ -0,0 +1,5 @@
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_P_(table_T_S_I_V_):
    # VTT private table stored as plain text; all behavior comes from the
    # shared table_T_S_I_V_ base class.
    pass
|
||||
@ -0,0 +1,5 @@
|
||||
from .T_S_I_V_ import table_T_S_I_V_
|
||||
|
||||
|
||||
class table_T_S_I_S_(table_T_S_I_V_):
    # VTT private table stored as plain text; all behavior comes from the
    # shared table_T_S_I_V_ base class.
    pass
|
||||
@ -0,0 +1,20 @@
|
||||
from fontTools.misc.textTools import strjoin, tobytes, tostr
|
||||
from . import asciiTable
|
||||
|
||||
|
||||
class table_T_S_I_V_(asciiTable.asciiTable):
    """Plain-text VTT private table, round-tripped as a <source> element."""

    def toXML(self, writer, ttFont):
        # Drop embedded NUL bytes (XXX still unclear whether any fonts
        # actually need this) and normalize classic-Mac '\r' line endings
        # to '\n' for the XML output.
        text = tostr(self.data).replace("\0", "")
        writer.begintag("source")
        writer.newline()
        writer.write_noindent(text.replace("\r", "\n"))
        writer.newline()
        writer.endtag("source")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Inverse of toXML: strip the first and last (formatting-only)
        # lines and store the body with '\r' line endings again.
        body_lines = strjoin(content).split("\n")[1:-1]
        self.data = tobytes("\r".join(body_lines))
|
||||
@ -0,0 +1,57 @@
|
||||
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI0 is the index table containing the lengths and offsets for the glyph
|
||||
programs and 'extra' programs ('fpgm', 'prep', and 'cvt') that are contained
|
||||
in the TSI1 table.
|
||||
"""
|
||||
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
tsi0Format = ">HHL"
|
||||
|
||||
|
||||
def fixlongs(glyphID, textLength, textOffset):
    # Coerce the first two unpacked fields to plain ints. NOTE(review): this
    # looks like a Python 2-era helper (struct used to return longs); under
    # Python 3 it is effectively an identity function, kept for safety.
    return int(glyphID), int(textLength), textOffset
|
||||
|
||||
|
||||
class table_T_S_I__0(DefaultTable.DefaultTable):
    # TSI0 is the index into TSI1: one (glyphID, textLength, textOffset)
    # record per glyph, then a magic-number sentinel record, then 4 'extra'
    # records for the non-glyph programs.

    dependencies = ["TSI1"]

    def decompile(self, data, ttFont):
        """Parse the fixed-size index records into self.indices and
        self.extra_indices."""
        numGlyphs = ttFont["maxp"].numGlyphs
        indices = []
        size = struct.calcsize(tsi0Format)
        # One record per glyph plus 5 trailing records (sentinel + 4 extras).
        for i in range(numGlyphs + 5):
            glyphID, textLength, textOffset = fixlongs(
                *struct.unpack(tsi0Format, data[:size])
            )
            indices.append((glyphID, textLength, textOffset))
            data = data[size:]
        assert len(data) == 0
        # The 5th-from-last record must be the magic sentinel separating the
        # per-glyph records from the 'extra' program records.
        assert indices[-5] == (0xFFFE, 0, 0xABFC1F34), "bad magic number"
        self.indices = indices[:-5]
        self.extra_indices = indices[-4:]

    def compile(self, ttFont):
        """Serialize the index records; inverse of decompile()."""
        if not hasattr(self, "indices"):
            # We have no corresponding table (TSI1 or TSI3); let's return
            # no data, which effectively means "ignore us".
            return b""
        data = b""
        for index, textLength, textOffset in self.indices:
            data = data + struct.pack(tsi0Format, index, textLength, textOffset)
        # Magic sentinel record between glyph and 'extra' records.
        data = data + struct.pack(tsi0Format, 0xFFFE, 0, 0xABFC1F34)
        for index, textLength, textOffset in self.extra_indices:
            data = data + struct.pack(tsi0Format, index, textLength, textOffset)
        return data

    def set(self, indices, extra_indices):
        # gets called by 'TSI1' or 'TSI3'
        self.indices = indices
        self.extra_indices = extra_indices

    def toXML(self, writer, ttFont):
        # The index is derived data; it is rebuilt from TSI1/TSI3 on compile.
        writer.comment("This table will be calculated by the compiler")
        writer.newline()
|
||||
@ -0,0 +1,164 @@
|
||||
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI1 contains the text of the glyph programs in the form of low-level assembly
|
||||
code, as well as the 'extra' programs 'fpgm', 'ppgm' (i.e. 'prep'), and 'cvt'.
|
||||
"""
|
||||
|
||||
from . import DefaultTable
|
||||
from fontTools.misc.loggingTools import LogMixin
|
||||
from fontTools.misc.textTools import strjoin, tobytes, tostr
|
||||
|
||||
|
||||
class table_T_S_I__1(LogMixin, DefaultTable.DefaultTable):
    """VTT glyph-program source table (low-level assembly text).

    Records are located via the companion index table (``indextable``,
    TSI0 for TSI1). The special sentinel glyph ids in ``extras`` map to
    the non-glyph programs.
    """

    # Sentinel glyph ids -> names of the "extra" (non-glyph) programs.
    extras = {0xFFFA: "ppgm", 0xFFFB: "cvt", 0xFFFC: "reserved", 0xFFFD: "fpgm"}

    indextable = "TSI0"

    def decompile(self, data, ttFont):
        """Slice the program text for each index record out of *data*.

        Populates ``self.glyphPrograms`` (glyph name -> source text) and
        ``self.extraPrograms`` (program name -> source text).
        """
        totalLength = len(data)
        indextable = ttFont[self.indextable]
        # First pass handles normal glyph records, second the extras.
        for indices, isExtra in zip(
            (indextable.indices, indextable.extra_indices), (False, True)
        ):
            programs = {}
            for i, (glyphID, textLength, textOffset) in enumerate(indices):
                if isExtra:
                    name = self.extras[glyphID]
                else:
                    name = ttFont.getGlyphName(glyphID)
                if textOffset > totalLength:
                    self.log.warning("textOffset > totalLength; %r skipped" % name)
                    continue
                if textLength < 0x8000:
                    # If the length stored in the record is less than 32768, then use
                    # that as the length of the record.
                    pass
                elif textLength == 0x8000:
                    # If the length is 32768, compute the actual length as follows:
                    isLast = i == (len(indices) - 1)
                    if isLast:
                        if isExtra:
                            # For the last "extra" record (the very last record of the
                            # table), the length is the difference between the total
                            # length of the TSI1 table and the textOffset of the final
                            # record.
                            nextTextOffset = totalLength
                        else:
                            # For the last "normal" record (the last record just prior
                            # to the record containing the "magic number"), the length
                            # is the difference between the textOffset of the record
                            # following the "magic number" (0xFFFE) record (i.e. the
                            # first "extra" record), and the textOffset of the last
                            # "normal" record.
                            nextTextOffset = indextable.extra_indices[0][2]
                    else:
                        # For all other records with a length of 0x8000, the length is
                        # the difference between the textOffset of the record in
                        # question and the textOffset of the next record.
                        nextTextOffset = indices[i + 1][2]
                    assert nextTextOffset >= textOffset, "entries not sorted by offset"
                    if nextTextOffset > totalLength:
                        self.log.warning(
                            "nextTextOffset > totalLength; %r truncated" % name
                        )
                        nextTextOffset = totalLength
                    textLength = nextTextOffset - textOffset
                else:
                    from fontTools import ttLib

                    raise ttLib.TTLibError(
                        "%r textLength (%d) must not be > 32768" % (name, textLength)
                    )
                text = data[textOffset : textOffset + textLength]
                assert len(text) == textLength
                text = tostr(text, encoding="utf-8")
                # Empty programs are simply omitted from the dict.
                if text:
                    programs[name] = text
            if isExtra:
                self.extraPrograms = programs
            else:
                self.glyphPrograms = programs

    def compile(self, ttFont):
        """Concatenate all program texts and feed the computed index
        records to the companion index table via its ``set`` method."""
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        data = b""
        indextable = ttFont[self.indextable]
        glyphNames = ttFont.getGlyphOrder()

        indices = []
        for i in range(len(glyphNames)):
            if len(data) % 2:
                data = (
                    data + b"\015"
                )  # align on 2-byte boundaries, fill with return chars. Yum.
            name = glyphNames[i]
            if name in self.glyphPrograms:
                text = tobytes(self.glyphPrograms[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            # 0x8000 is the "length must be computed from offsets" marker.
            if textLength >= 0x8000:
                textLength = 0x8000
            indices.append((i, textLength, len(data)))
            data = data + text

        extra_indices = []
        codes = sorted(self.extras.items())
        for i in range(len(codes)):
            if len(data) % 2:
                data = (
                    data + b"\015"
                )  # align on 2-byte boundaries, fill with return chars.
            code, name = codes[i]
            if name in self.extraPrograms:
                text = tobytes(self.extraPrograms[name], encoding="utf-8")
            else:
                text = b""
            textLength = len(text)
            if textLength >= 0x8000:
                textLength = 0x8000
            extra_indices.append((code, textLength, len(data)))
            data = data + text
        indextable.set(indices, extra_indices)
        return data

    def toXML(self, writer, ttFont):
        """Dump every non-empty program as a <glyphProgram>/<extraProgram>
        element; embedded \\r line endings are written as \\n."""
        names = sorted(self.glyphPrograms.keys())
        writer.newline()
        for name in names:
            text = self.glyphPrograms[name]
            if not text:
                continue
            writer.begintag("glyphProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("glyphProgram")
            writer.newline()
            writer.newline()
        extra_names = sorted(self.extraPrograms.keys())
        for name in extra_names:
            text = self.extraPrograms[name]
            if not text:
                continue
            writer.begintag("extraProgram", name=name)
            writer.newline()
            writer.write_noindent(text.replace("\r", "\n"))
            writer.newline()
            writer.endtag("extraProgram")
            writer.newline()
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Re-ingest one program element; strips the first/last (layout)
        lines and restores \\r line endings."""
        if not hasattr(self, "glyphPrograms"):
            self.glyphPrograms = {}
            self.extraPrograms = {}
        lines = strjoin(content).replace("\r", "\n").split("\n")
        text = "\r".join(lines[1:-1])
        if name == "glyphProgram":
            self.glyphPrograms[attrs["name"]] = text
        elif name == "extraProgram":
            self.extraPrograms[attrs["name"]] = text
@ -0,0 +1,15 @@
|
||||
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI2 is the index table containing the lengths and offsets for the glyph
|
||||
programs that are contained in the TSI3 table. It uses the same format as
|
||||
the TSI0 table.
|
||||
"""
|
||||
|
||||
from fontTools import ttLib
|
||||
|
||||
superclass = ttLib.getTableClass("TSI0")
|
||||
|
||||
|
||||
class table_T_S_I__2(superclass):
    """Index table for the TSI3 VTTTalk-program table; same binary
    format as TSI0 (inherited), but paired with TSI3."""

    dependencies = ["TSI3"]
@ -0,0 +1,20 @@
|
||||
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI3 contains the text of the glyph programs in the form of 'VTTTalk' code.
|
||||
"""
|
||||
|
||||
from fontTools import ttLib
|
||||
|
||||
superclass = ttLib.getTableClass("TSI1")
|
||||
|
||||
|
||||
class table_T_S_I__3(superclass):
    """VTTTalk glyph-program source table; behaves like TSI1 (inherited)
    but uses TSI2 as its index table and has only reserved extras."""

    # Sentinel glyph ids -> "extra" program slot names (all reserved here).
    extras = {
        0xFFFA: "reserved0",
        0xFFFB: "reserved1",
        0xFFFC: "reserved2",
        0xFFFD: "reserved3",
    }

    indextable = "TSI2"
@ -0,0 +1,47 @@
|
||||
""" TSI{0,1,2,3,5} are private tables used by Microsoft Visual TrueType (VTT)
|
||||
tool to store its hinting source data.
|
||||
|
||||
TSI5 contains the VTT character groups.
|
||||
"""
|
||||
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import array
|
||||
|
||||
|
||||
class table_T_S_I__5(DefaultTable.DefaultTable):
    """VTT character-group table: one big-endian uint16 group id per glyph,
    exposed as ``self.glyphGrouping`` (glyph name -> group id)."""

    def decompile(self, data, ttFont):
        """Read one uint16 per glyph; data length must match maxp.numGlyphs."""
        numGlyphs = ttFont["maxp"].numGlyphs
        assert len(data) == 2 * numGlyphs
        a = array.array("H")
        a.frombytes(data)
        # Table data is big-endian; swap on little-endian hosts.
        if sys.byteorder != "big":
            a.byteswap()
        self.glyphGrouping = {}
        for i in range(numGlyphs):
            self.glyphGrouping[ttFont.getGlyphName(i)] = a[i]

    def compile(self, ttFont):
        """Write one uint16 per glyph in glyph order; missing glyphs get 0."""
        glyphNames = ttFont.getGlyphOrder()
        a = array.array("H")
        for i in range(len(glyphNames)):
            a.append(self.glyphGrouping.get(glyphNames[i], 0))
        if sys.byteorder != "big":
            a.byteswap()
        return a.tobytes()

    def toXML(self, writer, ttFont):
        """Emit one <glyphgroup> element per glyph, sorted by glyph name."""
        names = sorted(self.glyphGrouping.keys())
        for glyphName in names:
            writer.simpletag(
                "glyphgroup", name=glyphName, value=self.glyphGrouping[glyphName]
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Ingest one <glyphgroup> element; other element names are ignored."""
        if not hasattr(self, "glyphGrouping"):
            self.glyphGrouping = {}
        if name != "glyphgroup":
            return
        self.glyphGrouping[attrs["name"]] = safeEval(attrs["value"])
@ -0,0 +1,5 @@
|
||||
from . import asciiTable
|
||||
|
||||
|
||||
class table_T_T_F_A_(asciiTable.asciiTable):
    """TTFA table (plain ASCII payload); all behavior inherited from
    asciiTable."""

    pass
@ -0,0 +1,884 @@
|
||||
from fontTools.misc.fixedTools import (
|
||||
fixedToFloat as fi2fl,
|
||||
floatToFixed as fl2fi,
|
||||
floatToFixedToStr as fl2str,
|
||||
strToFixedToFloat as str2fl,
|
||||
otRound,
|
||||
)
|
||||
from fontTools.misc.textTools import safeEval
|
||||
import array
|
||||
from collections import Counter, defaultdict
|
||||
import io
|
||||
import logging
|
||||
import struct
|
||||
import sys
|
||||
|
||||
|
||||
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
|
||||
|
||||
EMBEDDED_PEAK_TUPLE = 0x8000
|
||||
INTERMEDIATE_REGION = 0x4000
|
||||
PRIVATE_POINT_NUMBERS = 0x2000
|
||||
|
||||
DELTAS_ARE_ZERO = 0x80
|
||||
DELTAS_ARE_WORDS = 0x40
|
||||
DELTAS_ARE_LONGS = 0xC0
|
||||
DELTAS_SIZE_MASK = 0xC0
|
||||
DELTA_RUN_COUNT_MASK = 0x3F
|
||||
|
||||
POINTS_ARE_WORDS = 0x80
|
||||
POINT_RUN_COUNT_MASK = 0x7F
|
||||
|
||||
TUPLES_SHARE_POINT_NUMBERS = 0x8000
|
||||
TUPLE_COUNT_MASK = 0x0FFF
|
||||
TUPLE_INDEX_MASK = 0x0FFF
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TupleVariation(object):
|
||||
def __init__(self, axes, coordinates):
|
||||
self.axes = axes.copy()
|
||||
self.coordinates = list(coordinates)
|
||||
|
||||
def __repr__(self):
|
||||
axes = ",".join(
|
||||
sorted(["%s=%s" % (name, value) for (name, value) in self.axes.items()])
|
||||
)
|
||||
return "<TupleVariation %s %s>" % (axes, self.coordinates)
|
||||
|
||||
def __eq__(self, other):
|
||||
return self.coordinates == other.coordinates and self.axes == other.axes
|
||||
|
||||
def getUsedPoints(self):
|
||||
# Empty set means "all points used".
|
||||
if None not in self.coordinates:
|
||||
return frozenset()
|
||||
used = frozenset([i for i, p in enumerate(self.coordinates) if p is not None])
|
||||
# Return None if no points used.
|
||||
return used if used else None
|
||||
|
||||
def hasImpact(self):
|
||||
"""Returns True if this TupleVariation has any visible impact.
|
||||
|
||||
If the result is False, the TupleVariation can be omitted from the font
|
||||
without making any visible difference.
|
||||
"""
|
||||
return any(c is not None for c in self.coordinates)
|
||||
|
||||
    def toXML(self, writer, axisTags):
        """Write this variation as a <tuple> element: one <coord> per set
        axis (min/max attributes only when they differ from the defaults
        implied by the peak value), then one <delta> per set coordinate."""
        writer.begintag("tuple")
        writer.newline()
        for axis in axisTags:
            value = self.axes.get(axis)
            if value is not None:
                minValue, value, maxValue = value
                defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
                defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
                if minValue == defaultMinValue and maxValue == defaultMaxValue:
                    writer.simpletag("coord", axis=axis, value=fl2str(value, 14))
                else:
                    attrs = [
                        ("axis", axis),
                        ("min", fl2str(minValue, 14)),
                        ("value", fl2str(value, 14)),
                        ("max", fl2str(maxValue, 14)),
                    ]
                    writer.simpletag("coord", attrs)
                writer.newline()
        wrote_any_deltas = False
        for i, delta in enumerate(self.coordinates):
            # (x, y) pair -> gvar point delta; plain int -> cvar value delta.
            if type(delta) == tuple and len(delta) == 2:
                writer.simpletag("delta", pt=i, x=delta[0], y=delta[1])
                writer.newline()
                wrote_any_deltas = True
            elif type(delta) == int:
                writer.simpletag("delta", cvt=i, value=delta)
                writer.newline()
                wrote_any_deltas = True
            elif delta is not None:
                log.error("bad delta format")
                writer.comment("bad delta #%d" % i)
                writer.newline()
                wrote_any_deltas = True
        if not wrote_any_deltas:
            writer.comment("no deltas")
            writer.newline()
        writer.endtag("tuple")
        writer.newline()
||||
    def fromXML(self, name, attrs, _content):
        """Ingest one <coord> or <delta> element (inverse of toXML)."""
        if name == "coord":
            axis = attrs["axis"]
            value = str2fl(attrs["value"], 14)
            defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
            defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
            minValue = str2fl(attrs.get("min", defaultMinValue), 14)
            maxValue = str2fl(attrs.get("max", defaultMaxValue), 14)
            self.axes[axis] = (minValue, value, maxValue)
        elif name == "delta":
            # "pt" attribute -> gvar point delta; "cvt" -> cvar value delta.
            if "pt" in attrs:
                point = safeEval(attrs["pt"])
                x = safeEval(attrs["x"])
                y = safeEval(attrs["y"])
                self.coordinates[point] = (x, y)
            elif "cvt" in attrs:
                cvt = safeEval(attrs["cvt"])
                value = safeEval(attrs["value"])
                self.coordinates[cvt] = value
            else:
                log.warning("bad delta format: %s" % ", ".join(sorted(attrs.keys())))
||||
    def compile(
        self, axisTags, sharedCoordIndices={}, pointData=None, *, optimizeSize=True
    ):
        """Compile to the binary tuple-variation format.

        Returns (tupleData, auxData): the header+coords part and the
        serialized points+deltas part. Returns (b"", b"") when there is
        nothing to encode. ``sharedCoordIndices`` maps compiled peak
        coords to shared indices (read-only; the mutable default is never
        mutated). ``pointData`` of b"" means "use shared point numbers".
        """
        assert set(self.axes.keys()) <= set(axisTags), (
            "Unknown axis tag found.",
            self.axes.keys(),
            axisTags,
        )

        tupleData = []
        auxData = []

        if pointData is None:
            usedPoints = self.getUsedPoints()
            if usedPoints is None:  # Nothing to encode
                return b"", b""
            pointData = self.compilePoints(usedPoints)

        # Use a shared coord index when available; otherwise embed the peak.
        coord = self.compileCoord(axisTags)
        flags = sharedCoordIndices.get(coord)
        if flags is None:
            flags = EMBEDDED_PEAK_TUPLE
            tupleData.append(coord)

        intermediateCoord = self.compileIntermediateCoord(axisTags)
        if intermediateCoord is not None:
            flags |= INTERMEDIATE_REGION
            tupleData.append(intermediateCoord)

        # pointData of b'' implies "use shared points".
        if pointData:
            flags |= PRIVATE_POINT_NUMBERS
            auxData.append(pointData)

        auxData.append(self.compileDeltas(optimizeSize=optimizeSize))
        auxData = b"".join(auxData)

        # Header: uint16 data size, uint16 flags (+ optional tuple index bits).
        tupleData.insert(0, struct.pack(">HH", len(auxData), flags))
        return b"".join(tupleData), auxData
||||
    def compileCoord(self, axisTags):
        """Pack the peak value of every axis (in axisTags order) as
        F2Dot14; axes not present in this variation encode as zero."""
        result = []
        axes = self.axes
        for axis in axisTags:
            triple = axes.get(axis)
            if triple is None:
                result.append(b"\0\0")
            else:
                # triple is (min, peak, max); only the peak goes here.
                result.append(struct.pack(">h", fl2fi(triple[1], 14)))
        return b"".join(result)
||||
    def compileIntermediateCoord(self, axisTags):
        """Pack the (min, max) region as F2Dot14 pairs, or return None when
        every axis already matches the defaults implied by its peak."""
        needed = False
        for axis in axisTags:
            minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
            defaultMinValue = min(value, 0.0)  # -0.3 --> -0.3; 0.7 --> 0.0
            defaultMaxValue = max(value, 0.0)  # -0.3 --> 0.0; 0.7 --> 0.7
            if (minValue != defaultMinValue) or (maxValue != defaultMaxValue):
                needed = True
                break
        if not needed:
            return None
        minCoords = []
        maxCoords = []
        for axis in axisTags:
            minValue, value, maxValue = self.axes.get(axis, (0.0, 0.0, 0.0))
            minCoords.append(struct.pack(">h", fl2fi(minValue, 14)))
            maxCoords.append(struct.pack(">h", fl2fi(maxValue, 14)))
        # All mins first, then all maxes, per the tuple-variation format.
        return b"".join(minCoords + maxCoords)
||||
    @staticmethod
    def decompileCoord_(axisTags, data, offset):
        """Read one F2Dot14 per axis starting at *offset*; return
        ({axisTag: float}, newOffset)."""
        coord = {}
        pos = offset
        for axis in axisTags:
            coord[axis] = fi2fl(struct.unpack(">h", data[pos : pos + 2])[0], 14)
            pos += 2
        return coord, pos
||||
    @staticmethod
    def compilePoints(points):
        """Encode a set of point indices into the packed point-number
        format; returns a bytearray (b"\\0" encodes "all points")."""
        # If the set consists of all points in the glyph, it gets encoded with
        # a special encoding: a single zero byte.
        #
        # To use this optimization, points passed in must be empty set.
        # The following two lines are not strictly necessary as the main code
        # below would emit the same. But this is most common and faster.
        if not points:
            return b"\0"

        # In the 'gvar' table, the packing of point numbers is a little surprising.
        # It consists of multiple runs, each being a delta-encoded list of integers.
        # For example, the point set {17, 18, 19, 20, 21, 22, 23} gets encoded as
        # [6, 17, 1, 1, 1, 1, 1, 1]. The first value (6) is the run length minus 1.
        # There are two types of runs, with values being either 8 or 16 bit unsigned
        # integers.
        points = list(points)
        points.sort()
        numPoints = len(points)

        result = bytearray()
        # The binary representation starts with the total number of points in the set,
        # encoded into one or two bytes depending on the value.
        if numPoints < 0x80:
            result.append(numPoints)
        else:
            result.append((numPoints >> 8) | 0x80)
            result.append(numPoints & 0xFF)

        MAX_RUN_LENGTH = 127
        pos = 0
        lastValue = 0
        while pos < numPoints:
            runLength = 0

            # Reserve the run-header byte; patched once the run is known.
            headerPos = len(result)
            result.append(0)

            useByteEncoding = None
            while pos < numPoints and runLength <= MAX_RUN_LENGTH:
                curValue = points[pos]
                delta = curValue - lastValue
                if useByteEncoding is None:
                    useByteEncoding = 0 <= delta <= 0xFF
                if useByteEncoding and (delta > 0xFF or delta < 0):
                    # we need to start a new run (which will not use byte encoding)
                    break
                # TODO This never switches back to a byte-encoding from a short-encoding.
                # That's suboptimal.
                if useByteEncoding:
                    result.append(delta)
                else:
                    result.append(delta >> 8)
                    result.append(delta & 0xFF)
                lastValue = curValue
                pos += 1
                runLength += 1
            if useByteEncoding:
                result[headerPos] = runLength - 1
            else:
                result[headerPos] = (runLength - 1) | POINTS_ARE_WORDS

        return result
||||
@staticmethod
|
||||
def decompilePoints_(numPoints, data, offset, tableTag):
|
||||
"""(numPoints, data, offset, tableTag) --> ([point1, point2, ...], newOffset)"""
|
||||
assert tableTag in ("cvar", "gvar")
|
||||
pos = offset
|
||||
numPointsInData = data[pos]
|
||||
pos += 1
|
||||
if (numPointsInData & POINTS_ARE_WORDS) != 0:
|
||||
numPointsInData = (numPointsInData & POINT_RUN_COUNT_MASK) << 8 | data[pos]
|
||||
pos += 1
|
||||
if numPointsInData == 0:
|
||||
return (range(numPoints), pos)
|
||||
|
||||
result = []
|
||||
while len(result) < numPointsInData:
|
||||
runHeader = data[pos]
|
||||
pos += 1
|
||||
numPointsInRun = (runHeader & POINT_RUN_COUNT_MASK) + 1
|
||||
point = 0
|
||||
if (runHeader & POINTS_ARE_WORDS) != 0:
|
||||
points = array.array("H")
|
||||
pointsSize = numPointsInRun * 2
|
||||
else:
|
||||
points = array.array("B")
|
||||
pointsSize = numPointsInRun
|
||||
points.frombytes(data[pos : pos + pointsSize])
|
||||
if sys.byteorder != "big":
|
||||
points.byteswap()
|
||||
|
||||
assert len(points) == numPointsInRun
|
||||
pos += pointsSize
|
||||
|
||||
result.extend(points)
|
||||
|
||||
# Convert relative to absolute
|
||||
absolute = []
|
||||
current = 0
|
||||
for delta in result:
|
||||
current += delta
|
||||
absolute.append(current)
|
||||
result = absolute
|
||||
del absolute
|
||||
|
||||
badPoints = {str(p) for p in result if p < 0 or p >= numPoints}
|
||||
if badPoints:
|
||||
log.warning(
|
||||
"point %s out of range in '%s' table"
|
||||
% (",".join(sorted(badPoints)), tableTag)
|
||||
)
|
||||
return (result, pos)
|
||||
|
||||
    def compileDeltas(self, optimizeSize=True):
        """Serialize all non-None deltas: for gvar-style (x, y) pairs the
        x stream is emitted first, then the y stream; cvar-style scalars
        produce a single stream."""
        deltaX = []
        deltaY = []
        if self.getCoordWidth() == 2:
            for c in self.coordinates:
                if c is None:
                    continue
                deltaX.append(c[0])
                deltaY.append(c[1])
        else:
            for c in self.coordinates:
                if c is None:
                    continue
                deltaX.append(c)
        bytearr = bytearray()
        self.compileDeltaValues_(deltaX, bytearr, optimizeSize=optimizeSize)
        self.compileDeltaValues_(deltaY, bytearr, optimizeSize=optimizeSize)
        return bytearr
||||
    @staticmethod
    def compileDeltaValues_(deltas, bytearr=None, *, optimizeSize=True):
        """[value1, value2, value3, ...] --> bytearray

        Emits a sequence of runs. Each run starts with a
        byte-sized header whose 6 least significant bits
        (header & 0x3F) indicate how many values are encoded
        in this run. The stored length is the actual length
        minus one; run lengths are thus in the range [1..64].
        If the header byte has its most significant bit (0x80)
        set, all values in this run are zero, and no data
        follows. Otherwise, the header byte is followed by
        ((header & 0x3F) + 1) signed values. If (header &
        0x40) is clear, the delta values are stored as signed
        bytes; if (header & 0x40) is set, the delta values are
        signed 16-bit integers.
        """  # Explaining the format because the 'gvar' spec is hard to understand.
        if bytearr is None:
            bytearr = bytearray()

        pos = 0
        numDeltas = len(deltas)

        if optimizeSize:
            # Pick the narrowest encoding per run, value by value.
            while pos < numDeltas:
                value = deltas[pos]
                if value == 0:
                    pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
                elif -128 <= value <= 127:
                    pos = TupleVariation.encodeDeltaRunAsBytes_(deltas, pos, bytearr)
                elif -32768 <= value <= 32767:
                    pos = TupleVariation.encodeDeltaRunAsWords_(deltas, pos, bytearr)
                else:
                    pos = TupleVariation.encodeDeltaRunAsLongs_(deltas, pos, bytearr)
        else:
            # Single encoding wide enough for the extreme values; faster to emit.
            minVal, maxVal = min(deltas), max(deltas)
            if minVal == 0 == maxVal:
                pos = TupleVariation.encodeDeltaRunAsZeroes_(deltas, pos, bytearr)
            elif -128 <= minVal <= maxVal <= 127:
                pos = TupleVariation.encodeDeltaRunAsBytes_(
                    deltas, pos, bytearr, optimizeSize=False
                )
            elif -32768 <= minVal <= maxVal <= 32767:
                pos = TupleVariation.encodeDeltaRunAsWords_(
                    deltas, pos, bytearr, optimizeSize=False
                )
            else:
                pos = TupleVariation.encodeDeltaRunAsLongs_(
                    deltas, pos, bytearr, optimizeSize=False
                )

        assert pos == numDeltas, (pos, numDeltas)

        return bytearr
||||
@staticmethod
|
||||
def encodeDeltaRunAsZeroes_(deltas, offset, bytearr):
|
||||
pos = offset
|
||||
numDeltas = len(deltas)
|
||||
while pos < numDeltas and deltas[pos] == 0:
|
||||
pos += 1
|
||||
runLength = pos - offset
|
||||
while runLength >= 64:
|
||||
bytearr.append(DELTAS_ARE_ZERO | 63)
|
||||
runLength -= 64
|
||||
if runLength:
|
||||
bytearr.append(DELTAS_ARE_ZERO | (runLength - 1))
|
||||
return pos
|
||||
|
||||
    @staticmethod
    def encodeDeltaRunAsBytes_(deltas, offset, bytearr, optimizeSize=True):
        """Emit a run of int8-encodable deltas starting at *offset*;
        return the index just past the run."""
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]
            if not (-128 <= value <= 127):
                break
            # Within a byte-encoded run of deltas, a single zero
            # is best stored literally as 0x00 value. However,
            # if are two or more zeroes in a sequence, it is
            # better to start a new run. For example, the sequence
            # of deltas [15, 15, 0, 15, 15] becomes 6 bytes
            # (04 0F 0F 00 0F 0F) when storing the zero value
            # literally, but 7 bytes (01 0F 0F 80 01 0F 0F)
            # when starting a new run.
            if (
                optimizeSize
                and value == 0
                and pos + 1 < numDeltas
                and deltas[pos + 1] == 0
            ):
                break
            pos += 1
        runLength = pos - offset
        # Runs are capped at 64 values per header byte.
        while runLength >= 64:
            bytearr.append(63)
            bytearr.extend(array.array("b", deltas[offset : offset + 64]))
            offset += 64
            runLength -= 64
        if runLength:
            bytearr.append(runLength - 1)
            bytearr.extend(array.array("b", deltas[offset:pos]))
        return pos
||||
    @staticmethod
    def encodeDeltaRunAsWords_(deltas, offset, bytearr, optimizeSize=True):
        """Emit a run of int16-encoded (big-endian) deltas starting at
        *offset*; return the index just past the run."""
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]

            # Within a word-encoded run of deltas, it is easiest
            # to start a new run (with a different encoding)
            # whenever we encounter a zero value. For example,
            # the sequence [0x6666, 0, 0x7777] needs 7 bytes when
            # storing the zero literally (42 66 66 00 00 77 77),
            # and equally 7 bytes when starting a new run
            # (40 66 66 80 40 77 77).
            if optimizeSize and value == 0:
                break

            # Within a word-encoded run of deltas, a single value
            # in the range (-128..127) should be encoded literally
            # because it is more compact. For example, the sequence
            # [0x6666, 2, 0x7777] becomes 7 bytes when storing
            # the value literally (42 66 66 00 02 77 77), but 8 bytes
            # when starting a new run (40 66 66 00 02 40 77 77).
            if (
                optimizeSize
                and (-128 <= value <= 127)
                and pos + 1 < numDeltas
                and (-128 <= deltas[pos + 1] <= 127)
            ):
                break

            if not (-32768 <= value <= 32767):
                break

            pos += 1
        runLength = pos - offset
        # Runs are capped at 64 values per header byte.
        while runLength >= 64:
            bytearr.append(DELTAS_ARE_WORDS | 63)
            a = array.array("h", deltas[offset : offset + 64])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
            offset += 64
            runLength -= 64
        if runLength:
            bytearr.append(DELTAS_ARE_WORDS | (runLength - 1))
            a = array.array("h", deltas[offset:pos])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
        return pos
||||
    @staticmethod
    def encodeDeltaRunAsLongs_(deltas, offset, bytearr, optimizeSize=True):
        """Emit a run of int32-encoded (big-endian) deltas starting at
        *offset*; return the index just past the run."""
        pos = offset
        numDeltas = len(deltas)
        while pos < numDeltas:
            value = deltas[pos]
            # Values fitting in 16 bits start a new, narrower run instead.
            if optimizeSize and -32768 <= value <= 32767:
                break
            pos += 1
        runLength = pos - offset
        # Runs are capped at 64 values per header byte.
        while runLength >= 64:
            bytearr.append(DELTAS_ARE_LONGS | 63)
            a = array.array("i", deltas[offset : offset + 64])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
            offset += 64
            runLength -= 64
        if runLength:
            bytearr.append(DELTAS_ARE_LONGS | (runLength - 1))
            a = array.array("i", deltas[offset:pos])
            if sys.byteorder != "big":
                a.byteswap()
            bytearr.extend(a)
        return pos
||||
    @staticmethod
    def decompileDeltas_(numDeltas, data, offset=0):
        """(numDeltas, data, offset) --> ([delta, delta, ...], newOffset)

        When numDeltas is None, keep decoding runs until *data* is
        exhausted instead of stopping at a fixed count.
        """
        result = []
        pos = offset
        while len(result) < numDeltas if numDeltas is not None else pos < len(data):
            runHeader = data[pos]
            pos += 1
            numDeltasInRun = (runHeader & DELTA_RUN_COUNT_MASK) + 1
            if (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_ZERO:
                # Zero runs carry no payload bytes.
                result.extend([0] * numDeltasInRun)
            else:
                # Payload width per the size bits: int32, int16, or int8.
                if (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_LONGS:
                    deltas = array.array("i")
                    deltasSize = numDeltasInRun * 4
                elif (runHeader & DELTAS_SIZE_MASK) == DELTAS_ARE_WORDS:
                    deltas = array.array("h")
                    deltasSize = numDeltasInRun * 2
                else:
                    deltas = array.array("b")
                    deltasSize = numDeltasInRun
                deltas.frombytes(data[pos : pos + deltasSize])
                if sys.byteorder != "big":
                    deltas.byteswap()
                assert len(deltas) == numDeltasInRun, (len(deltas), numDeltasInRun)
                pos += deltasSize
                result.extend(deltas)
        assert numDeltas is None or len(result) == numDeltas
        return (result, pos)
||||
@staticmethod
|
||||
def getTupleSize_(flags, axisCount):
|
||||
size = 4
|
||||
if (flags & EMBEDDED_PEAK_TUPLE) != 0:
|
||||
size += axisCount * 2
|
||||
if (flags & INTERMEDIATE_REGION) != 0:
|
||||
size += axisCount * 4
|
||||
return size
|
||||
|
||||
def getCoordWidth(self):
|
||||
"""Return 2 if coordinates are (x, y) as in gvar, 1 if single values
|
||||
as in cvar, or 0 if empty.
|
||||
"""
|
||||
firstDelta = next((c for c in self.coordinates if c is not None), None)
|
||||
if firstDelta is None:
|
||||
return 0 # empty or has no impact
|
||||
if type(firstDelta) in (int, float):
|
||||
return 1
|
||||
if type(firstDelta) is tuple and len(firstDelta) == 2:
|
||||
return 2
|
||||
raise TypeError(
|
||||
"invalid type of delta; expected (int or float) number, or "
|
||||
"Tuple[number, number]: %r" % firstDelta
|
||||
)
|
||||
|
||||
def scaleDeltas(self, scalar):
|
||||
if scalar == 1.0:
|
||||
return # no change
|
||||
coordWidth = self.getCoordWidth()
|
||||
self.coordinates = [
|
||||
(
|
||||
None
|
||||
if d is None
|
||||
else d * scalar if coordWidth == 1 else (d[0] * scalar, d[1] * scalar)
|
||||
)
|
||||
for d in self.coordinates
|
||||
]
|
||||
|
||||
    def roundDeltas(self):
        """Round every set delta in place using OpenType rounding (otRound);
        None entries are preserved."""
        coordWidth = self.getCoordWidth()
        self.coordinates = [
            (
                None
                if d is None
                else otRound(d) if coordWidth == 1 else (otRound(d[0]), otRound(d[1]))
            )
            for d in self.coordinates
        ]
||||
    def calcInferredDeltas(self, origCoords, endPts):
        """Replace None entries with deltas interpolated from neighbors
        (IUP); only meaningful for gvar-style (x, y) variations."""
        from fontTools.varLib.iup import iup_delta

        if self.getCoordWidth() == 1:
            raise TypeError("Only 'gvar' TupleVariation can have inferred deltas")
        if None in self.coordinates:
            if len(self.coordinates) != len(origCoords):
                raise ValueError(
                    "Expected len(origCoords) == %d; found %d"
                    % (len(self.coordinates), len(origCoords))
                )
            self.coordinates = iup_delta(self.coordinates, origCoords, endPts)
||||
    def optimize(self, origCoords, endPts, tolerance=0.5, isComposite=False):
        """Drop deltas that IUP can re-infer within *tolerance*, but only
        when that actually shrinks the compiled size."""
        from fontTools.varLib.iup import iup_delta_optimize

        if None in self.coordinates:
            return  # already optimized

        deltaOpt = iup_delta_optimize(
            self.coordinates, origCoords, endPts, tolerance=tolerance
        )
        if None in deltaOpt:
            if isComposite and all(d is None for d in deltaOpt):
                # Fix for macOS composites
                # https://github.com/fonttools/fonttools/issues/1381
                deltaOpt = [(0, 0)] + [None] * (len(deltaOpt) - 1)
            # Use "optimized" version only if smaller...
            varOpt = TupleVariation(self.axes, deltaOpt)

            # Shouldn't matter that this is different from fvar...?
            axisTags = sorted(self.axes.keys())
            tupleData, auxData = self.compile(axisTags)
            unoptimizedLength = len(tupleData) + len(auxData)
            tupleData, auxData = varOpt.compile(axisTags)
            optimizedLength = len(tupleData) + len(auxData)

            if optimizedLength < unoptimizedLength:
                self.coordinates = varOpt.coordinates
||||
    def __imul__(self, scalar):
        """In-place scalar multiplication; delegates to scaleDeltas."""
        self.scaleDeltas(scalar)
        return self
||||
    def __iadd__(self, other):
        """In-place element-wise sum of two delta lists of equal length;
        axes are not combined. Raises ValueError on length mismatch or
        when gvar deltas still contain inferred (None) points."""
        if not isinstance(other, TupleVariation):
            return NotImplemented
        deltas1 = self.coordinates
        length = len(deltas1)
        deltas2 = other.coordinates
        if len(deltas2) != length:
            raise ValueError("cannot sum TupleVariation deltas with different lengths")
        # 'None' values have different meanings in gvar vs cvar TupleVariations:
        # within the gvar, when deltas are not provided explicitly for some points,
        # they need to be inferred; whereas for the 'cvar' table, if deltas are not
        # provided for some CVT values, then no adjustments are made (i.e. None == 0).
        # Thus, we cannot sum deltas for gvar TupleVariations if they contain
        # inferred deltas (the latter need to be computed first using
        # 'calcInferredDeltas' method), but we can treat 'None' values in cvar
        # deltas as if they are zeros.
        if self.getCoordWidth() == 2:
            for i, d2 in zip(range(length), deltas2):
                d1 = deltas1[i]
                try:
                    deltas1[i] = (d1[0] + d2[0], d1[1] + d2[1])
                except TypeError:
                    raise ValueError("cannot sum gvar deltas with inferred points")
        else:
            for i, d2 in zip(range(length), deltas2):
                d1 = deltas1[i]
                if d1 is not None and d2 is not None:
                    deltas1[i] = d1 + d2
                elif d1 is None and d2 is not None:
                    deltas1[i] = d2
                # elif d2 is None do nothing
        return self
|
||||
|
||||
def decompileSharedTuples(axisTags, sharedTupleCount, data, offset):
    """Parse *sharedTupleCount* shared peak tuples from *data*, starting
    at *offset*.  Returns a list of axis-coordinate dicts."""
    tuples = []
    for _ in range(sharedTupleCount):
        coord, offset = TupleVariation.decompileCoord_(axisTags, data, offset)
        tuples.append(coord)
    return tuples
|
||||
|
||||
|
||||
def compileSharedTuples(
    axisTags, variations, MAX_NUM_SHARED_COORDS=TUPLE_INDEX_MASK + 1
):
    """Select compiled peak tuples worth sharing across variations.

    Only coordinates that occur more than once are shared.  Results are
    ordered by decreasing frequency (ties broken by the compiled bytes)
    and capped at MAX_NUM_SHARED_COORDS entries.
    """
    frequencies = Counter(var.compileCoord(axisTags) for var in variations)
    # In python < 3.7, most_common() ordering is non-deterministic,
    # so apply a sort to make sure the ordering is consistent.
    ranked = sorted(
        frequencies.most_common(MAX_NUM_SHARED_COORDS),
        key=lambda item: (-item[1], item[0]),
    )
    return [coord for coord, count in ranked if count > 1]
|
||||
|
||||
|
||||
def compileTupleVariationStore(
    variations,
    pointCount,
    axisTags,
    sharedTupleIndices,
    useSharedPoints=True,
    *,
    optimizeSize=True,
):
    """Compile a list of TupleVariation objects into a serialized store.

    Returns a (tupleVariationCount, tuples, data) triple: the count word
    (with TUPLES_SHARE_POINT_NUMBERS OR-ed in when point sharing is used),
    the concatenated TupleVariationHeaders, and the concatenated
    serialized point/delta data.
    """
    # pointCount is actually unused. Keeping for API compat.
    del pointCount
    newVariations = []
    pointDatas = []
    # Compile all points and figure out sharing if desired
    sharedPoints = None

    # Collect, count, and compile point-sets for all variation sets
    pointSetCount = defaultdict(int)
    for v in variations:
        points = v.getUsedPoints()
        if points is None:  # Empty variations
            continue
        pointSetCount[points] += 1
        newVariations.append(v)
        pointDatas.append(points)
    variations = newVariations
    del newVariations

    if not variations:
        return (0, b"", b"")

    # All variations must cover the same number of points/CVT entries.
    n = len(variations[0].coordinates)
    assert all(
        len(v.coordinates) == n for v in variations
    ), "Variation sets have different sizes"

    compiledPoints = {
        pointSet: TupleVariation.compilePoints(pointSet) for pointSet in pointSetCount
    }

    tupleVariationCount = len(variations)
    tuples = []
    data = []

    if useSharedPoints:
        # Find point-set which saves most bytes.
        # Savings = compiled size times (occurrences - 1), since one copy
        # is still stored as the shared point data.
        def key(pn):
            pointSet = pn[0]
            count = pn[1]
            return len(compiledPoints[pointSet]) * (count - 1)

        sharedPoints = max(pointSetCount.items(), key=key)[0]

        # The shared point data goes first in the serialized data stream.
        data.append(compiledPoints[sharedPoints])
        tupleVariationCount |= TUPLES_SHARE_POINT_NUMBERS

        # b'' implies "use shared points"
        pointDatas = [
            compiledPoints[points] if points != sharedPoints else b""
            for points in pointDatas
        ]

    for v, p in zip(variations, pointDatas):
        thisTuple, thisData = v.compile(
            axisTags, sharedTupleIndices, pointData=p, optimizeSize=optimizeSize
        )

        tuples.append(thisTuple)
        data.append(thisData)

    tuples = b"".join(tuples)
    data = b"".join(data)
    return tupleVariationCount, tuples, data
|
||||
|
||||
|
||||
def decompileTupleVariationStore(
    tableTag,
    axisTags,
    tupleVariationCount,
    pointCount,
    sharedTuples,
    data,
    pos,
    dataPos,
):
    """Parse a serialized tuple variation store into TupleVariation objects.

    *pos* points at the first TupleVariationHeader, *dataPos* at the
    serialized point/delta data.  Returns a list of TupleVariation.
    """
    numAxes = len(axisTags)
    result = []
    # High bit of the count word signals that a shared point-number list
    # precedes the per-tuple data.
    if (tupleVariationCount & TUPLES_SHARE_POINT_NUMBERS) != 0:
        sharedPoints, dataPos = TupleVariation.decompilePoints_(
            pointCount, data, dataPos, tableTag
        )
    else:
        sharedPoints = []
    for _ in range(tupleVariationCount & TUPLE_COUNT_MASK):
        dataSize, flags = struct.unpack(">HH", data[pos : pos + 4])
        tupleSize = TupleVariation.getTupleSize_(flags, numAxes)
        # Header (plus embedded coordinates) and the variation's slice of
        # the serialized data are parsed separately.
        tupleData = data[pos : pos + tupleSize]
        pointDeltaData = data[dataPos : dataPos + dataSize]
        result.append(
            decompileTupleVariation_(
                pointCount,
                sharedTuples,
                sharedPoints,
                tableTag,
                axisTags,
                tupleData,
                pointDeltaData,
            )
        )
        pos += tupleSize
        dataPos += dataSize
    return result
|
||||
|
||||
|
||||
def decompileTupleVariation_(
    pointCount, sharedTuples, sharedPoints, tableTag, axisTags, data, tupleData
):
    """Parse a single serialized tuple variation.

    Note the parameter naming: *data* holds the TupleVariationHeader
    (with any embedded peak/intermediate coordinates), while *tupleData*
    holds this variation's serialized point numbers and deltas.
    Returns a TupleVariation whose deltas list has one entry per
    point/CVT value (None where no delta applies).
    """
    assert tableTag in ("cvar", "gvar"), tableTag
    # Bytes 0-1 (variationDataSize) were consumed by the caller; flags
    # live in bytes 2-3 of the header.
    flags = struct.unpack(">H", data[2:4])[0]
    pos = 4
    if (flags & EMBEDDED_PEAK_TUPLE) == 0:
        # Peak tuple is shared; the low bits of flags index sharedTuples.
        peak = sharedTuples[flags & TUPLE_INDEX_MASK]
    else:
        peak, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
    if (flags & INTERMEDIATE_REGION) != 0:
        start, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
        end, pos = TupleVariation.decompileCoord_(axisTags, data, pos)
    else:
        start, end = inferRegion_(peak)
    axes = {}
    for axis in axisTags:
        region = start[axis], peak[axis], end[axis]
        # Axes where the whole region is zero contribute nothing.
        if region != (0.0, 0.0, 0.0):
            axes[axis] = region
    # From here on we read from tupleData (point numbers and deltas),
    # so restart the cursor.
    pos = 0
    if (flags & PRIVATE_POINT_NUMBERS) != 0:
        points, pos = TupleVariation.decompilePoints_(
            pointCount, tupleData, pos, tableTag
        )
    else:
        points = sharedPoints

    deltas = [None] * pointCount

    if tableTag == "cvar":
        # cvar: one scalar delta per referenced CVT entry.
        deltas_cvt, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        for p, delta in zip(points, deltas_cvt):
            if 0 <= p < pointCount:
                deltas[p] = delta

    elif tableTag == "gvar":
        # gvar: all x deltas, then all y deltas.
        deltas_x, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        deltas_y, pos = TupleVariation.decompileDeltas_(len(points), tupleData, pos)
        for p, x, y in zip(points, deltas_x, deltas_y):
            if 0 <= p < pointCount:
                deltas[p] = (x, y)

    return TupleVariation(axes, deltas)
|
||||
|
||||
|
||||
def inferRegion_(peak):
    """Infer start and end for a (non-intermediate) region

    This helper function computes the applicability region for
    variation tuples whose INTERMEDIATE_REGION flag is not set in the
    TupleVariationHeader structure. Variation tuples apply only to
    certain regions of the variation space; outside that region, the
    tuple has no effect. To make the binary encoding more compact,
    TupleVariationHeaders can omit the intermediateStartTuple and
    intermediateEndTuple fields.
    """
    # Per axis: the region spans from the peak (if negative) or zero
    # up to zero (if the peak is negative) or the peak.
    start = {axis: min(value, 0.0) for axis, value in peak.items()}
    end = {axis: max(value, 0.0) for axis, value in peak.items()}
    return (start, end)
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_V_A_R_C_(BaseTTXConverter):
    """Variable Composites ('VARC') table.

    All decompile/compile/XML behavior is inherited from BaseTTXConverter.
    """

    pass
|
||||
@ -0,0 +1,241 @@
|
||||
from . import DefaultTable
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
import struct
|
||||
|
||||
VDMX_HeaderFmt = """
|
||||
> # big endian
|
||||
version: H # Version number (0 or 1)
|
||||
numRecs: H # Number of VDMX groups present
|
||||
numRatios: H # Number of aspect ratio groupings
|
||||
"""
|
||||
# the VDMX header is followed by an array of RatRange[numRatios] (i.e. aspect
|
||||
# ratio ranges);
|
||||
VDMX_RatRangeFmt = """
|
||||
> # big endian
|
||||
bCharSet: B # Character set
|
||||
xRatio: B # Value to use for x-Ratio
|
||||
yStartRatio: B # Starting y-Ratio value
|
||||
yEndRatio: B # Ending y-Ratio value
|
||||
"""
|
||||
# followed by an array of offset[numRatios] from start of VDMX table to the
|
||||
# VDMX Group for this ratio range (offsets will be re-calculated on compile);
|
||||
# followed by an array of Group[numRecs] records;
|
||||
VDMX_GroupFmt = """
|
||||
> # big endian
|
||||
recs: H # Number of height records in this group
|
||||
startsz: B # Starting yPelHeight
|
||||
endsz: B # Ending yPelHeight
|
||||
"""
|
||||
# followed by an array of vTable[recs] records.
|
||||
VDMX_vTableFmt = """
|
||||
> # big endian
|
||||
yPelHeight: H # yPelHeight to which values apply
|
||||
yMax: h # Maximum value (in pels) for this yPelHeight
|
||||
yMin: h # Minimum value (in pels) for this yPelHeight
|
||||
"""
|
||||
|
||||
|
||||
class table_V_D_M_X_(DefaultTable.DefaultTable):
    """Vertical Device Metrics ('VDMX') table.

    Maps aspect-ratio ranges to groups of per-ppem (yMax, yMin) records.
    Each group is stored as a dict of ``(yMax, yMin)`` tuples keyed by
    yPelHeight; ``self.ratRanges`` maps each ratio range to its group via
    the ``groupIndex`` key.
    """

    def decompile(self, data, ttFont):
        """Parse the binary VDMX table into ratRanges and groups."""
        pos = 0  # track current position from to start of VDMX table
        dummy, data = sstruct.unpack2(VDMX_HeaderFmt, data, self)
        pos += sstruct.calcsize(VDMX_HeaderFmt)
        self.ratRanges = []
        for i in range(self.numRatios):
            ratio, data = sstruct.unpack2(VDMX_RatRangeFmt, data)
            pos += sstruct.calcsize(VDMX_RatRangeFmt)
            # the mapping between a ratio and a group is defined further below
            ratio["groupIndex"] = None
            self.ratRanges.append(ratio)
        lenOffset = struct.calcsize(">H")
        _offsets = []  # temporarily store offsets to groups
        for i in range(self.numRatios):
            offset = struct.unpack(">H", data[0:lenOffset])[0]
            data = data[lenOffset:]
            pos += lenOffset
            _offsets.append(offset)
        self.groups = []
        for groupIndex in range(self.numRecs):
            # the offset to this group from beginning of the VDMX table
            currOffset = pos
            group, data = sstruct.unpack2(VDMX_GroupFmt, data)
            # the group length and bounding sizes are re-calculated on compile
            recs = group.pop("recs")
            startsz = group.pop("startsz")
            endsz = group.pop("endsz")
            pos += sstruct.calcsize(VDMX_GroupFmt)
            for j in range(recs):
                vTable, data = sstruct.unpack2(VDMX_vTableFmt, data)
                vTableLength = sstruct.calcsize(VDMX_vTableFmt)
                pos += vTableLength
                # group is a dict of (yMax, yMin) tuples keyed by yPelHeight
                group[vTable["yPelHeight"]] = (vTable["yMax"], vTable["yMin"])
            # make sure startsz and endsz match the calculated values
            minSize = min(group.keys())
            maxSize = max(group.keys())
            # BUGFIX: the failure messages previously read group.startsz /
            # group.endsz, but 'group' is a dict — a failing assertion would
            # have raised AttributeError instead of the intended message.
            assert (
                startsz == minSize
            ), "startsz (%s) must equal min yPelHeight (%s): group %d" % (
                startsz,
                minSize,
                groupIndex,
            )
            assert (
                endsz == maxSize
            ), "endsz (%s) must equal max yPelHeight (%s): group %d" % (
                endsz,
                maxSize,
                groupIndex,
            )
            self.groups.append(group)
            # match the defined offsets with the current group's offset
            for offsetIndex, offsetValue in enumerate(_offsets):
                # when numRecs < numRatios there can more than one ratio range
                # sharing the same VDMX group
                if currOffset == offsetValue:
                    # map the group with the ratio range that has the same
                    # index as the offset to that group (it took me a while..)
                    self.ratRanges[offsetIndex]["groupIndex"] = groupIndex
        # check that all ratio ranges have a group
        for i in range(self.numRatios):
            ratio = self.ratRanges[i]
            if ratio["groupIndex"] is None:
                from fontTools import ttLib

                raise ttLib.TTLibError("no group defined for ratRange %d" % i)

    def _getOffsets(self):
        """
        Calculate offsets to VDMX_Group records.
        For each ratRange return a list of offset values from the beginning of
        the VDMX table to a VDMX_Group.
        """
        lenHeader = sstruct.calcsize(VDMX_HeaderFmt)
        lenRatRange = sstruct.calcsize(VDMX_RatRangeFmt)
        lenOffset = struct.calcsize(">H")
        lenGroupHeader = sstruct.calcsize(VDMX_GroupFmt)
        lenVTable = sstruct.calcsize(VDMX_vTableFmt)
        # offset to the first group
        pos = lenHeader + self.numRatios * lenRatRange + self.numRatios * lenOffset
        groupOffsets = []
        for group in self.groups:
            groupOffsets.append(pos)
            lenGroup = lenGroupHeader + len(group) * lenVTable
            pos += lenGroup  # offset to next group
        offsets = []
        for ratio in self.ratRanges:
            groupIndex = ratio["groupIndex"]
            offsets.append(groupOffsets[groupIndex])
        return offsets

    def compile(self, ttFont):
        """Serialize the table; group offsets and sizes are recomputed."""
        if not (self.version == 0 or self.version == 1):
            from fontTools import ttLib

            raise ttLib.TTLibError(
                "unknown format for VDMX table: version %s" % self.version
            )
        data = sstruct.pack(VDMX_HeaderFmt, self)
        for ratio in self.ratRanges:
            data += sstruct.pack(VDMX_RatRangeFmt, ratio)
        # recalculate offsets to VDMX groups
        for offset in self._getOffsets():
            data += struct.pack(">H", offset)
        for group in self.groups:
            # recs/startsz/endsz are derived from the group dict itself
            recs = len(group)
            startsz = min(group.keys())
            endsz = max(group.keys())
            gHeader = {"recs": recs, "startsz": startsz, "endsz": endsz}
            data += sstruct.pack(VDMX_GroupFmt, gHeader)
            for yPelHeight, (yMax, yMin) in sorted(group.items()):
                vTable = {"yPelHeight": yPelHeight, "yMax": yMax, "yMin": yMin}
                data += sstruct.pack(VDMX_vTableFmt, vTable)
        return data

    def toXML(self, writer, ttFont):
        """Dump version, ratRanges and groups as TTX elements."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.begintag("ratRanges")
        writer.newline()
        for ratio in self.ratRanges:
            groupIndex = ratio["groupIndex"]
            writer.simpletag(
                "ratRange",
                bCharSet=ratio["bCharSet"],
                xRatio=ratio["xRatio"],
                yStartRatio=ratio["yStartRatio"],
                yEndRatio=ratio["yEndRatio"],
                groupIndex=groupIndex,
            )
            writer.newline()
        writer.endtag("ratRanges")
        writer.newline()
        writer.begintag("groups")
        writer.newline()
        for groupIndex in range(self.numRecs):
            group = self.groups[groupIndex]
            recs = len(group)
            startsz = min(group.keys())
            endsz = max(group.keys())
            writer.begintag("group", index=groupIndex)
            writer.newline()
            writer.comment("recs=%d, startsz=%d, endsz=%d" % (recs, startsz, endsz))
            writer.newline()
            for yPelHeight, (yMax, yMin) in sorted(group.items()):
                writer.simpletag(
                    "record",
                    [("yPelHeight", yPelHeight), ("yMax", yMax), ("yMin", yMin)],
                )
                writer.newline()
            writer.endtag("group")
            writer.newline()
        writer.endtag("groups")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX elements; counters are re-derived."""
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "ratRanges":
            if not hasattr(self, "ratRanges"):
                self.ratRanges = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "ratRange":
                    # numRatios is recounted as ratRange elements are seen
                    if not hasattr(self, "numRatios"):
                        self.numRatios = 1
                    else:
                        self.numRatios += 1
                    ratio = {
                        "bCharSet": safeEval(attrs["bCharSet"]),
                        "xRatio": safeEval(attrs["xRatio"]),
                        "yStartRatio": safeEval(attrs["yStartRatio"]),
                        "yEndRatio": safeEval(attrs["yEndRatio"]),
                        "groupIndex": safeEval(attrs["groupIndex"]),
                    }
                    self.ratRanges.append(ratio)
        elif name == "groups":
            if not hasattr(self, "groups"):
                self.groups = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "group":
                    # numRecs is recounted as group elements are seen
                    if not hasattr(self, "numRecs"):
                        self.numRecs = 1
                    else:
                        self.numRecs += 1
                    group = {}
                    for element in content:
                        if not isinstance(element, tuple):
                            continue
                        name, attrs, content = element
                        if name == "record":
                            yPelHeight = safeEval(attrs["yPelHeight"])
                            yMax = safeEval(attrs["yMax"])
                            yMin = safeEval(attrs["yMin"])
                            group[yPelHeight] = (yMax, yMin)
                    self.groups.append(group)
|
||||
@ -0,0 +1,158 @@
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
|
||||
class table_V_O_R_G_(DefaultTable.DefaultTable):
    """This table is structured so that you can treat it like a dictionary keyed by glyph name.

    ``ttFont['VORG'][<glyphName>]`` will return the vertical origin for any glyph.

    ``ttFont['VORG'][<glyphName>] = <value>`` will set the vertical origin for any glyph.
    """

    def decompile(self, data, ttFont):
        """Parse the binary VORG table into self.VOriginRecords."""
        self.getGlyphName = (
            ttFont.getGlyphName
        )  # for use in get/set item functions, for access by GID
        (
            self.majorVersion,
            self.minorVersion,
            self.defaultVertOriginY,
            self.numVertOriginYMetrics,
        ) = struct.unpack(">HHhH", data[:8])
        assert (
            self.majorVersion <= 1
        ), "Major version of VORG table is higher than I know how to handle"
        data = data[8:]
        vids = []  # vertical origin values, parallel to gids
        gids = []
        pos = 0
        for i in range(self.numVertOriginYMetrics):
            gid, vOrigin = struct.unpack(">Hh", data[pos : pos + 4])
            pos += 4
            gids.append(gid)
            vids.append(vOrigin)

        self.VOriginRecords = vOrig = {}
        glyphOrder = ttFont.getGlyphOrder()
        try:
            names = [glyphOrder[gid] for gid in gids]
        except IndexError:
            # GID out of range of the current glyph order; fall back to
            # per-GID name lookup (may synthesize glyphNNNNN names).
            getGlyphName = self.getGlyphName
            names = map(getGlyphName, gids)

        for name, vid in zip(names, vids):
            vOrig[name] = vid

    def compile(self, ttFont):
        """Serialize self.VOriginRecords; records are sorted by GID."""
        vorgs = list(self.VOriginRecords.values())
        names = list(self.VOriginRecords.keys())
        nameMap = ttFont.getReverseGlyphMap()
        try:
            gids = [nameMap[name] for name in names]
        except KeyError:
            # Stale reverse map (e.g. glyph order changed); rebuild and retry.
            nameMap = ttFont.getReverseGlyphMap(rebuild=True)
            gids = [nameMap[name] for name in names]
        vOriginTable = list(zip(gids, vorgs))
        self.numVertOriginYMetrics = len(vorgs)
        vOriginTable.sort()  # must be in ascending GID order
        dataList = [struct.pack(">Hh", rec[0], rec[1]) for rec in vOriginTable]
        header = struct.pack(
            ">HHhH",
            self.majorVersion,
            self.minorVersion,
            self.defaultVertOriginY,
            self.numVertOriginYMetrics,
        )
        dataList.insert(0, header)
        data = bytesjoin(dataList)
        return data

    def toXML(self, writer, ttFont):
        """Dump header fields and per-glyph VOriginRecord elements."""
        writer.simpletag("majorVersion", value=self.majorVersion)
        writer.newline()
        writer.simpletag("minorVersion", value=self.minorVersion)
        writer.newline()
        writer.simpletag("defaultVertOriginY", value=self.defaultVertOriginY)
        writer.newline()
        writer.simpletag("numVertOriginYMetrics", value=self.numVertOriginYMetrics)
        writer.newline()
        vOriginTable = []
        glyphNames = self.VOriginRecords.keys()
        for glyphName in glyphNames:
            try:
                gid = ttFont.getGlyphID(glyphName)
            # BUGFIX: was a bare 'except:', which also swallowed
            # KeyboardInterrupt/SystemExit; narrowed to Exception.
            except Exception:
                assert 0, (
                    "VORG table contains a glyph name not in ttFont.getGlyphNames(): "
                    + str(glyphName)
                )
            vOriginTable.append([gid, glyphName, self.VOriginRecords[glyphName]])
        vOriginTable.sort()
        for entry in vOriginTable:
            vOriginRec = VOriginRecord(entry[1], entry[2])
            vOriginRec.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild the table from TTX elements."""
        if not hasattr(self, "VOriginRecords"):
            self.VOriginRecords = {}
        self.getGlyphName = (
            ttFont.getGlyphName
        )  # for use in get/set item functions, for access by GID
        if name == "VOriginRecord":
            vOriginRec = VOriginRecord()
            for element in content:
                if isinstance(element, str):
                    continue
                name, attrs, content = element
                vOriginRec.fromXML(name, attrs, content, ttFont)
            self.VOriginRecords[vOriginRec.glyphName] = vOriginRec.vOrigin
        elif "value" in attrs:
            setattr(self, name, safeEval(attrs["value"]))

    def __getitem__(self, glyphSelector):
        """Return the vertical origin for a glyph name or GID; falls back
        to defaultVertOriginY for glyphs without an explicit record."""
        if isinstance(glyphSelector, int):
            # its a gid, convert to glyph name
            glyphSelector = self.getGlyphName(glyphSelector)

        if glyphSelector not in self.VOriginRecords:
            return self.defaultVertOriginY

        return self.VOriginRecords[glyphSelector]

    def __setitem__(self, glyphSelector, value):
        """Set the vertical origin for a glyph; values equal to the table
        default are stored implicitly (the explicit record is dropped)."""
        if isinstance(glyphSelector, int):
            # its a gid, convert to glyph name
            glyphSelector = self.getGlyphName(glyphSelector)

        if value != self.defaultVertOriginY:
            self.VOriginRecords[glyphSelector] = value
        elif glyphSelector in self.VOriginRecords:
            del self.VOriginRecords[glyphSelector]

    def __delitem__(self, glyphSelector):
        del self.VOriginRecords[glyphSelector]
|
||||
|
||||
|
||||
class VOriginRecord(object):
    """A (glyphName, vOrigin) pair used for 'VORG' TTX round-tripping."""

    def __init__(self, name=None, vOrigin=None):
        self.glyphName = name
        self.vOrigin = vOrigin

    def toXML(self, writer, ttFont):
        """Write this record as a <VOriginRecord> element."""
        writer.begintag("VOriginRecord")
        writer.newline()
        for tag, value in (("glyphName", self.glyphName), ("vOrigin", self.vOrigin)):
            writer.simpletag(tag, value=value)
            writer.newline()
        writer.endtag("VOriginRecord")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Read one child element; glyphName stays a string, everything
        else is evaluated as a literal."""
        value = attrs["value"]
        if name == "glyphName":
            setattr(self, name, value)
        else:
            setattr(self, name, safeEval(value))
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table_V_V_A_R_(BaseTTXConverter):
    """Vertical Metrics Variations ('VVAR') table.

    All decompile/compile/XML behavior is inherited from BaseTTXConverter.
    """

    pass
|
||||
@ -0,0 +1,97 @@
|
||||
# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.
|
||||
def _moduleFinderHint():
    """Dummy function to let modulefinder know what tables may be
    dynamically imported. Generated by MetaTools/buildTableList.py.

    >>> _moduleFinderHint()
    """
    # NOTE: these imports are intentionally "unused" — their only purpose
    # is to make every table module statically visible to bundlers.
    from . import B_A_S_E_
    from . import C_B_D_T_
    from . import C_B_L_C_
    from . import C_F_F_
    from . import C_F_F__2
    from . import C_O_L_R_
    from . import C_P_A_L_
    from . import D_S_I_G_
    from . import D__e_b_g
    from . import E_B_D_T_
    from . import E_B_L_C_
    from . import F_F_T_M_
    from . import F__e_a_t
    from . import G_D_E_F_
    from . import G_M_A_P_
    from . import G_P_K_G_
    from . import G_P_O_S_
    from . import G_S_U_B_
    from . import G__l_a_t
    from . import G__l_o_c
    from . import H_V_A_R_
    from . import J_S_T_F_
    from . import L_T_S_H_
    from . import M_A_T_H_
    from . import M_E_T_A_
    from . import M_V_A_R_
    from . import O_S_2f_2
    from . import S_I_N_G_
    from . import S_T_A_T_
    from . import S_V_G_
    from . import S__i_l_f
    from . import S__i_l_l
    from . import T_S_I_B_
    from . import T_S_I_C_
    from . import T_S_I_D_
    from . import T_S_I_J_
    from . import T_S_I_P_
    from . import T_S_I_S_
    from . import T_S_I_V_
    from . import T_S_I__0
    from . import T_S_I__1
    from . import T_S_I__2
    from . import T_S_I__3
    from . import T_S_I__5
    from . import T_T_F_A_
    from . import V_A_R_C_
    from . import V_D_M_X_
    from . import V_O_R_G_
    from . import V_V_A_R_
    from . import _a_n_k_r
    from . import _a_v_a_r
    from . import _b_s_l_n
    from . import _c_i_d_g
    from . import _c_m_a_p
    from . import _c_v_a_r
    from . import _c_v_t
    from . import _f_e_a_t
    from . import _f_p_g_m
    from . import _f_v_a_r
    from . import _g_a_s_p
    from . import _g_c_i_d
    from . import _g_l_y_f
    from . import _g_v_a_r
    from . import _h_d_m_x
    from . import _h_e_a_d
    from . import _h_h_e_a
    from . import _h_m_t_x
    from . import _k_e_r_n
    from . import _l_c_a_r
    from . import _l_o_c_a
    from . import _l_t_a_g
    from . import _m_a_x_p
    from . import _m_e_t_a
    from . import _m_o_r_t
    from . import _m_o_r_x
    from . import _n_a_m_e
    from . import _o_p_b_d
    from . import _p_o_s_t
    from . import _p_r_e_p
    from . import _p_r_o_p
    from . import _s_b_i_x
    from . import _t_r_a_k
    from . import _v_h_e_a
    from . import _v_m_t_x
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run the module doctests; the exit status is the failure count.
    import doctest
    import sys

    sys.exit(doctest.testmod().failed)
|
||||
@ -0,0 +1,14 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__a_n_k_r(BaseTTXConverter):
    """
    The anchor point table provides a way to define anchor points.
    These are points within the coordinate space of a given glyph,
    independent of the control points used to render the glyph.
    Anchor points are used in conjunction with the 'kerx' table.

    See also https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ankr.html
    """

    # All parsing/serialization behavior is inherited from BaseTTXConverter.
    pass
|
||||
@ -0,0 +1,189 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import (
|
||||
fixedToFloat as fi2fl,
|
||||
floatToFixed as fl2fi,
|
||||
floatToFixedToStr as fl2str,
|
||||
strToFixedToFloat as str2fl,
|
||||
)
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools.varLib.models import piecewiseLinearMap
|
||||
from fontTools.varLib.varStore import VarStoreInstancer, NO_VARIATION_INDEX
|
||||
from fontTools.ttLib import TTLibError
|
||||
from . import DefaultTable
|
||||
from . import otTables
|
||||
import struct
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__a_v_a_r(BaseTTXConverter):
    """Axis Variations Table

    This class represents the ``avar`` table of a variable font. The object has one
    substantive attribute, ``segments``, which maps axis tags to a segments dictionary::

        >>> font["avar"].segments   # doctest: +SKIP
        {'wght': {-1.0: -1.0,
          0.0: 0.0,
          0.125: 0.11444091796875,
          0.25: 0.23492431640625,
          0.5: 0.35540771484375,
          0.625: 0.5,
          0.75: 0.6566162109375,
          0.875: 0.81927490234375,
          1.0: 1.0},
         'ital': {-1.0: -1.0, 0.0: 0.0, 1.0: 1.0}}

    Notice that the segments dictionary is made up of normalized values. A valid
    ``avar`` segment mapping must contain the entries ``-1.0: -1.0, 0.0: 0.0, 1.0: 1.0``.
    fontTools does not enforce this, so it is your responsibility to ensure that
    mappings are valid.
    """

    dependencies = ["fvar"]

    def __init__(self, tag=None):
        super().__init__(tag)
        # axisTag -> {fromCoordinate: toCoordinate}
        self.segments = {}

    def compile(self, ttFont):
        """Build self.table from self.segments, then serialize via otBase."""
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        if not hasattr(self, "table"):
            self.table = otTables.avar()
        if not hasattr(self.table, "Reserved"):
            self.table.Reserved = 0
        # Version packs major into the high word, minor into the low word.
        self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr(
            self, "minorVersion", 0
        )
        self.table.AxisCount = len(axisTags)
        self.table.AxisSegmentMap = []
        for axis in axisTags:
            mappings = self.segments[axis]
            segmentMap = otTables.AxisSegmentMap()
            segmentMap.PositionMapCount = len(mappings)
            segmentMap.AxisValueMap = []
            for key, value in sorted(mappings.items()):
                valueMap = otTables.AxisValueMap()
                valueMap.FromCoordinate = key
                valueMap.ToCoordinate = value
                segmentMap.AxisValueMap.append(valueMap)
            self.table.AxisSegmentMap.append(segmentMap)
        return super().compile(ttFont)

    def decompile(self, data, ttFont):
        """Parse via otBase, then mirror self.table into self.segments."""
        super().decompile(data, ttFont)
        self.majorVersion = self.table.Version >> 16
        self.minorVersion = self.table.Version & 0xFFFF
        if self.majorVersion not in (1, 2):
            raise NotImplementedError("Unknown avar table version")
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        # Pre-seed every fvar axis so axes without a segment map still
        # appear (the zip below only covers AxisSegmentMap entries).
        for axis in axisTags:
            self.segments[axis] = {}
        for axis, segmentMap in zip(axisTags, self.table.AxisSegmentMap):
            segments = self.segments[axis] = {}
            for segment in segmentMap.AxisValueMap:
                segments[segment.FromCoordinate] = segment.ToCoordinate

    def toXML(self, writer, ttFont):
        """Dump version, per-axis segment mappings, and (v2) variation data."""
        writer.simpletag(
            "version",
            major=getattr(self, "majorVersion", 1),
            minor=getattr(self, "minorVersion", 0),
        )
        writer.newline()
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        for axis in axisTags:
            writer.begintag("segment", axis=axis)
            writer.newline()
            for key, value in sorted(self.segments[axis].items()):
                # Coordinates are written as F2Dot14-rounded strings.
                key = fl2str(key, 14)
                value = fl2str(value, 14)
                writer.simpletag("mapping", **{"from": key, "to": value})
                writer.newline()
            writer.endtag("segment")
            writer.newline()
        if getattr(self, "majorVersion", 1) >= 2:
            if self.table.VarIdxMap:
                self.table.VarIdxMap.toXML(writer, ttFont, name="VarIdxMap")
            if self.table.VarStore:
                self.table.VarStore.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild version, segments, and (v2) variation data from TTX."""
        if not hasattr(self, "table"):
            self.table = otTables.avar()
        if not hasattr(self.table, "Reserved"):
            self.table.Reserved = 0
        if name == "version":
            self.majorVersion = safeEval(attrs["major"])
            self.minorVersion = safeEval(attrs["minor"])
            self.table.Version = (getattr(self, "majorVersion", 1) << 16) | getattr(
                self, "minorVersion", 0
            )
        elif name == "segment":
            axis = attrs["axis"]
            segment = self.segments[axis] = {}
            for element in content:
                if isinstance(element, tuple):
                    elementName, elementAttrs, _ = element
                    if elementName == "mapping":
                        fromValue = str2fl(elementAttrs["from"], 14)
                        toValue = str2fl(elementAttrs["to"], 14)
                        if fromValue in segment:
                            log.warning(
                                "duplicate entry for %s in axis '%s'", fromValue, axis
                            )
                        segment[fromValue] = toValue
        else:
            # VarIdxMap / VarStore and anything else is handled generically.
            super().fromXML(name, attrs, content, ttFont)

    def renormalizeLocation(self, location, font):
        """Apply the avar mapping to an already fvar-normalized *location*.

        Version 1: per-axis piecewise-linear segment mapping only.
        Version 2: additionally applies VarIdxMap/VarStore deltas in
        F2Dot14 space, clamping each coordinate to [-1.0, 1.0].
        """

        majorVersion = getattr(self, "majorVersion", 1)

        if majorVersion not in (1, 2):
            raise NotImplementedError("Unknown avar table version")

        avarSegments = self.segments
        mappedLocation = {}
        for axisTag, value in location.items():
            avarMapping = avarSegments.get(axisTag, None)
            if avarMapping is not None:
                value = piecewiseLinearMap(value, avarMapping)
            mappedLocation[axisTag] = value

        if majorVersion < 2:
            return mappedLocation

        # Version 2

        varIdxMap = self.table.VarIdxMap
        varStore = self.table.VarStore
        axes = font["fvar"].axes
        if varStore is not None:
            instancer = VarStoreInstancer(varStore, axes, mappedLocation)

        # Work in fixed-point F2Dot14, one coordinate per fvar axis.
        coords = list(fl2fi(mappedLocation.get(axis.axisTag, 0), 14) for axis in axes)

        out = []
        for varIdx, v in enumerate(coords):

            if varIdxMap is not None:
                varIdx = varIdxMap[varIdx]

            if varStore is not None:
                delta = instancer[varIdx]
                v += otRound(delta)
                # Clamp to the valid normalized range [-1.0, 1.0] in F2Dot14.
                v = min(max(v, -(1 << 14)), +(1 << 14))

            out.append(v)

        # Zero coordinates are dropped (default axis position).
        mappedLocation = {
            axis.axisTag: fi2fl(v, 14) for v, axis in zip(out, axes) if v != 0
        }

        return mappedLocation
|
||||
@ -0,0 +1,6 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6bsln.html
|
||||
class table__b_s_l_n(BaseTTXConverter):
    """The AAT ``bsln`` (baseline) table.  Parsing and serialization are
    handled generically by :class:`BaseTTXConverter`; see the Apple
    TrueType Reference Manual link above for the table's semantics."""

    pass
|
||||
@ -0,0 +1,19 @@
|
||||
# coding: utf-8
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__c_i_d_g(BaseTTXConverter):
    """The AAT ``cidg`` table has almost the same structure as ``gidc``,
    just mapping CIDs to GlyphIDs instead of the reverse direction.

    It is useful for fonts that may be used by a PDF renderer in lieu of
    a font reference with a known glyph collection but no subsetted
    glyphs. For instance, a PDF can say “please use a font conforming
    to Adobe-Japan-1”; the ``cidg`` mapping is necessary if the font is,
    say, a TrueType font. ``gidc`` is lossy for this purpose and is
    obsoleted by ``cidg``.

    For example, the first font in ``/System/Library/Fonts/PingFang.ttc``
    (which Apple ships pre-installed on MacOS 10.12.6) has a ``cidg`` table."""

    # Parsing/serialization is fully data-driven via BaseTTXConverter.
    pass
|
||||
1576
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/_c_m_a_p.py
Normal file
1576
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/_c_m_a_p.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,86 @@
|
||||
from . import DefaultTable
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytesjoin
|
||||
from fontTools.ttLib.tables.TupleVariation import (
|
||||
compileTupleVariationStore,
|
||||
decompileTupleVariationStore,
|
||||
TupleVariation,
|
||||
)
|
||||
|
||||
|
||||
# https://www.microsoft.com/typography/otspec/cvar.htm
|
||||
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6cvar.html
|
||||
|
||||
# sstruct description of the fixed-size 'cvar' table header.
CVAR_HEADER_FORMAT = """
    > # big endian
    majorVersion: H
    minorVersion: H
    tupleVariationCount: H
    offsetToData: H
"""

# Size of the header in bytes; variation data follows at offsetToData.
CVAR_HEADER_SIZE = sstruct.calcsize(CVAR_HEADER_FORMAT)
|
||||
|
||||
|
||||
class table__c_v_a_r(DefaultTable.DefaultTable):
    """The ``cvar`` table stores variations of the CVT values of a
    variable TrueType font, as a tuple-variation store."""

    dependencies = ["cvt ", "fvar"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.majorVersion, self.minorVersion = 1, 0
        self.variations = []

    def compile(self, ttFont, useSharedPoints=False):
        # Serialize only the variations that actually change something.
        effective = [v for v in self.variations if v.hasImpact()]
        cvtCount = len(ttFont["cvt "].values)
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        tupleVariationCount, tuples, data = compileTupleVariationStore(
            variations=effective,
            pointCount=cvtCount,
            axisTags=axisTags,
            sharedTupleIndices={},
            useSharedPoints=useSharedPoints,
        )
        packedHeader = sstruct.pack(
            CVAR_HEADER_FORMAT,
            {
                "majorVersion": self.majorVersion,
                "minorVersion": self.minorVersion,
                "tupleVariationCount": tupleVariationCount,
                "offsetToData": CVAR_HEADER_SIZE + len(tuples),
            },
        )
        return packedHeader + tuples + data

    def decompile(self, data, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        hdr = {}
        sstruct.unpack(CVAR_HEADER_FORMAT, data[0:CVAR_HEADER_SIZE], hdr)
        self.majorVersion = hdr["majorVersion"]
        self.minorVersion = hdr["minorVersion"]
        # Only version 1.x is understood.
        assert self.majorVersion == 1, self.majorVersion
        self.variations = decompileTupleVariationStore(
            tableTag=self.tableTag,
            axisTags=axisTags,
            tupleVariationCount=hdr["tupleVariationCount"],
            pointCount=len(ttFont["cvt "].values),
            sharedTuples=None,
            data=data,
            pos=CVAR_HEADER_SIZE,
            dataPos=hdr["offsetToData"],
        )

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.majorVersion = int(attrs.get("major", "1"))
            self.minorVersion = int(attrs.get("minor", "0"))
            return
        if name != "tuple":
            return
        valueCount = len(ttFont["cvt "].values)
        var = TupleVariation({}, [None] * valueCount)
        self.variations.append(var)
        for child in content:
            if not isinstance(child, tuple):
                continue
            childName, childAttrs, childContent = child
            var.fromXML(childName, childAttrs, childContent)

    def toXML(self, writer, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        writer.simpletag("version", major=self.majorVersion, minor=self.minorVersion)
        writer.newline()
        for var in self.variations:
            var.toXML(writer, axisTags)
||||
@ -0,0 +1,47 @@
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import array
|
||||
|
||||
|
||||
class table__c_v_t(DefaultTable.DefaultTable):
    """Control Value Table: a flat array of signed 16-bit values
    referenced by TrueType instructions."""

    def decompile(self, data, ttFont):
        # The table body is a plain big-endian int16 array.
        parsed = array.array("h")
        parsed.frombytes(data)
        if sys.byteorder != "big":
            parsed.byteswap()
        self.values = parsed

    def compile(self, ttFont):
        # Swap a copy so the stored values stay in native order.
        out = self.values[:]
        if sys.byteorder != "big":
            out.byteswap()
        return out.tobytes()

    def toXML(self, writer, ttFont):
        for index, value in enumerate(self.values):
            writer.simpletag("cv", value=value, index=index)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if not hasattr(self, "values"):
            self.values = array.array("h")
        if name == "cv":
            index = safeEval(attrs["index"])
            value = safeEval(attrs["value"])
            # Grow with zeros until `index` is addressable.
            while len(self.values) <= index:
                self.values.append(0)
            self.values[index] = value

    def __len__(self):
        return len(self.values)

    def __getitem__(self, index):
        return self.values[index]

    def __setitem__(self, index, value):
        self.values[index] = value

    def __delitem__(self, index):
        del self.values[index]
|
||||
@ -0,0 +1,12 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__f_e_a_t(BaseTTXConverter):
    """The feature name table is an AAT (Apple Advanced Typography) table for
    storing font features, settings, and their human-readable names. It should
    not be confused with the ``Feat`` table or the OpenType Layout ``GSUB``/``GPOS``
    tables. See `Feature Name Table <https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6feat.html>`_
    in the TrueType Reference Manual for more information on the structure and
    purpose of this table."""

    # Parsing/serialization is inherited from BaseTTXConverter.
    pass
|
||||
@ -0,0 +1,49 @@
|
||||
from . import DefaultTable
|
||||
from . import ttProgram
|
||||
|
||||
|
||||
class table__f_p_g_m(DefaultTable.DefaultTable):
    # 'fpgm' (font program): TrueType bytecode, wrapped in a
    # ttProgram.Program object.

    def decompile(self, data, ttFont):
        # The entire table body is raw TrueType bytecode.
        program = ttProgram.Program()
        program.fromBytecode(data)
        self.program = program

    def compile(self, ttFont):
        # Serialize the program back to raw bytecode.
        return self.program.getBytecode()

    def toXML(self, writer, ttFont):
        # XML representation is delegated to the Program object.
        self.program.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        # Rebuild the Program from its TTX representation.
        program = ttProgram.Program()
        program.fromXML(name, attrs, content, ttFont)
        self.program = program

    def __bool__(self):
        """
        >>> fpgm = table__f_p_g_m()
        >>> bool(fpgm)
        False
        >>> p = ttProgram.Program()
        >>> fpgm.program = p
        >>> bool(fpgm)
        False
        >>> bc = bytearray([0])
        >>> p.fromBytecode(bc)
        >>> bool(fpgm)
        True
        >>> p.bytecode.pop()
        0
        >>> bool(fpgm)
        False
        """
        return hasattr(self, "program") and bool(self.program)

    # Python 2 compatibility alias.
    __nonzero__ = __bool__
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import sys
    import doctest

    # Run this module's doctests; exit status is the number of failures
    # (0 means success).
    sys.exit(doctest.testmod().failed)
|
||||
@ -0,0 +1,253 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import (
|
||||
fixedToFloat as fi2fl,
|
||||
floatToFixed as fl2fi,
|
||||
floatToFixedToStr as fl2str,
|
||||
strToFixedToFloat as str2fl,
|
||||
)
|
||||
from fontTools.misc.textTools import Tag, bytesjoin, safeEval
|
||||
from fontTools.ttLib import TTLibError
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
|
||||
# Apple's documentation of 'fvar':
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html
|
||||
|
||||
# sstruct description of the 'fvar' table header.
FVAR_HEADER_FORMAT = """
    > # big endian
    version: L
    offsetToData: H
    countSizePairs: H
    axisCount: H
    axisSize: H
    instanceCount: H
    instanceSize: H
"""

# sstruct description of one VariationAxisRecord.
FVAR_AXIS_FORMAT = """
    > # big endian
    axisTag: 4s
    minValue: 16.16F
    defaultValue: 16.16F
    maxValue: 16.16F
    flags: H
    axisNameID: H
"""

# Fixed prefix of one InstanceRecord; the per-axis coordinates (and an
# optional postscriptNameID) follow and are packed separately.
FVAR_INSTANCE_FORMAT = """
    > # big endian
    subfamilyNameID: H
    flags: H
"""
|
||||
|
||||
|
||||
class table__f_v_a_r(DefaultTable.DefaultTable):
    """Font Variations ('fvar') table: declares the variation axes and
    the named instances of a variable font.

    https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6fvar.html
    """

    dependencies = ["name"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.axes = []
        self.instances = []

    def compile(self, ttFont):
        # An instance record holds one 16.16 coordinate (4 bytes) per axis,
        # plus an optional trailing postscriptNameID.
        instanceSize = sstruct.calcsize(FVAR_INSTANCE_FORMAT) + (len(self.axes) * 4)
        includePostScriptNames = any(
            instance.postscriptNameID != 0xFFFF for instance in self.instances
        )
        if includePostScriptNames:
            instanceSize += 2
        header = {
            "version": 0x00010000,
            "offsetToData": sstruct.calcsize(FVAR_HEADER_FORMAT),
            "countSizePairs": 2,
            "axisCount": len(self.axes),
            "axisSize": sstruct.calcsize(FVAR_AXIS_FORMAT),
            "instanceCount": len(self.instances),
            "instanceSize": instanceSize,
        }
        result = [sstruct.pack(FVAR_HEADER_FORMAT, header)]
        result.extend([axis.compile() for axis in self.axes])
        axisTags = [axis.axisTag for axis in self.axes]
        for instance in self.instances:
            result.append(instance.compile(axisTags, includePostScriptNames))
        return bytesjoin(result)

    def decompile(self, data, ttFont):
        """Parse axes and instances from binary data.

        Raises TTLibError for any version other than 1.0.
        """
        # (fix: removed a dead `header = {}` assignment that was
        # immediately overwritten by sstruct.unpack's return value)
        headerSize = sstruct.calcsize(FVAR_HEADER_FORMAT)
        header = sstruct.unpack(FVAR_HEADER_FORMAT, data[0:headerSize])
        if header["version"] != 0x00010000:
            raise TTLibError("unsupported 'fvar' version %04x" % header["version"])
        pos = header["offsetToData"]
        axisSize = header["axisSize"]
        for _ in range(header["axisCount"]):
            axis = Axis()
            axis.decompile(data[pos : pos + axisSize])
            self.axes.append(axis)
            pos += axisSize
        instanceSize = header["instanceSize"]
        axisTags = [axis.axisTag for axis in self.axes]
        for _ in range(header["instanceCount"]):
            instance = NamedInstance()
            instance.decompile(data[pos : pos + instanceSize], axisTags)
            self.instances.append(instance)
            pos += instanceSize

    def toXML(self, writer, ttFont):
        for axis in self.axes:
            axis.toXML(writer, ttFont)
        for instance in self.instances:
            instance.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        if name == "Axis":
            axis = Axis()
            axis.fromXML(name, attrs, content, ttFont)
            self.axes.append(axis)
        elif name == "NamedInstance":
            instance = NamedInstance()
            instance.fromXML(name, attrs, content, ttFont)
            self.instances.append(instance)

    def getAxes(self):
        """Return {axisTag: (minValue, defaultValue, maxValue)}."""
        return {a.axisTag: (a.minValue, a.defaultValue, a.maxValue) for a in self.axes}
|
||||
|
||||
|
||||
class Axis(object):
    """One variation axis of an 'fvar' table."""

    def __init__(self):
        self.axisTag = None
        self.axisNameID = 0
        self.flags = 0
        self.minValue = -1.0
        self.defaultValue = 0.0
        self.maxValue = 1.0

    def compile(self):
        return sstruct.pack(FVAR_AXIS_FORMAT, self)

    def decompile(self, data):
        sstruct.unpack2(FVAR_AXIS_FORMAT, data, self)

    def toXML(self, writer, ttFont):
        # Emit the axis name as an XML comment when a name table is present.
        debugName = None
        if "name" in ttFont:
            debugName = ttFont["name"].getDebugName(self.axisNameID)
        if debugName is not None:
            writer.newline()
            writer.comment(debugName)
            writer.newline()
        writer.begintag("Axis")
        writer.newline()
        fields = (
            ("AxisTag", self.axisTag),
            ("Flags", "0x%X" % self.flags),
            ("MinValue", fl2str(self.minValue, 16)),
            ("DefaultValue", fl2str(self.defaultValue, 16)),
            ("MaxValue", fl2str(self.maxValue, 16)),
            ("AxisNameID", str(self.axisNameID)),
        )
        for tag, text in fields:
            writer.begintag(tag)
            writer.write(text)
            writer.endtag(tag)
            writer.newline()
        writer.endtag("Axis")
        writer.newline()

    def fromXML(self, name, _attrs, content, ttFont):
        assert name == "Axis"
        for element in content:
            # Skip character data between child elements.
            if type(element) is not tuple:
                continue
            tag, _, value = element
            value = "".join(value)
            if tag == "AxisTag":
                self.axisTag = Tag(value)
            elif tag in {"Flags", "MinValue", "DefaultValue", "MaxValue", "AxisNameID"}:
                attrName = tag[0].lower() + tag[1:]
                if tag.endswith("Value"):
                    parsed = str2fl(value, 16)
                else:
                    parsed = safeEval(value)
                setattr(self, attrName, parsed)
|
||||
|
||||
|
||||
class NamedInstance(object):
    # A named instance ("preset") of an 'fvar' table: a location in the
    # design space plus references into the 'name' table.

    def __init__(self):
        self.subfamilyNameID = 0
        # 0xFFFF is the sentinel for "no PostScript name".
        self.postscriptNameID = 0xFFFF
        self.flags = 0
        # axis tag -> coordinate (16.16 fixed, held as float).
        self.coordinates = {}

    def compile(self, axisTags, includePostScriptName):
        """Pack the record: fixed prefix, one 16.16 coordinate per axis
        (in axisTags order), optionally the postscriptNameID."""
        result = [sstruct.pack(FVAR_INSTANCE_FORMAT, self)]
        for axis in axisTags:
            fixedCoord = fl2fi(self.coordinates[axis], 16)
            result.append(struct.pack(">l", fixedCoord))
        if includePostScriptName:
            result.append(struct.pack(">H", self.postscriptNameID))
        return bytesjoin(result)

    def decompile(self, data, axisTags):
        """Parse one instance record; the presence of postscriptNameID is
        inferred from the record length."""
        sstruct.unpack2(FVAR_INSTANCE_FORMAT, data, self)
        pos = sstruct.calcsize(FVAR_INSTANCE_FORMAT)
        for axis in axisTags:
            value = struct.unpack(">l", data[pos : pos + 4])[0]
            self.coordinates[axis] = fi2fl(value, 16)
            pos += 4
        if pos + 2 <= len(data):
            self.postscriptNameID = struct.unpack(">H", data[pos : pos + 2])[0]
        else:
            self.postscriptNameID = 0xFFFF

    def toXML(self, writer, ttFont):
        # Human-readable names are emitted as XML comments when available.
        name = (
            ttFont["name"].getDebugName(self.subfamilyNameID)
            if "name" in ttFont
            else None
        )
        if name is not None:
            writer.newline()
            writer.comment(name)
            writer.newline()
        psname = (
            ttFont["name"].getDebugName(self.postscriptNameID)
            if "name" in ttFont
            else None
        )
        if psname is not None:
            writer.comment("PostScript: " + psname)
            writer.newline()
        # Omit the postscriptNameID attribute when it is the "none" sentinel.
        if self.postscriptNameID == 0xFFFF:
            writer.begintag(
                "NamedInstance",
                flags=("0x%X" % self.flags),
                subfamilyNameID=self.subfamilyNameID,
            )
        else:
            writer.begintag(
                "NamedInstance",
                flags=("0x%X" % self.flags),
                subfamilyNameID=self.subfamilyNameID,
                postscriptNameID=self.postscriptNameID,
            )
        writer.newline()
        # One <coord> per fvar axis, in axis order.
        for axis in ttFont["fvar"].axes:
            writer.simpletag(
                "coord",
                axis=axis.axisTag,
                value=fl2str(self.coordinates[axis.axisTag], 16),
            )
            writer.newline()
        writer.endtag("NamedInstance")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        assert name == "NamedInstance"
        self.subfamilyNameID = safeEval(attrs["subfamilyNameID"])
        self.flags = safeEval(attrs.get("flags", "0"))
        # A missing attribute means "no PostScript name" (sentinel 0xFFFF).
        if "postscriptNameID" in attrs:
            self.postscriptNameID = safeEval(attrs["postscriptNameID"])
        else:
            self.postscriptNameID = 0xFFFF

        for tag, elementAttrs, _ in filter(lambda t: type(t) is tuple, content):
            if tag == "coord":
                value = str2fl(elementAttrs["value"], 16)
                self.coordinates[elementAttrs["axis"]] = value
|
||||
@ -0,0 +1,55 @@
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
|
||||
# Rasterizer behavior bits for a 'gasp' range (bit field).
GASP_SYMMETRIC_GRIDFIT = 0x0004
GASP_SYMMETRIC_SMOOTHING = 0x0008
GASP_DOGRAY = 0x0002
GASP_GRIDFIT = 0x0001
|
||||
|
||||
|
||||
class table__g_a_s_p(DefaultTable.DefaultTable):
    """The 'gasp' table: per-ppem-range rasterizer behavior flags,
    stored as {rangeMaxPPEM: rangeGaspBehavior}."""

    def decompile(self, data, ttFont):
        self.version, numRanges = struct.unpack(">HH", data[:4])
        assert 0 <= self.version <= 1, "unknown 'gasp' format: %s" % self.version
        self.gaspRange = {}
        pos = 4
        for _ in range(numRanges):
            maxPPEM, behavior = struct.unpack(">HH", data[pos : pos + 4])
            self.gaspRange[int(maxPPEM)] = int(behavior)
            pos += 4
        assert len(data) == pos, "too much data"

    def compile(self, ttFont):
        # The version is recomputed: format 1 is required only when a
        # range uses bits beyond GRIDFIT/DOGRAY (ignore self.version).
        version = 0
        numRanges = len(self.gaspRange)
        chunks = []
        for maxPPEM, behavior in sorted(self.gaspRange.items()):
            chunks.append(struct.pack(">HH", maxPPEM, behavior))
            if behavior & ~(GASP_GRIDFIT | GASP_DOGRAY):
                version = 1
        return struct.pack(">HH", version, numRanges) + b"".join(chunks)

    def toXML(self, writer, ttFont):
        for maxPPEM, behavior in sorted(self.gaspRange.items()):
            writer.simpletag(
                "gaspRange",
                [("rangeMaxPPEM", maxPPEM), ("rangeGaspBehavior", behavior)],
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name != "gaspRange":
            return
        if not hasattr(self, "gaspRange"):
            self.gaspRange = {}
        maxPPEM = safeEval(attrs["rangeMaxPPEM"])
        self.gaspRange[maxPPEM] = safeEval(attrs["rangeGaspBehavior"])
|
||||
@ -0,0 +1,6 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gcid.html
|
||||
class table__g_c_i_d(BaseTTXConverter):
    """The AAT ``gcid`` table; parsing and serialization are handled
    generically by :class:`BaseTTXConverter`."""

    pass
|
||||
2287
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/_g_l_y_f.py
Normal file
2287
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/_g_l_y_f.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,284 @@
|
||||
from collections import deque
|
||||
from functools import partial
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.misc.lazyTools import LazyDict
|
||||
from . import DefaultTable
|
||||
import array
|
||||
import itertools
|
||||
import logging
|
||||
import struct
|
||||
import sys
|
||||
import fontTools.ttLib.tables.TupleVariation as tv
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
TupleVariation = tv.TupleVariation
|
||||
|
||||
|
||||
# https://www.microsoft.com/typography/otspec/gvar.htm
|
||||
# https://www.microsoft.com/typography/otspec/otvarcommonformats.htm
|
||||
#
|
||||
# Apple's documentation of 'gvar':
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6gvar.html
|
||||
#
|
||||
# FreeType2 source code for parsing 'gvar':
|
||||
# http://git.savannah.gnu.org/cgit/freetype/freetype2.git/tree/src/truetype/ttgxvar.c
|
||||
|
||||
# sstruct description of the 'gvar' table header.
GVAR_HEADER_FORMAT = """
    > # big endian
    version: H
    reserved: H
    axisCount: H
    sharedTupleCount: H
    offsetToSharedTuples: I
    glyphCount: H
    flags: H
    offsetToGlyphVariationData: I
"""

# Header size in bytes; the per-glyph offset array starts right after it.
GVAR_HEADER_SIZE = sstruct.calcsize(GVAR_HEADER_FORMAT)
|
||||
|
||||
|
||||
class table__g_v_a_r(DefaultTable.DefaultTable):
    # 'gvar': glyph variation data for variable TrueType fonts.
    # self.variations maps glyph name -> list of TupleVariation; after a
    # lazy decompile it is a LazyDict that parses per-glyph on access.

    dependencies = ["fvar", "glyf"]

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version, self.reserved = 1, 0
        self.variations = {}

    def compile(self, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        # Tuples shared by several glyphs are factored out into a shared pool.
        sharedTuples = tv.compileSharedTuples(
            axisTags, itertools.chain(*self.variations.values())
        )
        sharedTupleIndices = {coord: i for i, coord in enumerate(sharedTuples)}
        sharedTupleSize = sum([len(c) for c in sharedTuples])
        compiledGlyphs = self.compileGlyphs_(ttFont, axisTags, sharedTupleIndices)
        # Build the per-glyph offset array (one extra entry for the end).
        offset = 0
        offsets = []
        for glyph in compiledGlyphs:
            offsets.append(offset)
            offset += len(glyph)
        offsets.append(offset)
        compiledOffsets, tableFormat = self.compileOffsets_(offsets)

        header = {}
        header["version"] = self.version
        header["reserved"] = self.reserved
        header["axisCount"] = len(axisTags)
        header["sharedTupleCount"] = len(sharedTuples)
        header["offsetToSharedTuples"] = GVAR_HEADER_SIZE + len(compiledOffsets)
        header["glyphCount"] = len(compiledGlyphs)
        # Bit 0 of flags records the chosen offset format (short/long).
        header["flags"] = tableFormat
        header["offsetToGlyphVariationData"] = (
            header["offsetToSharedTuples"] + sharedTupleSize
        )
        compiledHeader = sstruct.pack(GVAR_HEADER_FORMAT, header)

        result = [compiledHeader, compiledOffsets]
        result.extend(sharedTuples)
        result.extend(compiledGlyphs)
        return b"".join(result)

    def compileGlyphs_(self, ttFont, axisTags, sharedCoordIndices):
        # One compiled blob per glyph, in glyph order; empty bytes for
        # glyphs without variations.
        result = []
        glyf = ttFont["glyf"]
        optimizeSize = getattr(self, "optimizeSize", True)
        for glyphName in ttFont.getGlyphOrder():
            variations = self.variations.get(glyphName, [])
            if not variations:
                result.append(b"")
                continue
            pointCountUnused = 0  # pointCount is actually unused by compileGlyph
            result.append(
                compileGlyph_(
                    variations,
                    pointCountUnused,
                    axisTags,
                    sharedCoordIndices,
                    optimizeSize=optimizeSize,
                )
            )
        return result

    def decompile(self, data, ttFont):
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        glyphs = ttFont.getGlyphOrder()
        sstruct.unpack(GVAR_HEADER_FORMAT, data[0:GVAR_HEADER_SIZE], self)
        assert len(glyphs) == self.glyphCount
        assert len(axisTags) == self.axisCount
        sharedCoords = tv.decompileSharedTuples(
            axisTags, self.sharedTupleCount, data, self.offsetToSharedTuples
        )
        variations = {}
        offsetToData = self.offsetToGlyphVariationData
        glyf = ttFont["glyf"]

        def get_read_item():
            # Bind lookup state once; read_item is called lazily per glyph.
            reverseGlyphMap = ttFont.getReverseGlyphMap()
            tableFormat = self.flags & 1

            def read_item(glyphName):
                # Locate this glyph's slice via the offset array, then
                # parse its tuple-variation store on demand.
                gid = reverseGlyphMap[glyphName]
                offsetSize = 2 if tableFormat == 0 else 4
                startOffset = GVAR_HEADER_SIZE + offsetSize * gid
                endOffset = startOffset + offsetSize * 2
                offsets = table__g_v_a_r.decompileOffsets_(
                    data[startOffset:endOffset],
                    tableFormat=tableFormat,
                    glyphCount=1,
                )
                gvarData = data[offsetToData + offsets[0] : offsetToData + offsets[1]]
                if not gvarData:
                    return []
                glyph = glyf[glyphName]
                numPointsInGlyph = self.getNumPoints_(glyph)
                return decompileGlyph_(
                    numPointsInGlyph, sharedCoords, axisTags, gvarData
                )

            return read_item

        read_item = get_read_item()
        l = LazyDict({glyphs[gid]: read_item for gid in range(self.glyphCount)})

        self.variations = l

        if ttFont.lazy is False:  # Be lazy for None and True
            self.ensureDecompiled()

    def ensureDecompiled(self, recurse=False):
        # The recurse argument is unused, but part of the signature of
        # ensureDecompiled across the library.
        # Use a zero-length deque to consume the lazy dict
        deque(self.variations.values(), maxlen=0)

    @staticmethod
    def decompileOffsets_(data, tableFormat, glyphCount):
        # Parse glyphCount+1 offsets in either the short or long format.
        if tableFormat == 0:
            # Short format: array of UInt16
            offsets = array.array("H")
            offsetsSize = (glyphCount + 1) * 2
        else:
            # Long format: array of UInt32
            offsets = array.array("I")
            offsetsSize = (glyphCount + 1) * 4
        offsets.frombytes(data[0:offsetsSize])
        if sys.byteorder != "big":
            offsets.byteswap()

        # In the short format, offsets need to be multiplied by 2.
        # This is not documented in Apple's TrueType specification,
        # but can be inferred from the FreeType implementation, and
        # we could verify it with two sample GX fonts.
        if tableFormat == 0:
            offsets = [off * 2 for off in offsets]

        return offsets

    @staticmethod
    def compileOffsets_(offsets):
        """Packs a list of offsets into a 'gvar' offset table.

        Returns a pair (bytestring, tableFormat). Bytestring is the
        packed offset table. Format indicates whether the table
        uses short (tableFormat=0) or long (tableFormat=1) integers.
        The returned tableFormat should get packed into the flags field
        of the 'gvar' header.
        """
        assert len(offsets) >= 2
        for i in range(1, len(offsets)):
            assert offsets[i - 1] <= offsets[i]
        if max(offsets) <= 0xFFFF * 2:
            # Short format stores offset/2, so the limit is 2 * UInt16 max.
            packed = array.array("H", [n >> 1 for n in offsets])
            tableFormat = 0
        else:
            packed = array.array("I", offsets)
            tableFormat = 1
        if sys.byteorder != "big":
            packed.byteswap()
        return (packed.tobytes(), tableFormat)

    def toXML(self, writer, ttFont):
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("reserved", value=self.reserved)
        writer.newline()
        axisTags = [axis.axisTag for axis in ttFont["fvar"].axes]
        for glyphName in ttFont.getGlyphNames():
            variations = self.variations.get(glyphName)
            if not variations:
                continue
            writer.begintag("glyphVariations", glyph=glyphName)
            writer.newline()
            for gvar in variations:
                gvar.toXML(writer, axisTags)
            writer.endtag("glyphVariations")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "reserved":
            self.reserved = safeEval(attrs["value"])
        elif name == "glyphVariations":
            if not hasattr(self, "variations"):
                self.variations = {}
            glyphName = attrs["glyph"]
            glyph = ttFont["glyf"][glyphName]
            numPointsInGlyph = self.getNumPoints_(glyph)
            glyphVariations = []
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    if name == "tuple":
                        gvar = TupleVariation({}, [None] * numPointsInGlyph)
                        glyphVariations.append(gvar)
                        for tupleElement in content:
                            if isinstance(tupleElement, tuple):
                                tupleName, tupleAttrs, tupleContent = tupleElement
                                gvar.fromXML(tupleName, tupleAttrs, tupleContent)
            self.variations[glyphName] = glyphVariations

    @staticmethod
    def getNumPoints_(glyph):
        # Variation data covers all glyph points plus 4 phantom points.
        NUM_PHANTOM_POINTS = 4

        if glyph.isComposite():
            return len(glyph.components) + NUM_PHANTOM_POINTS
        else:
            # Empty glyphs (eg. space, nonmarkingreturn) have no "coordinates" attribute.
            return len(getattr(glyph, "coordinates", [])) + NUM_PHANTOM_POINTS
|
||||
|
||||
|
||||
def compileGlyph_(
    variations, pointCount, axisTags, sharedCoordIndices, *, optimizeSize=True
):
    """Serialize one glyph's variations; returns b"" when there are none."""
    tupleVariationCount, tuples, data = tv.compileTupleVariationStore(
        variations, pointCount, axisTags, sharedCoordIndices, optimizeSize=optimizeSize
    )
    if tupleVariationCount == 0:
        return b""
    # Serialized-data offset is relative to the start of this blob:
    # 4 header bytes plus the tuple headers.
    header = struct.pack(">HH", tupleVariationCount, 4 + len(tuples))
    padding = b"\0" if (len(tuples) + len(data)) % 2 != 0 else b""
    return header + tuples + data + padding
||||
|
||||
|
||||
def decompileGlyph_(pointCount, sharedTuples, axisTags, data):
    """Parse one glyph's 'gvar' blob into a list of TupleVariation.

    Returns [] for blobs too short to hold the 4-byte glyph header.
    """
    if len(data) < 4:
        return []
    tupleVariationCount, offsetToData = struct.unpack(">HH", data[:4])
    # (fix: removed unused local `dataPos`, which merely aliased
    # offsetToData and was never read)
    return tv.decompileTupleVariationStore(
        "gvar",
        axisTags,
        tupleVariationCount,
        pointCount,
        sharedTuples,
        data,
        4,
        offsetToData,
    )
|
||||
@ -0,0 +1,119 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytechr, byteord, strjoin
|
||||
from . import DefaultTable
|
||||
import array
|
||||
from collections.abc import Mapping
|
||||
|
||||
hdmxHeaderFormat = """
|
||||
> # big endian!
|
||||
version: H
|
||||
numRecords: H
|
||||
recordSize: l
|
||||
"""
|
||||
|
||||
|
||||
class _GlyphnamedList(Mapping):
|
||||
def __init__(self, reverseGlyphOrder, data):
|
||||
self._array = data
|
||||
self._map = dict(reverseGlyphOrder)
|
||||
|
||||
def __getitem__(self, k):
|
||||
return self._array[self._map[k]]
|
||||
|
||||
def __len__(self):
|
||||
return len(self._map)
|
||||
|
||||
def __iter__(self):
|
||||
return iter(self._map)
|
||||
|
||||
def keys(self):
|
||||
return self._map.keys()
|
||||
|
||||
|
||||
class table__h_d_m_x(DefaultTable.DefaultTable):
|
||||
def decompile(self, data, ttFont):
|
||||
numGlyphs = ttFont["maxp"].numGlyphs
|
||||
glyphOrder = ttFont.getGlyphOrder()
|
||||
dummy, data = sstruct.unpack2(hdmxHeaderFormat, data, self)
|
||||
self.hdmx = {}
|
||||
for i in range(self.numRecords):
|
||||
ppem = byteord(data[0])
|
||||
maxSize = byteord(data[1])
|
||||
widths = _GlyphnamedList(
|
||||
ttFont.getReverseGlyphMap(), array.array("B", data[2 : 2 + numGlyphs])
|
||||
)
|
||||
self.hdmx[ppem] = widths
|
||||
data = data[self.recordSize :]
|
||||
assert len(data) == 0, "too much hdmx data"
|
||||
|
||||
def compile(self, ttFont):
|
||||
self.version = 0
|
||||
numGlyphs = ttFont["maxp"].numGlyphs
|
||||
glyphOrder = ttFont.getGlyphOrder()
|
||||
self.recordSize = 4 * ((2 + numGlyphs + 3) // 4)
|
||||
pad = (self.recordSize - 2 - numGlyphs) * b"\0"
|
||||
self.numRecords = len(self.hdmx)
|
||||
data = sstruct.pack(hdmxHeaderFormat, self)
|
||||
items = sorted(self.hdmx.items())
|
||||
for ppem, widths in items:
|
||||
data = data + bytechr(ppem) + bytechr(max(widths.values()))
|
||||
for glyphID in range(len(glyphOrder)):
|
||||
width = widths[glyphOrder[glyphID]]
|
||||
data = data + bytechr(width)
|
||||
data = data + pad
|
||||
return data
|
||||
|
||||
def toXML(self, writer, ttFont):
|
||||
writer.begintag("hdmxData")
|
||||
writer.newline()
|
||||
ppems = sorted(self.hdmx.keys())
|
||||
records = []
|
||||
format = ""
|
||||
for ppem in ppems:
|
||||
widths = self.hdmx[ppem]
|
||||
records.append(widths)
|
||||
format = format + "%4d"
|
||||
glyphNames = ttFont.getGlyphOrder()[:]
|
||||
glyphNames.sort()
|
||||
maxNameLen = max(map(len, glyphNames))
|
||||
format = "%" + repr(maxNameLen) + "s:" + format + " ;"
|
||||
writer.write(format % (("ppem",) + tuple(ppems)))
|
||||
writer.newline()
|
||||
writer.newline()
|
||||
for glyphName in glyphNames:
|
||||
row = []
|
||||
for ppem in ppems:
|
||||
widths = self.hdmx[ppem]
|
||||
row.append(widths[glyphName])
|
||||
if ";" in glyphName:
|
||||
glyphName = "\\x3b".join(glyphName.split(";"))
|
||||
writer.write(format % ((glyphName,) + tuple(row)))
|
||||
writer.newline()
|
||||
writer.endtag("hdmxData")
|
||||
writer.newline()
|
||||
|
||||
    def fromXML(self, name, attrs, content, ttFont):
        """Parse the TTX text matrix written by toXML back into self.hdmx.

        Expects a header row "ppem: <sizes>" followed by one
        "<glyphName>: <widths> ;" row per glyph.
        """
        if name != "hdmxData":
            return
        content = strjoin(content)
        lines = content.split(";")
        topRow = lines[0].split()
        assert topRow[0] == "ppem:", "illegal hdmx format"
        ppems = list(map(int, topRow[1:]))
        self.hdmx = hdmx = {}
        for ppem in ppems:
            hdmx[ppem] = {}
        lines = (line.split() for line in lines[1:])
        for line in lines:
            if not line:
                continue
            assert line[0][-1] == ":", "illegal hdmx format"
            glyphName = line[0][:-1]
            if "\\" in glyphName:
                # Undo the "\x3b" escaping applied by toXML for ";" in names.
                from fontTools.misc.textTools import safeEval

                glyphName = safeEval('"""' + glyphName + '"""')
            line = list(map(int, line[1:]))
            assert len(line) == len(ppems), "illegal hdmx format"
            for i in range(len(ppems)):
                hdmx[ppems[i]][glyphName] = line[i]
|
||||
@ -0,0 +1,123 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import floatToFixedToStr, strToFixedToFloat
|
||||
from fontTools.misc.textTools import safeEval, num2binary, binary2num
|
||||
from fontTools.misc.timeTools import (
|
||||
timestampFromString,
|
||||
timestampToString,
|
||||
timestampNow,
|
||||
)
|
||||
from fontTools.misc.timeTools import epoch_diff as mac_epoch_diff # For backward compat
|
||||
from fontTools.misc.arrayTools import intRect, unionRect
|
||||
from . import DefaultTable
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
headFormat = """
|
||||
> # big endian
|
||||
tableVersion: 16.16F
|
||||
fontRevision: 16.16F
|
||||
checkSumAdjustment: I
|
||||
magicNumber: I
|
||||
flags: H
|
||||
unitsPerEm: H
|
||||
created: Q
|
||||
modified: Q
|
||||
xMin: h
|
||||
yMin: h
|
||||
xMax: h
|
||||
yMax: h
|
||||
macStyle: H
|
||||
lowestRecPPEM: H
|
||||
fontDirectionHint: h
|
||||
indexToLocFormat: h
|
||||
glyphDataFormat: h
|
||||
"""
|
||||
|
||||
|
||||
class table__h_e_a_d(DefaultTable.DefaultTable):
    """Font header ('head') table.

    Holds global font metadata (version, bounding box, unitsPerEm,
    created/modified timestamps, flags, indexToLocFormat, ...), packed and
    unpacked via the module-level headFormat sstruct description.
    """

    dependencies = ["maxp", "loca", "CFF ", "CFF2"]

    def decompile(self, data, ttFont):
        """Unpack binary 'head' data into attributes on self, sanitizing
        out-of-range timestamp values."""
        dummy, rest = sstruct.unpack2(headFormat, data, self)
        if rest:
            # this is quite illegal, but there seem to be fonts out there that do this
            log.warning("extra bytes at the end of 'head' table")
            assert rest == b"\0\0"

        # For timestamp fields, ignore the top four bytes. Some fonts have
        # bogus values there. Since till 2038 those bytes only can be zero,
        # ignore them.
        #
        # https://github.com/fonttools/fonttools/issues/99#issuecomment-66776810
        for stamp in "created", "modified":
            value = getattr(self, stamp)
            if value > 0xFFFFFFFF:
                log.warning("'%s' timestamp out of range; ignoring top bytes", stamp)
                value &= 0xFFFFFFFF
                setattr(self, stamp, value)
            if value < 0x7C259DC0:  # January 1, 1970 00:00:00
                # Value is suspiciously small for a Mac-epoch timestamp;
                # treat it as seconds since the Unix epoch instead.
                log.warning(
                    "'%s' timestamp seems very low; regarding as unix timestamp", stamp
                )
                value += 0x7C259DC0
                setattr(self, stamp, value)

    def compile(self, ttFont):
        """Pack self back to binary 'head' data.

        When ttFont.recalcBBoxes is set, recalculates the font bounding box
        from CFF/CFF2 outlines (TrueType outlines are handled by
        table__m_a_x_p.recalc).  When ttFont.recalcTimestamp is set, updates
        the 'modified' field to now.
        """
        if ttFont.recalcBBoxes:
            # For TT-flavored fonts, xMin, yMin, xMax and yMax are set in table__m_a_x_p.recalc().
            if "CFF " in ttFont:
                topDict = ttFont["CFF "].cff.topDictIndex[0]
                self.xMin, self.yMin, self.xMax, self.yMax = intRect(topDict.FontBBox)
            elif "CFF2" in ttFont:
                # CFF2 has no stored FontBBox; compute it as the union of all
                # charstring bounds.
                topDict = ttFont["CFF2"].cff.topDictIndex[0]
                charStrings = topDict.CharStrings
                fontBBox = None
                for charString in charStrings.values():
                    bounds = charString.calcBounds(charStrings)
                    if bounds is not None:
                        if fontBBox is not None:
                            fontBBox = unionRect(fontBBox, bounds)
                        else:
                            fontBBox = bounds
                if fontBBox is not None:
                    self.xMin, self.yMin, self.xMax, self.yMax = intRect(fontBBox)
        if ttFont.recalcTimestamp:
            self.modified = timestampNow()
        data = sstruct.pack(headFormat, self)
        return data

    def toXML(self, writer, ttFont):
        """Write each head field as a TTX simpletag, with human-readable
        formatting for fixed-point, timestamp, hex, and bitmask fields."""
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
        _, names, fixes = sstruct.getformat(headFormat)
        for name in names:
            value = getattr(self, name)
            if name in fixes:
                # Fixed-point fields (tableVersion, fontRevision).
                value = floatToFixedToStr(value, precisionBits=fixes[name])
            elif name in ("created", "modified"):
                value = timestampToString(value)
            elif name in ("magicNumber", "checkSumAdjustment"):
                if value < 0:
                    value = value + 0x100000000
                value = hex(value)
                if value[-1:] == "L":
                    value = value[:-1]
            elif name in ("macStyle", "flags"):
                value = num2binary(value, 16)
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse one TTX simpletag back into the matching attribute,
        inverting the per-field formatting applied by toXML."""
        value = attrs["value"]
        fixes = sstruct.getformat(headFormat)[2]
        if name in fixes:
            value = strToFixedToFloat(value, precisionBits=fixes[name])
        elif name in ("created", "modified"):
            value = timestampFromString(value)
        elif name in ("macStyle", "flags"):
            value = binary2num(value)
        else:
            value = safeEval(value)
        setattr(self, name, value)
|
||||
@ -0,0 +1,135 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.misc.fixedTools import (
|
||||
ensureVersionIsLong as fi2ve,
|
||||
versionToFixed as ve2fi,
|
||||
)
|
||||
from . import DefaultTable
|
||||
import math
|
||||
|
||||
|
||||
hheaFormat = """
|
||||
> # big endian
|
||||
tableVersion: L
|
||||
ascent: h
|
||||
descent: h
|
||||
lineGap: h
|
||||
advanceWidthMax: H
|
||||
minLeftSideBearing: h
|
||||
minRightSideBearing: h
|
||||
xMaxExtent: h
|
||||
caretSlopeRise: h
|
||||
caretSlopeRun: h
|
||||
caretOffset: h
|
||||
reserved0: h
|
||||
reserved1: h
|
||||
reserved2: h
|
||||
reserved3: h
|
||||
metricDataFormat: h
|
||||
numberOfHMetrics: H
|
||||
"""
|
||||
|
||||
|
||||
class table__h_h_e_a(DefaultTable.DefaultTable):
    """Horizontal header ('hhea') table.

    Stores horizontal layout metrics (ascent, descent, advanceWidthMax,
    side-bearing extremes, numberOfHMetrics, ...).  Several values can be
    recalculated from the 'hmtx' and outline tables at compile time.
    """

    # Note: Keep in sync with table__v_h_e_a

    dependencies = ["hmtx", "glyf", "CFF ", "CFF2"]

    # OpenType spec renamed these, add aliases for compatibility
    @property
    def ascender(self):
        # Alias for self.ascent (OpenType name).
        return self.ascent

    @ascender.setter
    def ascender(self, value):
        self.ascent = value

    @property
    def descender(self):
        # Alias for self.descent (OpenType name).
        return self.descent

    @descender.setter
    def descender(self, value):
        self.descent = value

    def decompile(self, data, ttFont):
        """Unpack binary 'hhea' data into attributes on self."""
        sstruct.unpack(hheaFormat, data, self)

    def compile(self, ttFont):
        """Pack self to binary, recalculating metrics first when
        ttFont.recalcBBoxes is set and an outline table is loaded."""
        if ttFont.recalcBBoxes and (
            ttFont.isLoaded("glyf")
            or ttFont.isLoaded("CFF ")
            or ttFont.isLoaded("CFF2")
        ):
            self.recalc(ttFont)
        self.tableVersion = fi2ve(self.tableVersion)
        return sstruct.pack(hheaFormat, self)

    def recalc(self, ttFont):
        """Recalculate advanceWidthMax, minLeftSideBearing,
        minRightSideBearing and xMaxExtent from 'hmtx' plus the glyph
        outlines ('glyf', or CFF/CFF2 charstring bounds)."""
        if "hmtx" not in ttFont:
            return

        hmtxTable = ttFont["hmtx"]
        self.advanceWidthMax = max(adv for adv, _ in hmtxTable.metrics.values())

        # Map glyph name -> outline bounding-box width (xMax - xMin).
        boundsWidthDict = {}
        if "glyf" in ttFont:
            glyfTable = ttFont["glyf"]
            for name in ttFont.getGlyphOrder():
                g = glyfTable[name]
                if g.numberOfContours == 0:
                    # Empty glyph: no outline, contributes no bounds.
                    continue
                if g.numberOfContours < 0 and not hasattr(g, "xMax"):
                    # Composite glyph without extents set.
                    # Calculate those.
                    g.recalcBounds(glyfTable)
                boundsWidthDict[name] = g.xMax - g.xMin
        elif "CFF " in ttFont or "CFF2" in ttFont:
            if "CFF " in ttFont:
                topDict = ttFont["CFF "].cff.topDictIndex[0]
            else:
                topDict = ttFont["CFF2"].cff.topDictIndex[0]
            charStrings = topDict.CharStrings
            for name in ttFont.getGlyphOrder():
                cs = charStrings[name]
                bounds = cs.calcBounds(charStrings)
                if bounds is not None:
                    boundsWidthDict[name] = int(
                        math.ceil(bounds[2]) - math.floor(bounds[0])
                    )

        if boundsWidthDict:
            minLeftSideBearing = float("inf")
            minRightSideBearing = float("inf")
            xMaxExtent = -float("inf")
            for name, boundsWidth in boundsWidthDict.items():
                advanceWidth, lsb = hmtxTable[name]
                rsb = advanceWidth - lsb - boundsWidth
                extent = lsb + boundsWidth
                minLeftSideBearing = min(minLeftSideBearing, lsb)
                minRightSideBearing = min(minRightSideBearing, rsb)
                xMaxExtent = max(xMaxExtent, extent)
            self.minLeftSideBearing = minLeftSideBearing
            self.minRightSideBearing = minRightSideBearing
            self.xMaxExtent = xMaxExtent

        else:  # No glyph has outlines.
            self.minLeftSideBearing = 0
            self.minRightSideBearing = 0
            self.xMaxExtent = 0

    def toXML(self, writer, ttFont):
        """Write each hhea field as a TTX simpletag; tableVersion is
        rendered as 32-bit hex."""
        formatstring, names, fixes = sstruct.getformat(hheaFormat)
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                value = fi2ve(value)
                value = "0x%08x" % value
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse one TTX simpletag back into the matching attribute."""
        if name == "tableVersion":
            setattr(self, name, ve2fi(attrs["value"]))
            return
        setattr(self, name, safeEval(attrs["value"]))
|
||||
@ -0,0 +1,151 @@
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools import ttLib
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import struct
|
||||
import array
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class table__h_m_t_x(DefaultTable.DefaultTable):
    """Horizontal metrics ('hmtx') table.

    Stores per-glyph (advance width, left side bearing) pairs in
    self.metrics, keyed by glyph name.  The class attributes below are the
    only horizontal/vertical-specific parts; the 'vmtx' table subclasses
    this and overrides them.
    """

    # Tag of the header table holding the metrics count ('hhea' here,
    # 'vhea' in the vmtx subclass).
    headerTag = "hhea"
    advanceName = "width"
    sideBearingName = "lsb"
    numberOfMetricsName = "numberOfHMetrics"
    # One unsigned advance + one signed side bearing per long metric.
    longMetricFormat = "Hh"

    def decompile(self, data, ttFont):
        """Unpack binary metrics: numberOfMetrics long records, then bare
        side bearings for trailing glyphs that share the last advance."""
        numGlyphs = ttFont["maxp"].numGlyphs
        headerTable = ttFont.get(self.headerTag)
        if headerTable is not None:
            numberOfMetrics = int(getattr(headerTable, self.numberOfMetricsName))
        else:
            numberOfMetrics = numGlyphs
        if numberOfMetrics > numGlyphs:
            log.warning(
                "The %s.%s exceeds the maxp.numGlyphs"
                % (self.headerTag, self.numberOfMetricsName)
            )
            numberOfMetrics = numGlyphs
        if len(data) < 4 * numberOfMetrics:
            raise ttLib.TTLibError("not enough '%s' table data" % self.tableTag)
        # Note: advanceWidth is unsigned, but some font editors might
        # read/write as signed. We can't be sure whether it was a mistake
        # or not, so we read as unsigned but also issue a warning...
        metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
        metrics = struct.unpack(metricsFmt, data[: 4 * numberOfMetrics])
        data = data[4 * numberOfMetrics :]
        numberOfSideBearings = numGlyphs - numberOfMetrics
        sideBearings = array.array("h", data[: 2 * numberOfSideBearings])
        data = data[2 * numberOfSideBearings :]

        if sys.byteorder != "big":
            sideBearings.byteswap()
        if data:
            log.warning("too much '%s' table data" % self.tableTag)
        self.metrics = {}
        glyphOrder = ttFont.getGlyphOrder()
        for i in range(numberOfMetrics):
            glyphName = glyphOrder[i]
            advanceWidth, lsb = metrics[i * 2 : i * 2 + 2]
            if advanceWidth > 32767:
                log.warning(
                    "Glyph %r has a huge advance %s (%d); is it intentional or "
                    "an (invalid) negative value?",
                    glyphName,
                    self.advanceName,
                    advanceWidth,
                )
            self.metrics[glyphName] = (advanceWidth, lsb)
        # Trailing glyphs all inherit the advance of the last long record.
        lastAdvance = metrics[-2]
        for i in range(numberOfSideBearings):
            glyphName = glyphOrder[i + numberOfMetrics]
            self.metrics[glyphName] = (lastAdvance, sideBearings[i])

    def compile(self, ttFont):
        """Pack self.metrics to binary, collapsing a trailing run of equal
        advances into bare side bearings and updating numberOfMetrics in
        the header table."""
        metrics = []
        hasNegativeAdvances = False
        for glyphName in ttFont.getGlyphOrder():
            advanceWidth, sideBearing = self.metrics[glyphName]
            if advanceWidth < 0:
                log.error(
                    "Glyph %r has negative advance %s" % (glyphName, self.advanceName)
                )
                hasNegativeAdvances = True
            metrics.append([advanceWidth, sideBearing])

        headerTable = ttFont.get(self.headerTag)
        if headerTable is not None:
            # Find how many trailing glyphs share the final advance; they
            # can be stored as side bearings only.
            lastAdvance = metrics[-1][0]
            lastIndex = len(metrics)
            while metrics[lastIndex - 2][0] == lastAdvance:
                lastIndex -= 1
                if lastIndex <= 1:
                    # all advances are equal
                    lastIndex = 1
                    break
            additionalMetrics = metrics[lastIndex:]
            additionalMetrics = [otRound(sb) for _, sb in additionalMetrics]
            metrics = metrics[:lastIndex]
            numberOfMetrics = len(metrics)
            setattr(headerTable, self.numberOfMetricsName, numberOfMetrics)
        else:
            # no hhea/vhea, can't store numberOfMetrics; assume == numGlyphs
            numberOfMetrics = ttFont["maxp"].numGlyphs
            additionalMetrics = []

        allMetrics = []
        for advance, sb in metrics:
            allMetrics.extend([otRound(advance), otRound(sb)])
        metricsFmt = ">" + self.longMetricFormat * numberOfMetrics
        try:
            data = struct.pack(metricsFmt, *allMetrics)
        except struct.error as e:
            # An unsigned pack of a negative advance raises "out of range";
            # surface that as a clearer TTLibError.
            if "out of range" in str(e) and hasNegativeAdvances:
                raise ttLib.TTLibError(
                    "'%s' table can't contain negative advance %ss"
                    % (self.tableTag, self.advanceName)
                )
            else:
                raise
        additionalMetrics = array.array("h", additionalMetrics)
        if sys.byteorder != "big":
            additionalMetrics.byteswap()
        data = data + additionalMetrics.tobytes()
        return data

    def toXML(self, writer, ttFont):
        """Write one <mtx> simpletag per glyph, sorted by glyph name."""
        names = sorted(self.metrics.keys())
        for glyphName in names:
            advance, sb = self.metrics[glyphName]
            writer.simpletag(
                "mtx",
                [
                    ("name", glyphName),
                    (self.advanceName, advance),
                    (self.sideBearingName, sb),
                ],
            )
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse one <mtx> simpletag into self.metrics."""
        if not hasattr(self, "metrics"):
            self.metrics = {}
        if name == "mtx":
            self.metrics[attrs["name"]] = (
                safeEval(attrs[self.advanceName]),
                safeEval(attrs[self.sideBearingName]),
            )

    def __delitem__(self, glyphName):
        del self.metrics[glyphName]

    def __getitem__(self, glyphName):
        # Returns the (advance, sideBearing) pair for glyphName.
        return self.metrics[glyphName]

    def __setitem__(self, glyphName, advance_sb_pair):
        self.metrics[glyphName] = tuple(advance_sb_pair)
|
||||
@ -0,0 +1,278 @@
|
||||
from fontTools.ttLib import getSearchRange
|
||||
from fontTools.misc.textTools import safeEval, readHex
|
||||
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
import sys
|
||||
import array
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class table__k_e_r_n(DefaultTable.DefaultTable):
    """Kerning ('kern') table.

    Container for a list of kerning subtables (self.kernTables).  Handles
    both the OpenType/Windows layout (version 0, 16-bit header) and the
    AAT/Apple layout (version 1.0, 32-bit header).
    """

    def getkern(self, format):
        """Return the first subtable with the given format, or None."""
        for subtable in self.kernTables:
            if subtable.format == format:
                return subtable
        return None  # not found

    def decompile(self, data, ttFont):
        """Unpack the table header and each subtable.

        Distinguishes the Apple version-1.0 layout from the OpenType
        version-0 layout by the 32-bit version field, then dispatches each
        subtable to its format class (or to the raw-bytes fallback).
        """
        version, nTables = struct.unpack(">HH", data[:4])
        apple = False
        if (len(data) >= 8) and (version == 1):
            # AAT Apple's "new" format. Hm.
            version, nTables = struct.unpack(">LL", data[:8])
            self.version = fi2fl(version, 16)
            data = data[8:]
            apple = True
        else:
            self.version = version
            data = data[4:]
        self.kernTables = []
        for i in range(nTables):
            if self.version == 1.0:
                # Apple
                length, coverage, subtableFormat = struct.unpack(">LBB", data[:6])
            else:
                # in OpenType spec the "version" field refers to the common
                # subtable header; the actual subtable format is stored in
                # the 8-15 mask bits of "coverage" field.
                # This "version" is always 0 so we ignore it here
                _, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
                if nTables == 1 and subtableFormat == 0:
                    # The "length" value is ignored since some fonts
                    # (like OpenSans and Calibri) have a subtable larger than
                    # its value.
                    (nPairs,) = struct.unpack(">H", data[6:8])
                    calculated_length = (nPairs * 6) + 14
                    if length != calculated_length:
                        log.warning(
                            "'kern' subtable longer than defined: "
                            "%d bytes instead of %d bytes" % (calculated_length, length)
                        )
                    length = calculated_length
            if subtableFormat not in kern_classes:
                # Preserve unparsed formats as raw bytes for round-tripping.
                subtable = KernTable_format_unkown(subtableFormat)
            else:
                subtable = kern_classes[subtableFormat](apple)
            subtable.decompile(data[:length], ttFont)
            self.kernTables.append(subtable)
            data = data[length:]

    def compile(self, ttFont):
        """Pack the header (Apple or OpenType flavor) followed by each
        compiled subtable."""
        if hasattr(self, "kernTables"):
            nTables = len(self.kernTables)
        else:
            nTables = 0
        if self.version == 1.0:
            # AAT Apple's "new" format.
            data = struct.pack(">LL", fl2fi(self.version, 16), nTables)
        else:
            data = struct.pack(">HH", self.version, nTables)
        if hasattr(self, "kernTables"):
            for subtable in self.kernTables:
                data = data + subtable.compile(ttFont)
        return data

    def toXML(self, writer, ttFont):
        """Write the table version and delegate each subtable to its own
        toXML."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        for subtable in self.kernTables:
            subtable.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Parse the version tag or one <kernsubtable> element, creating
        the appropriate subtable class and delegating to it."""
        if name == "version":
            self.version = safeEval(attrs["value"])
            return
        if name != "kernsubtable":
            return
        if not hasattr(self, "kernTables"):
            self.kernTables = []
        format = safeEval(attrs["format"])
        if format not in kern_classes:
            subtable = KernTable_format_unkown(format)
        else:
            apple = self.version == 1.0
            subtable = kern_classes[format](apple)
        self.kernTables.append(subtable)
        subtable.fromXML(name, attrs, content, ttFont)
|
||||
|
||||
|
||||
class KernTable_format_0(object):
    """Format 0 kerning subtable: a sorted list of (left, right) glyph-pair
    adjustments.

    self.kernTable maps (leftGlyphName, rightGlyphName) -> kern value.
    The 'apple' flag selects the AAT subtable header layout instead of the
    OpenType one.
    """

    # 'version' is kept for backward compatibility
    version = format = 0

    def __init__(self, apple=False):
        # True when this subtable lives in an Apple version-1.0 kern table.
        self.apple = apple

    def decompile(self, data, ttFont):
        """Unpack the subtable header and the pair list into
        self.kernTable, mapping glyph IDs back to glyph names."""
        if not self.apple:
            version, length, subtableFormat, coverage = struct.unpack(">HHBB", data[:6])
            if version != 0:
                from fontTools.ttLib import TTLibError

                raise TTLibError("unsupported kern subtable version: %d" % version)
            tupleIndex = None
            # Should we also assert length == len(data)?
            data = data[6:]
        else:
            # Apple layout adds a 16-bit variation tuple index.
            length, coverage, subtableFormat, tupleIndex = struct.unpack(
                ">LBBH", data[:8]
            )
            data = data[8:]
        assert self.format == subtableFormat, "unsupported format"
        self.coverage = coverage
        self.tupleIndex = tupleIndex

        self.kernTable = kernTable = {}

        nPairs, searchRange, entrySelector, rangeShift = struct.unpack(
            ">HHHH", data[:8]
        )
        data = data[8:]

        # Each pair is three 16-bit values: left GID, right GID, kern value.
        datas = array.array("H", data[: 6 * nPairs])
        if sys.byteorder != "big":
            datas.byteswap()
        it = iter(datas)
        glyphOrder = ttFont.getGlyphOrder()
        for k in range(nPairs):
            left, right, value = next(it), next(it), next(it)
            if value >= 32768:
                # Kern values are signed; undo the unsigned read.
                value -= 65536
            try:
                kernTable[(glyphOrder[left], glyphOrder[right])] = value
            except IndexError:
                # Slower, but will not throw an IndexError on an invalid
                # glyph id.
                kernTable[(ttFont.getGlyphName(left), ttFont.getGlyphName(right))] = (
                    value
                )
        if len(data) > 6 * nPairs + 4:  # Ignore up to 4 bytes excess
            log.warning(
                "excess data in 'kern' subtable: %d bytes", len(data) - 6 * nPairs
            )

    def compile(self, ttFont):
        """Pack self.kernTable back to binary, clamping the pair count and
        search-helper fields to 16 bits."""
        nPairs = min(len(self.kernTable), 0xFFFF)
        searchRange, entrySelector, rangeShift = getSearchRange(nPairs, 6)
        searchRange &= 0xFFFF
        entrySelector = min(entrySelector, 0xFFFF)
        rangeShift = min(rangeShift, 0xFFFF)
        data = struct.pack(">HHHH", nPairs, searchRange, entrySelector, rangeShift)

        # yeehee! (I mean, turn names into indices)
        try:
            reverseOrder = ttFont.getReverseGlyphMap()
            kernTable = sorted(
                (reverseOrder[left], reverseOrder[right], value)
                for ((left, right), value) in self.kernTable.items()
            )
        except KeyError:
            # Slower, but will not throw KeyError on invalid glyph id.
            getGlyphID = ttFont.getGlyphID
            kernTable = sorted(
                (getGlyphID(left), getGlyphID(right), value)
                for ((left, right), value) in self.kernTable.items()
            )

        for left, right, value in kernTable:
            data = data + struct.pack(">HHh", left, right, value)

        if not self.apple:
            version = 0
            length = len(data) + 6
            if length >= 0x10000:
                log.warning(
                    '"kern" subtable overflow, '
                    "truncating length value while preserving pairs."
                )
                length &= 0xFFFF
            header = struct.pack(">HHBB", version, length, self.format, self.coverage)
        else:
            if self.tupleIndex is None:
                # sensible default when compiling a TTX from an old fonttools
                # or when inserting a Windows-style format 0 subtable into an
                # Apple version=1.0 kern table
                log.warning("'tupleIndex' is None; default to 0")
                self.tupleIndex = 0
            length = len(data) + 8
            header = struct.pack(
                ">LBBH", length, self.coverage, self.format, self.tupleIndex
            )
        return header + data

    def toXML(self, writer, ttFont):
        """Write the subtable as a <kernsubtable> element containing one
        <pair> simpletag per kerning pair."""
        attrs = dict(coverage=self.coverage, format=self.format)
        if self.apple:
            if self.tupleIndex is None:
                log.warning("'tupleIndex' is None; default to 0")
                attrs["tupleIndex"] = 0
            else:
                attrs["tupleIndex"] = self.tupleIndex
        writer.begintag("kernsubtable", **attrs)
        writer.newline()
        items = sorted(self.kernTable.items())
        for (left, right), value in items:
            writer.simpletag("pair", [("l", left), ("r", right), ("v", value)])
            writer.newline()
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse a <kernsubtable> element and its <pair> children into
        self.kernTable."""
        self.coverage = safeEval(attrs["coverage"])
        subtableFormat = safeEval(attrs["format"])
        if self.apple:
            if "tupleIndex" in attrs:
                self.tupleIndex = safeEval(attrs["tupleIndex"])
            else:
                # previous fontTools versions didn't export tupleIndex
                log.warning("Apple kern subtable is missing 'tupleIndex' attribute")
                self.tupleIndex = None
        else:
            self.tupleIndex = None
        assert subtableFormat == self.format, "unsupported format"
        if not hasattr(self, "kernTable"):
            self.kernTable = {}
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, content = element
            self.kernTable[(attrs["l"], attrs["r"])] = safeEval(attrs["v"])

    def __getitem__(self, pair):
        return self.kernTable[pair]

    def __setitem__(self, pair, value):
        self.kernTable[pair] = value

    def __delitem__(self, pair):
        del self.kernTable[pair]
|
||||
|
||||
|
||||
class KernTable_format_unkown(object):
    """Fallback for 'kern' subtable formats we cannot parse.

    Keeps the raw subtable bytes verbatim so the font round-trips without
    loss.  NOTE(review): the class name misspells "unknown", but it is
    public API referenced elsewhere and must not be renamed.
    """

    def __init__(self, format):
        self.format = format

    def decompile(self, data, ttFont):
        # Store the raw bytes unchanged.
        self.data = data

    def compile(self, ttFont):
        return self.data

    def toXML(self, writer, ttFont):
        """Dump the raw subtable bytes as hex inside a <kernsubtable>."""
        writer.begintag("kernsubtable", format=self.format)
        writer.newline()
        writer.comment("unknown 'kern' subtable format")
        writer.newline()
        writer.dumphex(self.data)
        writer.endtag("kernsubtable")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        # Hex-encoded content round-trips straight back into self.data.
        self.decompile(readHex(content), ttFont)
|
||||
|
||||
|
||||
# Registry mapping 'kern' subtable format numbers to handler classes.
# Formats not listed here fall back to KernTable_format_unkown.
kern_classes = {0: KernTable_format_0}
|
||||
@ -0,0 +1,5 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
class table__l_c_a_r(BaseTTXConverter):
    """AAT ligature caret ('lcar') table.

    All compile/decompile/XML behavior is inherited from BaseTTXConverter.
    """

    pass
|
||||
@ -0,0 +1,62 @@
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import array
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class table__l_o_c_a(DefaultTable.DefaultTable):
    """Glyph location ('loca') table.

    Stores the byte offset of each glyph in the 'glyf' table, plus one
    trailing sentinel offset.  Offsets are kept internally as an
    array.array("I"); the short (16-bit, halved) vs long (32-bit) storage
    format is governed by head.indexToLocFormat.
    """

    dependencies = ["glyf"]

    def decompile(self, data, ttFont):
        """Unpack offsets, expanding the short format (stored as offset/2)
        to full 32-bit offsets."""
        longFormat = ttFont["head"].indexToLocFormat
        if longFormat:
            format = "I"
        else:
            format = "H"
        locations = array.array(format)
        locations.frombytes(data)
        if sys.byteorder != "big":
            locations.byteswap()
        if not longFormat:
            # Short format stores offset // 2; restore actual offsets.
            locations = array.array("I", (2 * l for l in locations))
        if len(locations) < (ttFont["maxp"].numGlyphs + 1):
            # 'loca' must have numGlyphs + 1 entries (final sentinel).
            log.warning(
                "corrupt 'loca' table, or wrong numGlyphs in 'maxp': %d %d",
                len(locations) - 1,
                ttFont["maxp"].numGlyphs,
            )
        self.locations = locations

    def compile(self, ttFont):
        """Pack offsets, choosing the short format when every offset is
        even and below 0x20000, and record the choice in
        head.indexToLocFormat."""
        try:
            max_location = max(self.locations)
        except AttributeError:
            self.set([])
            max_location = 0
        if max_location < 0x20000 and all(l % 2 == 0 for l in self.locations):
            locations = array.array("H")
            for i in range(len(self.locations)):
                locations.append(self.locations[i] // 2)
            ttFont["head"].indexToLocFormat = 0
        else:
            locations = array.array("I", self.locations)
            ttFont["head"].indexToLocFormat = 1
        if sys.byteorder != "big":
            locations.byteswap()
        return locations.tobytes()

    def set(self, locations):
        """Replace the offset list with the given iterable of offsets."""
        self.locations = array.array("I", locations)

    def toXML(self, writer, ttFont):
        # 'loca' is fully derived from 'glyf', so TTX stores no data for it.
        writer.comment("The 'loca' table will be calculated by the compiler")
        writer.newline()

    def __getitem__(self, index):
        return self.locations[index]

    def __len__(self):
        return len(self.locations)
|
||||
@ -0,0 +1,64 @@
|
||||
from fontTools.misc.textTools import bytesjoin, tobytes, safeEval
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6ltag.html
|
||||
|
||||
|
||||
class table__l_t_a_g(DefaultTable.DefaultTable):
    """Language tag ('ltag') table.

    Stores a list of IETF BCP 47 language-tag strings referenced by index
    from other AAT tables.  Binary layout: a header, an (offset, length)
    record per tag, and a shared string pool.
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version, self.flags = 1, 0
        self.tags = []

    def addTag(self, tag):
        """Add 'tag' to the list of language tags if not already there.

        Returns the integer index of 'tag' in the list of all tags.
        """
        try:
            return self.tags.index(tag)
        except ValueError:
            self.tags.append(tag)
            return len(self.tags) - 1

    def decompile(self, data, ttFont):
        """Unpack the header and resolve each (offset, length) record
        against the string pool."""
        self.version, self.flags, numTags = struct.unpack(">LLL", data[:12])
        assert self.version == 1
        self.tags = []
        for i in range(numTags):
            pos = 12 + i * 4
            offset, length = struct.unpack(">HH", data[pos : pos + 4])
            tag = data[offset : offset + length].decode("ascii")
            self.tags.append(tag)

    def compile(self, ttFont):
        """Pack the header, per-tag records and string pool, reusing pool
        substrings when a tag already occurs in it."""
        dataList = [struct.pack(">LLL", self.version, self.flags, len(self.tags))]
        stringPool = ""
        for tag in self.tags:
            offset = stringPool.find(tag)
            if offset < 0:
                offset = len(stringPool)
                stringPool = stringPool + tag
            # Offsets are relative to the table start: header (12 bytes)
            # plus one 4-byte record per tag precede the pool.
            offset = offset + 12 + len(self.tags) * 4
            dataList.append(struct.pack(">HH", offset, len(tag)))
        dataList.append(tobytes(stringPool))
        return bytesjoin(dataList)

    def toXML(self, writer, ttFont):
        """Write version, flags and one <LanguageTag> element per tag."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("flags", value=self.flags)
        writer.newline()
        for tag in self.tags:
            writer.simpletag("LanguageTag", tag=tag)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Parse <LanguageTag> elements and the version/flags simpletags."""
        if not hasattr(self, "tags"):
            self.tags = []
        if name == "LanguageTag":
            self.tags.append(attrs["tag"])
        elif "value" in attrs:
            value = safeEval(attrs["value"])
            setattr(self, name, value)
|
||||
@ -0,0 +1,139 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from . import DefaultTable
|
||||
|
||||
maxpFormat_0_5 = """
|
||||
> # big endian
|
||||
tableVersion: i
|
||||
numGlyphs: H
|
||||
"""
|
||||
|
||||
maxpFormat_1_0_add = """
|
||||
> # big endian
|
||||
maxPoints: H
|
||||
maxContours: H
|
||||
maxCompositePoints: H
|
||||
maxCompositeContours: H
|
||||
maxZones: H
|
||||
maxTwilightPoints: H
|
||||
maxStorage: H
|
||||
maxFunctionDefs: H
|
||||
maxInstructionDefs: H
|
||||
maxStackElements: H
|
||||
maxSizeOfInstructions: H
|
||||
maxComponentElements: H
|
||||
maxComponentDepth: H
|
||||
"""
|
||||
|
||||
|
||||
class table__m_a_x_p(DefaultTable.DefaultTable):
|
||||
dependencies = ["glyf"]
|
||||
|
||||
def decompile(self, data, ttFont):
|
||||
dummy, data = sstruct.unpack2(maxpFormat_0_5, data, self)
|
||||
self.numGlyphs = int(self.numGlyphs)
|
||||
if self.tableVersion != 0x00005000:
|
||||
dummy, data = sstruct.unpack2(maxpFormat_1_0_add, data, self)
|
||||
assert len(data) == 0
|
||||
|
||||
def compile(self, ttFont):
|
||||
if "glyf" in ttFont:
|
||||
if ttFont.isLoaded("glyf") and ttFont.recalcBBoxes:
|
||||
self.recalc(ttFont)
|
||||
else:
|
||||
pass # CFF
|
||||
self.numGlyphs = len(ttFont.getGlyphOrder())
|
||||
if self.tableVersion != 0x00005000:
|
||||
self.tableVersion = 0x00010000
|
||||
data = sstruct.pack(maxpFormat_0_5, self)
|
||||
if self.tableVersion == 0x00010000:
|
||||
data = data + sstruct.pack(maxpFormat_1_0_add, self)
|
||||
return data
|
||||
|
||||
def recalc(self, ttFont):
|
||||
"""Recalculate the font bounding box, and most other maxp values except
|
||||
for the TT instructions values. Also recalculate the value of bit 1
|
||||
of the flags field and the font bounding box of the 'head' table.
|
||||
"""
|
||||
glyfTable = ttFont["glyf"]
|
||||
hmtxTable = ttFont["hmtx"]
|
||||
headTable = ttFont["head"]
|
||||
self.numGlyphs = len(glyfTable)
|
||||
INFINITY = 100000
|
||||
xMin = +INFINITY
|
||||
yMin = +INFINITY
|
||||
xMax = -INFINITY
|
||||
yMax = -INFINITY
|
||||
maxPoints = 0
|
||||
maxContours = 0
|
||||
maxCompositePoints = 0
|
||||
maxCompositeContours = 0
|
||||
maxComponentElements = 0
|
||||
maxComponentDepth = 0
|
||||
allXMinIsLsb = 1
|
||||
for glyphName in ttFont.getGlyphOrder():
|
||||
g = glyfTable[glyphName]
|
||||
if g.numberOfContours:
|
||||
if hmtxTable[glyphName][1] != g.xMin:
|
||||
allXMinIsLsb = 0
|
||||
xMin = min(xMin, g.xMin)
|
||||
yMin = min(yMin, g.yMin)
|
||||
xMax = max(xMax, g.xMax)
|
||||
yMax = max(yMax, g.yMax)
|
||||
if g.numberOfContours > 0:
|
||||
nPoints, nContours = g.getMaxpValues()
|
||||
maxPoints = max(maxPoints, nPoints)
|
||||
maxContours = max(maxContours, nContours)
|
||||
elif g.isComposite():
|
||||
nPoints, nContours, componentDepth = g.getCompositeMaxpValues(
|
||||
glyfTable
|
||||
)
|
||||
maxCompositePoints = max(maxCompositePoints, nPoints)
|
||||
maxCompositeContours = max(maxCompositeContours, nContours)
|
||||
maxComponentElements = max(maxComponentElements, len(g.components))
|
||||
maxComponentDepth = max(maxComponentDepth, componentDepth)
|
||||
if xMin == +INFINITY:
|
||||
headTable.xMin = 0
|
||||
headTable.yMin = 0
|
||||
headTable.xMax = 0
|
||||
headTable.yMax = 0
|
||||
else:
|
||||
headTable.xMin = xMin
|
||||
headTable.yMin = yMin
|
||||
headTable.xMax = xMax
|
||||
headTable.yMax = yMax
|
||||
self.maxPoints = maxPoints
|
||||
self.maxContours = maxContours
|
||||
self.maxCompositePoints = maxCompositePoints
|
||||
self.maxCompositeContours = maxCompositeContours
|
||||
self.maxComponentElements = maxComponentElements
|
||||
self.maxComponentDepth = maxComponentDepth
|
||||
if allXMinIsLsb:
|
||||
headTable.flags = headTable.flags | 0x2
|
||||
else:
|
||||
headTable.flags = headTable.flags & ~0x2
|
||||
|
||||
def testrepr(self):
|
||||
items = sorted(self.__dict__.items())
|
||||
print(". . . . . . . . .")
|
||||
for combo in items:
|
||||
print(" %s: %s" % combo)
|
||||
print(". . . . . . . . .")
|
||||
|
||||
def toXML(self, writer, ttFont):
    """Write this 'maxp' table to TTX XML.

    Version 0.5 (used in CFF-flavoured fonts) carries only the
    maxpFormat_0_5 fields; any other version also carries the
    format 1.0 fields (maxpFormat_1_0_add).
    """
    if self.tableVersion != 0x00005000:
        writer.comment("Most of this table will be recalculated by the compiler")
        writer.newline()
    formatstring, names, fixes = sstruct.getformat(maxpFormat_0_5)
    if self.tableVersion != 0x00005000:
        formatstring, names_1_0, fixes = sstruct.getformat(maxpFormat_1_0_add)
        # append the format 1.0 field names after the 0.5 ones
        names = {**names, **names_1_0}
    for name in names:
        value = getattr(self, name)
        if name == "tableVersion":
            # the version is conventionally shown in hex (e.g. 0x10000)
            value = hex(value)
        writer.simpletag(name, value=value)
        writer.newline()
||||
def fromXML(self, name, attrs, content, ttFont):
    """Restore one header field from its TTX element's 'value' attribute."""
    setattr(self, name, safeEval(attrs["value"]))
||||
@ -0,0 +1,104 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytesjoin, strjoin, readHex
|
||||
from fontTools.ttLib import TTLibError
|
||||
from . import DefaultTable
|
||||
|
||||
# Apple's documentation of 'meta':
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6meta.html
|
||||
|
||||
META_HEADER_FORMAT = """
|
||||
> # big endian
|
||||
version: L
|
||||
flags: L
|
||||
dataOffset: L
|
||||
numDataMaps: L
|
||||
"""
|
||||
|
||||
|
||||
DATA_MAP_FORMAT = """
|
||||
> # big endian
|
||||
tag: 4s
|
||||
dataOffset: L
|
||||
dataLength: L
|
||||
"""
|
||||
|
||||
|
||||
class table__m_e_t_a(DefaultTable.DefaultTable):
    """Metadata table ('meta').

    Maps 4-character tags to arbitrary payloads.  The 'dlng' and 'slng'
    tags are specified to contain UTF-8 text and are kept as str; all
    other payloads are kept as raw bytes.
    """

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        # tag -> payload (str for 'dlng'/'slng', bytes otherwise)
        self.data = {}

    def decompile(self, data, ttFont):
        """Parse the binary table: header, data maps, then the payloads.

        Raises TTLibError for any header version other than 1.
        """
        headerSize = sstruct.calcsize(META_HEADER_FORMAT)
        header = sstruct.unpack(META_HEADER_FORMAT, data[0:headerSize])
        if header["version"] != 1:
            raise TTLibError("unsupported 'meta' version %d" % header["version"])
        dataMapSize = sstruct.calcsize(DATA_MAP_FORMAT)
        for i in range(header["numDataMaps"]):
            dataMapOffset = headerSize + i * dataMapSize
            dataMap = sstruct.unpack(
                DATA_MAP_FORMAT, data[dataMapOffset : dataMapOffset + dataMapSize]
            )
            tag = dataMap["tag"]
            offset = dataMap["dataOffset"]
            self.data[tag] = data[offset : offset + dataMap["dataLength"]]
            if tag in ["dlng", "slng"]:
                # language-tag payloads are defined to be UTF-8 text
                self.data[tag] = self.data[tag].decode("utf-8")

    def compile(self, ttFont):
        """Serialize: header, one data map per tag (sorted), then payloads."""
        keys = sorted(self.data.keys())
        headerSize = sstruct.calcsize(META_HEADER_FORMAT)
        # payloads start right after the header and the data-map records
        dataOffset = headerSize + len(keys) * sstruct.calcsize(DATA_MAP_FORMAT)
        header = sstruct.pack(
            META_HEADER_FORMAT,
            {
                "version": 1,
                "flags": 0,
                "dataOffset": dataOffset,
                "numDataMaps": len(keys),
            },
        )
        dataMaps = []
        dataBlocks = []
        for tag in keys:
            if tag in ["dlng", "slng"]:
                data = self.data[tag].encode("utf-8")
            else:
                data = self.data[tag]
            dataMaps.append(
                sstruct.pack(
                    DATA_MAP_FORMAT,
                    {"tag": tag, "dataOffset": dataOffset, "dataLength": len(data)},
                )
            )
            dataBlocks.append(data)
            dataOffset += len(data)
        return bytesjoin([header] + dataMaps + dataBlocks)

    def toXML(self, writer, ttFont):
        """Dump entries as <text> (dlng/slng) or <hexdata> elements."""
        for tag in sorted(self.data.keys()):
            if tag in ["dlng", "slng"]:
                writer.begintag("text", tag=tag)
                writer.newline()
                writer.write(self.data[tag])
                writer.newline()
                writer.endtag("text")
                writer.newline()
            else:
                writer.begintag("hexdata", tag=tag)
                writer.newline()
                data = self.data[tag]
                # Only annotate non-empty, fully printable-ASCII payloads.
                # The emptiness guard matters: min()/max() on empty bytes
                # raises ValueError, which used to crash the XML dump.
                if data and min(data) >= 0x20 and max(data) <= 0x7E:
                    writer.comment("ascii: " + data.decode("ascii"))
                    writer.newline()
                writer.dumphex(data)
                writer.endtag("hexdata")
                writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild one entry from a TTX element; raise TTLibError otherwise."""
        if name == "hexdata":
            self.data[attrs["tag"]] = readHex(content)
        elif name == "text" and attrs["tag"] in ["dlng", "slng"]:
            self.data[attrs["tag"]] = strjoin(content).strip()
        else:
            raise TTLibError("can't handle '%s' element" % name)
||||
@ -0,0 +1,6 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6mort.html
class table__m_o_r_t(BaseTTXConverter):
    """AAT glyph metamorphosis table ('mort'); handled by the otBase converter."""

    pass
@ -0,0 +1,6 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6morx.html
class table__m_o_r_x(BaseTTXConverter):
    """AAT extended glyph metamorphosis table ('morx'); handled by the otBase converter."""

    pass
1235
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/_n_a_m_e.py
Normal file
1235
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/_n_a_m_e.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,6 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6opbd.html
class table__o_p_b_d(BaseTTXConverter):
    """AAT optical bounds table ('opbd'); handled by the otBase converter."""

    pass
@ -0,0 +1,308 @@
|
||||
from fontTools import ttLib
|
||||
from fontTools.ttLib.standardGlyphOrder import standardGlyphOrder
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytechr, byteord, tobytes, tostr, safeEval, readHex
|
||||
from . import DefaultTable
|
||||
import sys
|
||||
import struct
|
||||
import array
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
postFormat = """
|
||||
>
|
||||
formatType: 16.16F
|
||||
italicAngle: 16.16F # italic angle in degrees
|
||||
underlinePosition: h
|
||||
underlineThickness: h
|
||||
isFixedPitch: L
|
||||
minMemType42: L # minimum memory if TrueType font is downloaded
|
||||
maxMemType42: L # maximum memory if TrueType font is downloaded
|
||||
minMemType1: L # minimum memory if Type1 font is downloaded
|
||||
maxMemType1: L # maximum memory if Type1 font is downloaded
|
||||
"""
|
||||
|
||||
postFormatSize = sstruct.calcsize(postFormat)
|
||||
|
||||
|
||||
class table__p_o_s_t(DefaultTable.DefaultTable):
    """PostScript table ('post'): glyph names plus PostScript printing metrics."""

    def decompile(self, data, ttFont):
        """Unpack the fixed header, then dispatch on self.formatType."""
        sstruct.unpack(postFormat, data[:postFormatSize], self)
        data = data[postFormatSize:]
        if self.formatType == 1.0:
            self.decode_format_1_0(data, ttFont)
        elif self.formatType == 2.0:
            self.decode_format_2_0(data, ttFont)
        elif self.formatType == 3.0:
            self.decode_format_3_0(data, ttFont)
        elif self.formatType == 4.0:
            self.decode_format_4_0(data, ttFont)
        else:
            # unsupported format
            raise ttLib.TTLibError(
                "'post' table format %f not supported" % self.formatType
            )

    def compile(self, ttFont):
        """Pack the fixed header, then append format-specific name data."""
        data = sstruct.pack(postFormat, self)
        if self.formatType == 1.0:
            pass  # we're done
        elif self.formatType == 2.0:
            data = data + self.encode_format_2_0(ttFont)
        elif self.formatType == 3.0:
            pass  # we're done
        elif self.formatType == 4.0:
            data = data + self.encode_format_4_0(ttFont)
        else:
            # unsupported format
            raise ttLib.TTLibError(
                "'post' table format %f not supported" % self.formatType
            )
        return data

    def getGlyphOrder(self):
        """This function will get called by a ttLib.TTFont instance.
        Do not call this function yourself, use TTFont().getGlyphOrder()
        or its relatives instead!
        """
        if not hasattr(self, "glyphOrder"):
            raise ttLib.TTLibError("illegal use of getGlyphOrder()")
        glyphOrder = self.glyphOrder
        # hand the order over exactly once; a second call raises above
        del self.glyphOrder
        return glyphOrder

    def decode_format_1_0(self, data, ttFont):
        """Format 1.0: glyphs follow the standard Macintosh order verbatim."""
        self.glyphOrder = standardGlyphOrder[: ttFont["maxp"].numGlyphs]

    def decode_format_2_0(self, data, ttFont):
        """Format 2.0: per-glyph indices into the standard order (0-257)
        or into a trailing list of Pascal-string names (258 and up)."""
        (numGlyphs,) = struct.unpack(">H", data[:2])
        numGlyphs = int(numGlyphs)
        if numGlyphs > ttFont["maxp"].numGlyphs:
            # Assume the numGlyphs field is bogus, so sync with maxp.
            # I've seen this in one font, and if the assumption is
            # wrong elsewhere, well, so be it: it's hard enough to
            # work around _one_ non-conforming post format...
            numGlyphs = ttFont["maxp"].numGlyphs
        data = data[2:]
        indices = array.array("H")
        indices.frombytes(data[: 2 * numGlyphs])
        if sys.byteorder != "big":
            indices.byteswap()
        data = data[2 * numGlyphs :]
        # indices 258+ refer to the extra-names list; its length is maxIndex-257
        maxIndex = max(indices)
        self.extraNames = extraNames = unpackPStrings(data, maxIndex - 257)
        self.glyphOrder = glyphOrder = [""] * int(ttFont["maxp"].numGlyphs)
        for glyphID in range(numGlyphs):
            index = indices[glyphID]
            if index > 257:
                try:
                    name = extraNames[index - 258]
                except IndexError:
                    # tolerate fonts whose indices point past the name list
                    name = ""
            else:
                # fetch names from standard list
                name = standardGlyphOrder[index]
            glyphOrder[glyphID] = name
        self.build_psNameMapping(ttFont)

    def build_psNameMapping(self, ttFont):
        """Make every glyph name unique (appending '#N' on clashes) and
        record the original PostScript names in self.mapping."""
        mapping = {}
        allNames = {}
        for i in range(ttFont["maxp"].numGlyphs):
            glyphName = psName = self.glyphOrder[i]
            if glyphName == "":
                glyphName = "glyph%.5d" % i
            if glyphName in allNames:
                # make up a new glyphName that's unique
                n = allNames[glyphName]
                while (glyphName + "#" + str(n)) in allNames:
                    n += 1
                allNames[glyphName] = n + 1
                glyphName = glyphName + "#" + str(n)

            self.glyphOrder[i] = glyphName
            allNames[glyphName] = 1
            if glyphName != psName:
                mapping[glyphName] = psName

        self.mapping = mapping

    def decode_format_3_0(self, data, ttFont):
        """Format 3.0: the table stores no glyph names at all."""
        # Setting self.glyphOrder to None will cause the TTFont object
        # try and construct glyph names from a Unicode cmap table.
        self.glyphOrder = None

    def decode_format_4_0(self, data, ttFont):
        """Format 4.0 (Apple): one Unicode value per glyph; synthesize names."""
        from fontTools import agl

        numGlyphs = ttFont["maxp"].numGlyphs
        indices = array.array("H")
        indices.frombytes(data)
        if sys.byteorder != "big":
            indices.byteswap()
        # In some older fonts, the size of the post table doesn't match
        # the number of glyphs. Sometimes it's bigger, sometimes smaller.
        self.glyphOrder = glyphOrder = [""] * int(numGlyphs)
        for i in range(min(len(indices), numGlyphs)):
            if indices[i] == 0xFFFF:
                self.glyphOrder[i] = ""
            elif indices[i] in agl.UV2AGL:
                self.glyphOrder[i] = agl.UV2AGL[indices[i]]
            else:
                self.glyphOrder[i] = "uni%04X" % indices[i]
        self.build_psNameMapping(ttFont)

    def encode_format_2_0(self, ttFont):
        """Inverse of decode_format_2_0: index array plus packed extra names."""
        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        extraDict = {}
        # drop extra names that are (now) part of the standard order
        extraNames = self.extraNames = [
            n for n in self.extraNames if n not in standardGlyphOrder
        ]
        for i in range(len(extraNames)):
            extraDict[extraNames[i]] = i
        for glyphID in range(numGlyphs):
            glyphName = glyphOrder[glyphID]
            if glyphName in self.mapping:
                psName = self.mapping[glyphName]
            else:
                psName = glyphName
            if psName in extraDict:
                index = 258 + extraDict[psName]
            elif psName in standardGlyphOrder:
                index = standardGlyphOrder.index(psName)
            else:
                # unseen name: append to the extra-names list
                index = 258 + len(extraNames)
                extraDict[psName] = len(extraNames)
                extraNames.append(psName)
            indices.append(index)
        if sys.byteorder != "big":
            indices.byteswap()
        return (
            struct.pack(">H", numGlyphs) + indices.tobytes() + packPStrings(extraNames)
        )

    def encode_format_4_0(self, ttFont):
        """Inverse of decode_format_4_0: one Unicode value per glyph."""
        from fontTools import agl

        numGlyphs = ttFont["maxp"].numGlyphs
        glyphOrder = ttFont.getGlyphOrder()
        assert len(glyphOrder) == numGlyphs
        indices = array.array("H")
        for glyphID in glyphOrder:
            # strip the '#N' uniqueness suffix added by build_psNameMapping
            glyphID = glyphID.split("#")[0]
            if glyphID in agl.AGL2UV:
                indices.append(agl.AGL2UV[glyphID])
            elif len(glyphID) == 7 and glyphID[:3] == "uni":
                indices.append(int(glyphID[3:], 16))
            else:
                # no Unicode value can be derived from the name
                indices.append(0xFFFF)
        if sys.byteorder != "big":
            indices.byteswap()
        return indices.tobytes()

    def toXML(self, writer, ttFont):
        """Dump header fields, then psNames / extraNames / raw hexdata."""
        formatstring, names, fixes = sstruct.getformat(postFormat)
        for name in names:
            value = getattr(self, name)
            writer.simpletag(name, value=value)
            writer.newline()
        if hasattr(self, "mapping"):
            writer.begintag("psNames")
            writer.newline()
            writer.comment(
                "This file uses unique glyph names based on the information\n"
                "found in the 'post' table. Since these names might not be unique,\n"
                "we have to invent artificial names in case of clashes. In order to\n"
                "be able to retain the original information, we need a name to\n"
                "ps name mapping for those cases where they differ. That's what\n"
                "you see below.\n"
            )
            writer.newline()
            items = sorted(self.mapping.items())
            for name, psName in items:
                writer.simpletag("psName", name=name, psName=psName)
                writer.newline()
            writer.endtag("psNames")
            writer.newline()
        if hasattr(self, "extraNames"):
            writer.begintag("extraNames")
            writer.newline()
            writer.comment(
                "following are the name that are not taken from the standard Mac glyph order"
            )
            writer.newline()
            for name in self.extraNames:
                writer.simpletag("psName", name=name)
                writer.newline()
            writer.endtag("extraNames")
            writer.newline()
        if hasattr(self, "data"):
            writer.begintag("hexdata")
            writer.newline()
            writer.dumphex(self.data)
            writer.endtag("hexdata")
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Restore header fields and name data from TTX elements."""
        if name not in ("psNames", "extraNames", "hexdata"):
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "psNames":
            self.mapping = {}
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.mapping[attrs["name"]] = attrs["psName"]
        elif name == "extraNames":
            self.extraNames = []
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content = element
                if name == "psName":
                    self.extraNames.append(attrs["name"])
        else:
            self.data = readHex(content)
|
||||
|
||||
def unpackPStrings(data, n):
    """Extract *n* Pascal strings from *data*.

    A Pascal string is a single length byte followed by that many
    Latin-1 bytes.  Whenever the buffer runs short, "" is substituted;
    trailing or missing bytes are logged as warnings.
    """
    names = []
    pos = 0
    total = len(data)

    for _ in range(n):
        length = byteord(data[pos]) if pos < total else 0
        pos += 1

        if total <= pos + length - 1:
            names.append("")
        else:
            names.append(tostr(data[pos : pos + length], encoding="latin1"))
        pos += length

    if pos < total:
        log.warning("%d extra bytes in post.stringData array", total - pos)
    elif total < pos:
        log.warning("not enough data in post.stringData array")

    return names
||||
|
||||
def packPStrings(strings):
    """Serialize *strings* as concatenated Pascal strings.

    Each item is emitted as one length byte followed by its Latin-1
    encoded payload (bytes items are passed through unchanged).

    Raises ValueError (with a clear message, instead of the cryptic
    ``bytes must be in range(0, 256)``) when a name is longer than the
    255 bytes a Pascal length byte can express.
    """
    chunks = []
    for s in strings:
        payload = s.encode("latin1") if isinstance(s, str) else bytes(s)
        if len(payload) > 255:
            raise ValueError(
                "string too long for Pascal string (max 255 bytes): %r" % (s,)
            )
        chunks.append(bytes((len(payload),)) + payload)
    # join once instead of quadratic bytes concatenation
    return b"".join(chunks)
@ -0,0 +1,7 @@
|
||||
from fontTools import ttLib
|
||||
|
||||
# 'prep' shares its binary layout with 'fpgm': both are raw TrueType programs.
superclass = ttLib.getTableClass("fpgm")


class table__p_r_e_p(superclass):
    """Control value program table ('prep'); inherits all behavior from 'fpgm'."""

    pass
@ -0,0 +1,6 @@
|
||||
from .otBase import BaseTTXConverter
|
||||
|
||||
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6prop.html
class table__p_r_o_p(BaseTTXConverter):
    """AAT glyph properties table ('prop'); handled by the otBase converter."""

    pass
@ -0,0 +1,119 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval, num2binary, binary2num
|
||||
from . import DefaultTable
|
||||
from .sbixStrike import Strike
|
||||
|
||||
|
||||
sbixHeaderFormat = """
|
||||
>
|
||||
version: H # Version number (set to 1)
|
||||
flags: H # The only two bits used in the flags field are bits 0
|
||||
# and 1. For historical reasons, bit 0 must always be 1.
|
||||
# Bit 1 is a sbixDrawOutlines flag and is interpreted as
|
||||
# follows:
|
||||
# 0: Draw only 'sbix' bitmaps
|
||||
# 1: Draw both 'sbix' bitmaps and outlines, in that
|
||||
# order
|
||||
numStrikes: L # Number of bitmap strikes to follow
|
||||
"""
|
||||
sbixHeaderFormatSize = sstruct.calcsize(sbixHeaderFormat)
|
||||
|
||||
|
||||
sbixStrikeOffsetFormat = """
|
||||
>
|
||||
strikeOffset: L # Offset from begining of table to data for the
|
||||
# individual strike
|
||||
"""
|
||||
sbixStrikeOffsetFormatSize = sstruct.calcsize(sbixStrikeOffsetFormat)
|
||||
|
||||
|
||||
class table__s_b_i_x(DefaultTable.DefaultTable):
    """Standard bitmap graphics table ('sbix'): one Strike per ppem size."""

    def __init__(self, tag=None):
        DefaultTable.DefaultTable.__init__(self, tag)
        self.version = 1
        # bit 0 must always be set; bit 1 = also draw outlines
        self.flags = 1
        self.numStrikes = 0
        # ppem -> Strike
        self.strikes = {}
        self.strikeOffsets = []

    def decompile(self, data, ttFont):
        """Parse header and strike offsets, then decompile each Strike."""
        # read table header
        sstruct.unpack(sbixHeaderFormat, data[:sbixHeaderFormatSize], self)
        # collect offsets to individual strikes in self.strikeOffsets
        for i in range(self.numStrikes):
            current_offset = sbixHeaderFormatSize + i * sbixStrikeOffsetFormatSize
            offset_entry = sbixStrikeOffset()
            sstruct.unpack(
                sbixStrikeOffsetFormat,
                data[current_offset : current_offset + sbixStrikeOffsetFormatSize],
                offset_entry,
            )
            self.strikeOffsets.append(offset_entry.strikeOffset)

        # decompile Strikes, last-to-first so each slice ends where the
        # previously processed strike began
        for i in range(self.numStrikes - 1, -1, -1):
            current_strike = Strike(rawdata=data[self.strikeOffsets[i] :])
            data = data[: self.strikeOffsets[i]]
            current_strike.decompile(ttFont)
            # print "  Strike length: %xh" % len(bitmapSetData)
            # print "Number of Glyph entries:", len(current_strike.glyphs)
            if current_strike.ppem in self.strikes:
                from fontTools import ttLib

                raise ttLib.TTLibError("Pixel 'ppem' must be unique for each Strike")
            self.strikes[current_strike.ppem] = current_strike

        # after the glyph data records have been extracted, we don't need the offsets anymore
        del self.strikeOffsets
        del self.numStrikes

    def compile(self, ttFont):
        """Serialize header, strike offsets, and strike data (sorted by ppem)."""
        sbixData = b""
        self.numStrikes = len(self.strikes)
        sbixHeader = sstruct.pack(sbixHeaderFormat, self)

        # calculate offset to start of first strike
        setOffset = sbixHeaderFormatSize + sbixStrikeOffsetFormatSize * self.numStrikes

        for si in sorted(self.strikes.keys()):
            current_strike = self.strikes[si]
            current_strike.compile(ttFont)
            # append offset to this strike to table header
            current_strike.strikeOffset = setOffset
            sbixHeader += sstruct.pack(sbixStrikeOffsetFormat, current_strike)
            setOffset += len(current_strike.data)
            sbixData += current_strike.data

        return sbixHeader + sbixData

    def toXML(self, xmlWriter, ttFont):
        """Dump version, flags (as a binary string), and each strike."""
        xmlWriter.simpletag("version", value=self.version)
        xmlWriter.newline()
        xmlWriter.simpletag("flags", value=num2binary(self.flags, 16))
        xmlWriter.newline()
        for i in sorted(self.strikes.keys()):
            self.strikes[i].toXML(xmlWriter, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Restore version/flags/strike elements; raise TTLibError otherwise."""
        if name == "version":
            setattr(self, name, safeEval(attrs["value"]))
        elif name == "flags":
            setattr(self, name, binary2num(attrs["value"]))
        elif name == "strike":
            current_strike = Strike()
            for element in content:
                if isinstance(element, tuple):
                    name, attrs, content = element
                    current_strike.fromXML(name, attrs, content, ttFont)
            self.strikes[current_strike.ppem] = current_strike
        else:
            from fontTools import ttLib

            raise ttLib.TTLibError("can't handle '%s' element" % name)
|
||||
|
||||
# Helper classes


class sbixStrikeOffset(object):
    """Scratch record used as the unpack target for one strike-offset
    entry; sstruct.unpack assigns its 'strikeOffset' attribute."""

    pass
@ -0,0 +1,325 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.fixedTools import (
|
||||
fixedToFloat as fi2fl,
|
||||
floatToFixed as fl2fi,
|
||||
floatToFixedToStr as fl2str,
|
||||
strToFixedToFloat as str2fl,
|
||||
)
|
||||
from fontTools.misc.textTools import bytesjoin, safeEval
|
||||
from fontTools.ttLib import TTLibError
|
||||
from . import DefaultTable
|
||||
import struct
|
||||
from collections.abc import MutableMapping
|
||||
|
||||
|
||||
# Apple's documentation of 'trak':
|
||||
# https://developer.apple.com/fonts/TrueType-Reference-Manual/RM06/Chap6trak.html
|
||||
|
||||
TRAK_HEADER_FORMAT = """
|
||||
> # big endian
|
||||
version: 16.16F
|
||||
format: H
|
||||
horizOffset: H
|
||||
vertOffset: H
|
||||
reserved: H
|
||||
"""
|
||||
|
||||
TRAK_HEADER_FORMAT_SIZE = sstruct.calcsize(TRAK_HEADER_FORMAT)
|
||||
|
||||
|
||||
TRACK_DATA_FORMAT = """
|
||||
> # big endian
|
||||
nTracks: H
|
||||
nSizes: H
|
||||
sizeTableOffset: L
|
||||
"""
|
||||
|
||||
TRACK_DATA_FORMAT_SIZE = sstruct.calcsize(TRACK_DATA_FORMAT)
|
||||
|
||||
|
||||
TRACK_TABLE_ENTRY_FORMAT = """
|
||||
> # big endian
|
||||
track: 16.16F
|
||||
nameIndex: H
|
||||
offset: H
|
||||
"""
|
||||
|
||||
TRACK_TABLE_ENTRY_FORMAT_SIZE = sstruct.calcsize(TRACK_TABLE_ENTRY_FORMAT)
|
||||
|
||||
|
||||
# size values are actually '16.16F' fixed-point values, but here I do the
|
||||
# fixedToFloat conversion manually instead of relying on sstruct
|
||||
SIZE_VALUE_FORMAT = ">l"
|
||||
SIZE_VALUE_FORMAT_SIZE = struct.calcsize(SIZE_VALUE_FORMAT)
|
||||
|
||||
# per-Size values are in 'FUnits', i.e. 16-bit signed integers
|
||||
PER_SIZE_VALUE_FORMAT = ">h"
|
||||
PER_SIZE_VALUE_FORMAT_SIZE = struct.calcsize(PER_SIZE_VALUE_FORMAT)
|
||||
|
||||
|
||||
class table__t_r_a_k(DefaultTable.DefaultTable):
    """Tracking table ('trak'): horizontal/vertical TrackData sections."""

    # name table entries are referenced by nameIndex in TrackTableEntry
    dependencies = ["name"]

    def compile(self, ttFont):
        """Serialize the header and the horiz/vert TrackData sections."""
        dataList = []
        offset = TRAK_HEADER_FORMAT_SIZE
        for direction in ("horiz", "vert"):
            trackData = getattr(self, direction + "Data", TrackData())
            offsetName = direction + "Offset"
            # set offset to 0 if None or empty
            if not trackData:
                setattr(self, offsetName, 0)
                continue
            # TrackData table format must be longword aligned
            alignedOffset = (offset + 3) & ~3
            padding, offset = b"\x00" * (alignedOffset - offset), alignedOffset
            setattr(self, offsetName, offset)

            data = trackData.compile(offset)
            offset += len(data)
            dataList.append(padding + data)

        self.reserved = 0
        tableData = bytesjoin([sstruct.pack(TRAK_HEADER_FORMAT, self)] + dataList)
        return tableData

    def decompile(self, data, ttFont):
        """Parse header; decompile each direction whose offset is non-zero."""
        sstruct.unpack(TRAK_HEADER_FORMAT, data[:TRAK_HEADER_FORMAT_SIZE], self)
        for direction in ("horiz", "vert"):
            trackData = TrackData()
            offset = getattr(self, direction + "Offset")
            if offset != 0:
                trackData.decompile(data, offset)
            setattr(self, direction + "Data", trackData)

    def toXML(self, writer, ttFont):
        """Dump version, format, and both TrackData sections."""
        writer.simpletag("version", value=self.version)
        writer.newline()
        writer.simpletag("format", value=self.format)
        writer.newline()
        for direction in ("horiz", "vert"):
            dataName = direction + "Data"
            writer.begintag(dataName)
            writer.newline()
            trackData = getattr(self, dataName, TrackData())
            trackData.toXML(writer, ttFont)
            writer.endtag(dataName)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Restore version/format fields and horizData/vertData sections."""
        if name == "version":
            self.version = safeEval(attrs["value"])
        elif name == "format":
            self.format = safeEval(attrs["value"])
        elif name in ("horizData", "vertData"):
            trackData = TrackData()
            setattr(self, name, trackData)
            for element in content:
                if not isinstance(element, tuple):
                    continue
                name, attrs, content_ = element
                trackData.fromXML(name, attrs, content_, ttFont)
|
||||
|
||||
class TrackData(MutableMapping):
    """Mapping of track value (float) -> TrackTableEntry for one direction."""

    def __init__(self, initialdata={}):
        # NOTE: the mutable default is safe here; it is only read via dict()
        self._map = dict(initialdata)

    def compile(self, offset):
        """Serialize this TrackData; *offset* is its position in the table,
        needed because the record stores absolute sub-offsets."""
        nTracks = len(self)
        sizes = self.sizes()
        nSizes = len(sizes)

        # offset to the start of the size subtable
        offset += TRACK_DATA_FORMAT_SIZE + TRACK_TABLE_ENTRY_FORMAT_SIZE * nTracks
        trackDataHeader = sstruct.pack(
            TRACK_DATA_FORMAT,
            {"nTracks": nTracks, "nSizes": nSizes, "sizeTableOffset": offset},
        )

        entryDataList = []
        perSizeDataList = []
        # offset to per-size tracking values
        offset += SIZE_VALUE_FORMAT_SIZE * nSizes
        # sort track table entries by track value
        for track, entry in sorted(self.items()):
            assert entry.nameIndex is not None
            entry.track = track
            entry.offset = offset
            entryDataList += [sstruct.pack(TRACK_TABLE_ENTRY_FORMAT, entry)]
            # sort per-size values by size
            for size, value in sorted(entry.items()):
                perSizeDataList += [struct.pack(PER_SIZE_VALUE_FORMAT, value)]
            offset += PER_SIZE_VALUE_FORMAT_SIZE * nSizes
        # sort size values
        sizeDataList = [
            struct.pack(SIZE_VALUE_FORMAT, fl2fi(sv, 16)) for sv in sorted(sizes)
        ]

        data = bytesjoin(
            [trackDataHeader] + entryDataList + sizeDataList + perSizeDataList
        )
        return data

    def decompile(self, data, offset):
        """Parse one TrackData section of the raw table at *offset*.

        Raises TTLibError if any subtable is truncated.
        """
        # initial offset is from the start of trak table to the current TrackData
        trackDataHeader = data[offset : offset + TRACK_DATA_FORMAT_SIZE]
        if len(trackDataHeader) != TRACK_DATA_FORMAT_SIZE:
            raise TTLibError("not enough data to decompile TrackData header")
        sstruct.unpack(TRACK_DATA_FORMAT, trackDataHeader, self)
        offset += TRACK_DATA_FORMAT_SIZE

        nSizes = self.nSizes
        sizeTableOffset = self.sizeTableOffset
        sizeTable = []
        for i in range(nSizes):
            sizeValueData = data[
                sizeTableOffset : sizeTableOffset + SIZE_VALUE_FORMAT_SIZE
            ]
            if len(sizeValueData) < SIZE_VALUE_FORMAT_SIZE:
                raise TTLibError("not enough data to decompile TrackData size subtable")
            (sizeValue,) = struct.unpack(SIZE_VALUE_FORMAT, sizeValueData)
            # sizes are stored as 16.16 fixed-point; convert to float
            sizeTable.append(fi2fl(sizeValue, 16))
            sizeTableOffset += SIZE_VALUE_FORMAT_SIZE

        for i in range(self.nTracks):
            entry = TrackTableEntry()
            entryData = data[offset : offset + TRACK_TABLE_ENTRY_FORMAT_SIZE]
            if len(entryData) < TRACK_TABLE_ENTRY_FORMAT_SIZE:
                raise TTLibError("not enough data to decompile TrackTableEntry record")
            sstruct.unpack(TRACK_TABLE_ENTRY_FORMAT, entryData, entry)
            perSizeOffset = entry.offset
            for j in range(nSizes):
                size = sizeTable[j]
                perSizeValueData = data[
                    perSizeOffset : perSizeOffset + PER_SIZE_VALUE_FORMAT_SIZE
                ]
                if len(perSizeValueData) < PER_SIZE_VALUE_FORMAT_SIZE:
                    raise TTLibError(
                        "not enough data to decompile per-size track values"
                    )
                (perSizeValue,) = struct.unpack(PER_SIZE_VALUE_FORMAT, perSizeValueData)
                entry[size] = perSizeValue
                perSizeOffset += PER_SIZE_VALUE_FORMAT_SIZE
            self[entry.track] = entry
            offset += TRACK_TABLE_ENTRY_FORMAT_SIZE

    def toXML(self, writer, ttFont):
        """Dump a summary comment and every entry, sorted by track value."""
        nTracks = len(self)
        nSizes = len(self.sizes())
        writer.comment("nTracks=%d, nSizes=%d" % (nTracks, nSizes))
        writer.newline()
        for track, entry in sorted(self.items()):
            assert entry.nameIndex is not None
            entry.track = track
            entry.toXML(writer, ttFont)

    def fromXML(self, name, attrs, content, ttFont):
        """Restore one <trackEntry>; ignore any other element."""
        if name != "trackEntry":
            return
        entry = TrackTableEntry()
        entry.fromXML(name, attrs, content, ttFont)
        self[entry.track] = entry

    def sizes(self):
        """Return the common size set of all entries.

        Raises TTLibError if entries disagree on their sizes.
        """
        if not self:
            return frozenset()
        tracks = list(self.tracks())
        sizes = self[tracks.pop(0)].sizes()
        for track in tracks:
            entrySizes = self[track].sizes()
            if sizes != entrySizes:
                raise TTLibError(
                    "'trak' table entries must specify the same sizes: "
                    "%s != %s" % (sorted(sizes), sorted(entrySizes))
                )
        return frozenset(sizes)

    def __getitem__(self, track):
        return self._map[track]

    def __delitem__(self, track):
        del self._map[track]

    def __setitem__(self, track, entry):
        self._map[track] = entry

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def keys(self):
        return self._map.keys()

    # alias: keys are the track values
    tracks = keys

    def __repr__(self):
        return "TrackData({})".format(self._map if self else "")
|
||||
|
||||
class TrackTableEntry(MutableMapping):
    """One tracking entry: mapping of size (float) -> per-size value (FUnits),
    plus a 'name' table index for the entry's display name."""

    def __init__(self, values={}, nameIndex=None):
        self.nameIndex = nameIndex
        # NOTE: the mutable default is safe here; it is only read via dict()
        self._map = dict(values)

    def toXML(self, writer, ttFont):
        """Dump this entry as <trackEntry> with one <track> tag per size."""
        name = ttFont["name"].getDebugName(self.nameIndex)
        writer.begintag(
            "trackEntry",
            (("value", fl2str(self.track, 16)), ("nameIndex", self.nameIndex)),
        )
        writer.newline()
        if name:
            # human-readable hint only; the authoritative link is nameIndex
            writer.comment(name)
            writer.newline()
        for size, perSizeValue in sorted(self.items()):
            writer.simpletag("track", size=fl2str(size, 16), value=perSizeValue)
            writer.newline()
        writer.endtag("trackEntry")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Restore track value, nameIndex, and the per-size values."""
        self.track = str2fl(attrs["value"], 16)
        self.nameIndex = safeEval(attrs["nameIndex"])
        for element in content:
            if not isinstance(element, tuple):
                continue
            name, attrs, _ = element
            if name != "track":
                continue
            size = str2fl(attrs["size"], 16)
            self[size] = safeEval(attrs["value"])

    def __getitem__(self, size):
        return self._map[size]

    def __delitem__(self, size):
        del self._map[size]

    def __setitem__(self, size, value):
        self._map[size] = value

    def __len__(self):
        return len(self._map)

    def __iter__(self):
        return iter(self._map)

    def keys(self):
        return self._map.keys()

    # alias: keys are the sizes
    sizes = keys

    def __repr__(self):
        return "TrackTableEntry({}, nameIndex={})".format(self._map, self.nameIndex)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return NotImplemented
        return self.nameIndex == other.nameIndex and dict(self) == dict(other)

    def __ne__(self, other):
        result = self.__eq__(other)
        return result if result is NotImplemented else not result
@ -0,0 +1,127 @@
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.misc.fixedTools import (
|
||||
ensureVersionIsLong as fi2ve,
|
||||
versionToFixed as ve2fi,
|
||||
)
|
||||
from . import DefaultTable
|
||||
import math
|
||||
|
||||
|
||||
# sstruct format for the fixed-size 'vhea' table. Field names become
# attributes on the table object; order and types must match the
# OpenType/TrueType 'vhea' specification.
vheaFormat = """
	> # big endian
	tableVersion: L
	ascent: h
	descent: h
	lineGap: h
	advanceHeightMax: H
	minTopSideBearing: h
	minBottomSideBearing: h
	yMaxExtent: h
	caretSlopeRise: h
	caretSlopeRun: h
	caretOffset: h
	reserved1: h
	reserved2: h
	reserved3: h
	reserved4: h
	metricDataFormat: h
	numberOfVMetrics: H
"""
class table__v_h_e_a(DefaultTable.DefaultTable):
    """Vertical Header ('vhea') table.

    Holds global vertical-layout metrics; on compile it can recalculate
    the aggregate fields from 'vmtx' plus the glyph outlines in 'glyf',
    'CFF ' or 'CFF2'.
    """

    # Note: Keep in sync with table__h_h_e_a

    # Tables whose data feeds recalc(); they must be compiled/loaded first.
    dependencies = ["vmtx", "glyf", "CFF ", "CFF2"]

    def decompile(self, data, ttFont):
        """Unpack the binary header fields onto self via sstruct."""
        sstruct.unpack(vheaFormat, data, self)

    def compile(self, ttFont):
        """Pack the header; recalculates aggregates first when the font
        asks for bbox recalculation and an outline table is loaded."""
        if ttFont.recalcBBoxes and (
            ttFont.isLoaded("glyf")
            or ttFont.isLoaded("CFF ")
            or ttFont.isLoaded("CFF2")
        ):
            self.recalc(ttFont)
        # Normalize the version to the long (0x0001xxxx) representation.
        self.tableVersion = fi2ve(self.tableVersion)
        return sstruct.pack(vheaFormat, self)

    def recalc(self, ttFont):
        """Recompute advanceHeightMax, min side bearings and yMaxExtent
        from 'vmtx' and the glyph bounding boxes. No-op without 'vmtx'."""
        if "vmtx" not in ttFont:
            return

        # vmtx metrics values are (advanceHeight, tsb) pairs.
        vmtxTable = ttFont["vmtx"]
        self.advanceHeightMax = max(adv for adv, _ in vmtxTable.metrics.values())

        # Glyph name -> outline height (yMax - yMin); only glyphs with
        # outlines contribute to the bearing/extent aggregates.
        boundsHeightDict = {}
        if "glyf" in ttFont:
            glyfTable = ttFont["glyf"]
            for name in ttFont.getGlyphOrder():
                g = glyfTable[name]
                if g.numberOfContours == 0:
                    # Empty glyph: no outline, no contribution.
                    continue
                if g.numberOfContours < 0 and not hasattr(g, "yMax"):
                    # Composite glyph without extents set.
                    # Calculate those.
                    g.recalcBounds(glyfTable)
                boundsHeightDict[name] = g.yMax - g.yMin
        elif "CFF " in ttFont or "CFF2" in ttFont:
            if "CFF " in ttFont:
                topDict = ttFont["CFF "].cff.topDictIndex[0]
            else:
                topDict = ttFont["CFF2"].cff.topDictIndex[0]
            charStrings = topDict.CharStrings
            for name in ttFont.getGlyphOrder():
                cs = charStrings[name]
                bounds = cs.calcBounds(charStrings)
                if bounds is not None:
                    # CFF bounds are floats; round outward to whole units.
                    boundsHeightDict[name] = int(
                        math.ceil(bounds[3]) - math.floor(bounds[1])
                    )

        if boundsHeightDict:
            minTopSideBearing = float("inf")
            minBottomSideBearing = float("inf")
            yMaxExtent = -float("inf")
            for name, boundsHeight in boundsHeightDict.items():
                advanceHeight, tsb = vmtxTable[name]
                # Bottom side bearing = advance minus top bearing and height.
                bsb = advanceHeight - tsb - boundsHeight
                extent = tsb + boundsHeight
                minTopSideBearing = min(minTopSideBearing, tsb)
                minBottomSideBearing = min(minBottomSideBearing, bsb)
                yMaxExtent = max(yMaxExtent, extent)
            self.minTopSideBearing = minTopSideBearing
            self.minBottomSideBearing = minBottomSideBearing
            self.yMaxExtent = yMaxExtent

        else:  # No glyph has outlines.
            self.minTopSideBearing = 0
            self.minBottomSideBearing = 0
            self.yMaxExtent = 0

    def toXML(self, writer, ttFont):
        """Write one simple tag per header field, in struct order."""
        formatstring, names, fixes = sstruct.getformat(vheaFormat)
        for name in names:
            value = getattr(self, name)
            if name == "tableVersion":
                # Render the version as a hex literal, e.g. 0x00010000.
                value = fi2ve(value)
                value = "0x%08x" % value
            writer.simpletag(name, value=value)
            writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Set a single header field from its XML attribute value."""
        if name == "tableVersion":
            setattr(self, name, ve2fi(attrs["value"]))
            return
        setattr(self, name, safeEval(attrs["value"]))

    # reserved0 is caretOffset for legacy reasons
    @property
    def reserved0(self):
        return self.caretOffset

    @reserved0.setter
    def reserved0(self, value):
        self.caretOffset = value
@ -0,0 +1,10 @@
|
||||
from fontTools import ttLib
|
||||
|
||||
# Reuse the horizontal-metrics implementation for the vertical table.
superclass = ttLib.getTableClass("hmtx")


class table__v_m_t_x(superclass):
    """Vertical Metrics ('vmtx') table.

    Identical machinery to 'hmtx'; only the header-table tag, the
    advance/side-bearing attribute names, and the metrics-count field
    name differ for the vertical layout direction.
    """

    # Header table that carries the metrics count for this table.
    headerTag = "vhea"
    # Attribute names consumed by the shared hmtx code.
    advanceName = "height"
    sideBearingName = "tsb"
    numberOfMetricsName = "numberOfVMetrics"
@ -0,0 +1,20 @@
|
||||
from fontTools.misc.textTools import strjoin, tobytes, tostr
|
||||
from . import DefaultTable
|
||||
|
||||
|
||||
class asciiTable(DefaultTable.DefaultTable):
    """Base class for tables whose payload is plain ASCII text, dumped
    verbatim inside a <source> element."""

    def toXML(self, writer, ttFont):
        """Emit self.data as unindented text between <source> tags."""
        # Strip NUL bytes before serializing. XXX needed??
        text = strjoin(tostr(self.data).split("\0"))
        writer.begintag("source")
        writer.newline()
        writer.write_noindent(text)
        writer.newline()
        writer.endtag("source")
        writer.newline()

    def fromXML(self, name, attrs, content, ttFont):
        """Rebuild self.data from the element's text content."""
        # The first and last lines are the newlines surrounding the
        # element body; drop them.
        pieces = strjoin(content).split("\n")
        self.data = tobytes("\n".join(pieces[1:-1]))
@ -0,0 +1,92 @@
|
||||
import struct, warnings
|
||||
|
||||
try:
|
||||
import lz4
|
||||
except ImportError:
|
||||
lz4 = None
|
||||
else:
|
||||
import lz4.block
|
||||
|
||||
# old scheme for VERSION < 0.9 otherwise use lz4.block
|
||||
|
||||
|
||||
def decompress(data):
    """Decompress a graphite table blob.

    The big-endian long at bytes 4..8 packs the scheme in the top 5 bits
    and the uncompressed size in the low 27. Scheme 0 is stored raw;
    scheme 1 is lz4 (when the lz4 module is available).

    Returns:
        (data, scheme) — *data* decompressed when possible, else unchanged.
    """
    (compression,) = struct.unpack(">L", data[4:8])
    scheme = compression >> 27
    size = compression & 0x07FFFFFF
    if scheme == 1 and lz4:
        # lz4.block expects a little-endian size prefix.
        expanded = lz4.block.decompress(struct.pack("<L", size) + data[8:])
        if len(expanded) == size:
            data = expanded
        else:
            warnings.warn("Table decompression failed.")
    elif scheme != 0:
        warnings.warn("Table is compressed with an unsupported compression scheme")
    return (data, scheme)
|
||||
def compress(scheme, data):
    """Compress a graphite table blob with the given scheme.

    Scheme 0 returns the data untouched; scheme 1 (with lz4 available)
    prepends an 8-byte header (tag + scheme/size word) to the lz4 stream.
    Unsupported schemes warn and return the data unchanged.
    """
    if scheme == 0:
        return data
    if scheme == 1 and lz4:
        # Header: original 4-byte tag, then scheme (top 5 bits) + size.
        hdr = data[:4] + struct.pack(">L", (scheme << 27) + (len(data) & 0x07FFFFFF))
        body = lz4.block.compress(
            data, mode="high_compression", compression=16, store_size=False
        )
        return hdr + body
    warnings.warn("Table failed to compress by unsupported compression scheme")
    return data
|
||||
def _entries(attrs, sameval):
    """Group (key, value) pairs into runs of consecutive keys.

    Yields (firstKey, runLength, values). When *sameval* is true, a run
    also breaks whenever the value changes. A final (possibly empty) run
    is always yielded.
    """
    run_end = 0
    run_vals = []
    prev = 0
    for key, val in attrs:
        broken = key != run_end + 1 or (sameval and val != prev)
        if run_vals and broken:
            yield (run_end - len(run_vals) + 1, len(run_vals), run_vals)
            run_vals = []
        run_end = key
        run_vals.append(val)
        prev = val
    yield (run_end - len(run_vals) + 1, len(run_vals), run_vals)


def entries(attributes, sameval=False):
    """Return a generator of consecutive-key runs over *attributes*,
    sorted numerically by key (keys may be ints or numeric strings)."""
    pairs = sorted(attributes.items(), key=lambda kv: int(kv[0]))
    return _entries(pairs, sameval)
|
||||
|
||||
def bininfo(num, size=1):
    """Pack the four OpenType binary-search header fields for *num*
    entries of *size* units each: (count, searchRange, entrySelector,
    rangeShift) as big-endian uint16s. All zeros when num == 0."""
    if num == 0:
        return struct.pack(">4H", 0, 0, 0, 0)
    # Largest power of two <= num, and its exponent.
    search_range = 1
    selector = 0
    while search_range <= num:
        search_range <<= 1
        selector += 1
    selector -= 1
    search_range >>= 1
    search_range *= size
    return struct.pack(">4H", num, search_range, selector, num * size - search_range)
||||
def num2tag(n):
    """Render *n* as a decimal string when small, otherwise as the
    4-character tag its big-endian bytes spell (NULs stripped)."""
    if n < 0x200000:
        return str(n)
    packed = struct.pack(">L", n)
    raw = struct.unpack("4s", packed)[0]
    return raw.replace(b"\000", b"").decode()
||||
|
||||
|
||||
def tag2num(n):
    """Convert a tag string to its numeric value.

    Purely numeric strings are returned as ints; anything else is
    treated as a (up to) 4-character ASCII tag and packed big-endian.

    Fix: the original padded with a single space, so tags shorter than
    3 characters produced fewer than 4 bytes and struct.unpack(">L")
    raised struct.error. ljust(4) pads to exactly 4 bytes.
    """
    try:
        return int(n)
    except ValueError:
        # Pad/truncate to exactly 4 bytes so ">L" always has enough data.
        padded = n.ljust(4)[:4]
        return struct.unpack(">L", padded.encode("ascii"))[0]
1465
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/otBase.py
Normal file
1465
venv/lib/python3.12/site-packages/fontTools/ttLib/tables/otBase.py
Normal file
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user