2024-11-29 18:15:30 +00:00
parent 40aade2d8e
commit bc9415586e
5298 changed files with 1938676 additions and 80 deletions


@@ -0,0 +1,203 @@
"""CFF2 to CFF converter."""
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.cffLib import (
TopDictIndex,
buildOrder,
buildDefaults,
topDictOperators,
privateDictOperators,
)
from .width import optimizeWidths
from collections import defaultdict
import logging
__all__ = ["convertCFF2ToCFF", "main"]
log = logging.getLogger("fontTools.cffLib")
def _convertCFF2ToCFF(cff, otFont):
"""Converts this object from CFF2 format to CFF format. This conversion
is done 'in-place'. The conversion cannot be reversed.
The CFF2 font cannot be variable. (TODO Accept those and convert to the
default instance?)
This assumes a decompiled CFF table. (i.e. that the object has been
filled via :meth:`decompile` and e.g. not loaded from XML.)"""
cff.major = 1
topDictData = TopDictIndex(None)
for item in cff.topDictIndex:
# Iterate over all top dicts so that each one gets decompiled
item.cff2GetGlyphOrder = None
topDictData.append(item)
cff.topDictIndex = topDictData
topDict = topDictData[0]
if hasattr(topDict, "VarStore"):
raise ValueError("Variable CFF2 font cannot be converted to CFF format.")
opOrder = buildOrder(topDictOperators)
topDict.order = opOrder
for key in list(topDict.rawDict.keys()):
if key not in opOrder:
del topDict.rawDict[key]
if hasattr(topDict, key):
delattr(topDict, key)
fdArray = topDict.FDArray
charStrings = topDict.CharStrings
defaults = buildDefaults(privateDictOperators)
order = buildOrder(privateDictOperators)
for fd in fdArray:
fd.setCFF2(False)
privateDict = fd.Private
privateDict.order = order
for key in order:
if key not in privateDict.rawDict and key in defaults:
privateDict.rawDict[key] = defaults[key]
for key in list(privateDict.rawDict.keys()):
if key not in order:
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
for cs in charStrings.values():
cs.decompile()
cs.program.append("endchar")
for subrSets in [cff.GlobalSubrs] + [
getattr(fd.Private, "Subrs", []) for fd in fdArray
]:
for cs in subrSets:
cs.program.append("return")
# Add (optimal) width to CharStrings that need it.
widths = defaultdict(list)
metrics = otFont["hmtx"].metrics
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
if fdIndex is None:
fdIndex = 0
widths[fdIndex].append(metrics[glyphName][0])
for fdIndex, widthList in widths.items():
bestDefault, bestNominal = optimizeWidths(widthList)
private = fdArray[fdIndex].Private
private.defaultWidthX = bestDefault
private.nominalWidthX = bestNominal
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
if fdIndex is None:
fdIndex = 0
private = fdArray[fdIndex].Private
width = metrics[glyphName][0]
if width != private.defaultWidthX:
cs.program.insert(0, width - private.nominalWidthX)
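# Illustration with made-up numbers: if optimizeWidths() picked
# defaultWidthX=500 and nominalWidthX=658 for this FD, a glyph with
# advance width 520 gets 520 - 658 == -138 prepended to its charstring,
# while a glyph with width exactly 500 needs no width operand at all.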
mapping = {
name: ("cid" + str(n) if n else ".notdef")
for n, name in enumerate(topDict.charset)
}
topDict.charset = [
"cid" + str(n) if n else ".notdef" for n in range(len(topDict.charset))
]
charStrings.charStrings = {
mapping[name]: v for name, v in charStrings.charStrings.items()
}
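# e.g. (illustrative): a charset of [".notdef", "A", "B"] becomes
# [".notdef", "cid1", "cid2"], and the CharStrings keys are renamed to match.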
# I'm not sure why the following is *not* necessary. And it breaks
# the output if I add it.
# topDict.ROS = ("Adobe", "Identity", 0)
def convertCFF2ToCFF(font, *, updatePostTable=True):
cff = font["CFF2"].cff
_convertCFF2ToCFF(cff, font)
del font["CFF2"]
table = font["CFF "] = newTable("CFF ")
table.cff = cff
if updatePostTable and "post" in font:
# The only 'post' table version supported for fonts with a CFF table
# is 3.0 (0x00030000), not 2.0 (0x00020000).
post = font["post"]
if post.formatType == 2.0:
post.formatType = 3.0
def main(args=None):
"""Convert CFF OTF font to CFF2 OTF font"""
if args is None:
import sys
args = sys.argv[1:]
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.CFFToCFF2",
description="Upgrade a CFF font to CFF2.",
)
parser.add_argument(
"input", metavar="INPUT.ttf", help="Input OTF file with CFF table."
)
parser.add_argument(
"-o",
"--output",
metavar="OUTPUT.ttf",
default=None,
help="Output instance OTF file (default: INPUT-CFF2.ttf).",
)
parser.add_argument(
"--no-recalc-timestamp",
dest="recalc_timestamp",
action="store_false",
help="Don't set the output font's timestamp to the current time.",
)
loggingGroup = parser.add_mutually_exclusive_group(required=False)
loggingGroup.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
loggingGroup.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
from fontTools import configLogger
configLogger(
level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
)
import os
infile = options.input
if not os.path.isfile(infile):
parser.error("No such file '{}'".format(infile))
outfile = (
makeOutputFileName(infile, overWrite=True, suffix="-CFF")
if not options.output
else options.output
)
font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)
convertCFF2ToCFF(font)
log.info(
"Saving %s",
outfile,
)
font.save(outfile)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))


@@ -0,0 +1,305 @@
"""CFF to CFF2 converter."""
from fontTools.ttLib import TTFont, newTable
from fontTools.misc.cliTools import makeOutputFileName
from fontTools.misc.psCharStrings import T2WidthExtractor
from fontTools.cffLib import (
TopDictIndex,
FDArrayIndex,
FontDict,
buildOrder,
topDictOperators,
privateDictOperators,
topDictOperators2,
privateDictOperators2,
)
from io import BytesIO
import logging
__all__ = ["convertCFFToCFF2", "main"]
log = logging.getLogger("fontTools.cffLib")
class _NominalWidthUsedError(Exception):
def __add__(self, other):
raise self
def __radd__(self, other):
raise self
def _convertCFFToCFF2(cff, otFont):
"""Converts this object from CFF format to CFF2 format. This conversion
is done 'in-place'. The conversion cannot be reversed.
This assumes a decompiled CFF table. (i.e. that the object has been
filled via :meth:`decompile` and e.g. not loaded from XML.)"""
# Clean up T2CharStrings
topDict = cff.topDictIndex[0]
fdArray = topDict.FDArray if hasattr(topDict, "FDArray") else None
charStrings = topDict.CharStrings
globalSubrs = cff.GlobalSubrs
localSubrs = (
[getattr(fd.Private, "Subrs", []) for fd in fdArray]
if fdArray
else (
[topDict.Private.Subrs]
if hasattr(topDict, "Private") and hasattr(topDict.Private, "Subrs")
else []
)
)
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
cs.decompile()
# Clean up subroutines first
for subrs in [globalSubrs] + localSubrs:
for subr in subrs:
program = subr.program
i = j = len(program)
try:
i = program.index("return")
except ValueError:
pass
try:
j = program.index("endchar")
except ValueError:
pass
program[min(i, j) :] = []
# Clean up glyph charstrings
removeUnusedSubrs = False
nominalWidthXError = _NominalWidthUsedError()
for glyphName in charStrings.keys():
cs, fdIndex = charStrings.getItemAndSelector(glyphName)
program = cs.program
thisLocalSubrs = (
localSubrs[fdIndex]
if fdIndex is not None
else (
getattr(topDict.Private, "Subrs", [])
if hasattr(topDict, "Private")
else []
)
)
# Intentionally use custom type for nominalWidthX, such that any
# CharString that has an explicit width encoded will throw back to us.
extractor = T2WidthExtractor(
thisLocalSubrs,
globalSubrs,
nominalWidthXError,
0,
)
try:
extractor.execute(cs)
except _NominalWidthUsedError:
# Program has explicit width. We want to drop it, but can't
# just pop the first number since it may be a subroutine call.
# Instead, when seeing that, we embed the subroutine and recurse.
# If this ever happened, we later prune unused subroutines.
while len(program) >= 2 and program[1] in ["callsubr", "callgsubr"]:
removeUnusedSubrs = True
subrNumber = program.pop(0)
assert isinstance(subrNumber, int), subrNumber
op = program.pop(0)
bias = extractor.localBias if op == "callsubr" else extractor.globalBias
subrNumber += bias
subrSet = thisLocalSubrs if op == "callsubr" else globalSubrs
subrProgram = subrSet[subrNumber].program
program[:0] = subrProgram
# Now pop the actual width
assert len(program) >= 1, program
program.pop(0)
if program and program[-1] == "endchar":
program.pop()
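# For example (hypothetical charstring): a CFF program such as
#   [602, 100, 200, "rmoveto", 50, 0, "rlineto", "endchar"]
# whose leading 602 is an explicit width comes out as
#   [100, 200, "rmoveto", 50, 0, "rlineto"]
# since CFF2 charstrings carry neither width values nor endchar.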
if removeUnusedSubrs:
cff.remove_unused_subroutines()
# Upconvert TopDict
cff.major = 2
cff2GetGlyphOrder = cff.otFont.getGlyphOrder
topDictData = TopDictIndex(None, cff2GetGlyphOrder)
for item in cff.topDictIndex:
# Iterate over all top dicts so that each one gets decompiled
topDictData.append(item)
cff.topDictIndex = topDictData
topDict = topDictData[0]
if hasattr(topDict, "Private"):
privateDict = topDict.Private
else:
privateDict = None
opOrder = buildOrder(topDictOperators2)
topDict.order = opOrder
topDict.cff2GetGlyphOrder = cff2GetGlyphOrder
if not hasattr(topDict, "FDArray"):
fdArray = topDict.FDArray = FDArrayIndex()
fdArray.strings = None
fdArray.GlobalSubrs = topDict.GlobalSubrs
topDict.GlobalSubrs.fdArray = fdArray
charStrings = topDict.CharStrings
if charStrings.charStringsAreIndexed:
charStrings.charStringsIndex.fdArray = fdArray
else:
charStrings.fdArray = fdArray
fontDict = FontDict()
fontDict.setCFF2(True)
fdArray.append(fontDict)
fontDict.Private = privateDict
privateOpOrder = buildOrder(privateDictOperators2)
if privateDict is not None:
for entry in privateDictOperators:
key = entry[1]
if key not in privateOpOrder:
if key in privateDict.rawDict:
# print "Removing private dict", key
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# print "Removing privateDict attr", key
else:
# clean up the PrivateDicts in the fdArray
fdArray = topDict.FDArray
privateOpOrder = buildOrder(privateDictOperators2)
for fontDict in fdArray:
fontDict.setCFF2(True)
for key in list(fontDict.rawDict.keys()):
if key not in fontDict.order:
del fontDict.rawDict[key]
if hasattr(fontDict, key):
delattr(fontDict, key)
privateDict = fontDict.Private
for entry in privateDictOperators:
key = entry[1]
if key not in privateOpOrder:
if key in list(privateDict.rawDict.keys()):
# print "Removing private dict", key
del privateDict.rawDict[key]
if hasattr(privateDict, key):
delattr(privateDict, key)
# print "Removing privateDict attr", key
# Now delete the deprecated TopDict operators from CFF 1.0
for entry in topDictOperators:
key = entry[1]
# We seem to need to keep the charset operator for now,
# or we fail to compile with some fonts, like AdditionFont.otf.
# I don't know which kind of CFF font those are. But keeping
# charset seems to work. It will be removed when we save and
# read the font again.
#
# AdditionFont.otf has <Encoding name="StandardEncoding"/>.
if key == "charset":
continue
if key not in opOrder:
if key in topDict.rawDict:
del topDict.rawDict[key]
if hasattr(topDict, key):
delattr(topDict, key)
# TODO(behdad): What does the following comment even mean? Both CFF and CFF2
# use the same T2Charstring class. I *think* what it means is that the CharStrings
# were loaded for CFF1, and we need to reload them for CFF2 to set varstore, etc
# on them. At least that's what I understand. It's probably safe to remove this
# and just set vstore where needed.
#
# See comment above about charset as well.
# At this point, the Subrs and CharStrings are all still of the T2CharString class;
# the easiest way to fix this is to compile and then decompile again.
file = BytesIO()
cff.compile(file, otFont, isCFF2=True)
file.seek(0)
cff.decompile(file, otFont, isCFF2=True)
def convertCFFToCFF2(font):
cff = font["CFF "].cff
del font["CFF "]
_convertCFFToCFF2(cff, font)
table = font["CFF2"] = newTable("CFF2")
table.cff = cff
def main(args=None):
"""Convert CFF OTF font to CFF2 OTF font"""
if args is None:
import sys
args = sys.argv[1:]
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.CFFToCFF2",
description="Upgrade a CFF font to CFF2.",
)
parser.add_argument(
"input", metavar="INPUT.ttf", help="Input OTF file with CFF table."
)
parser.add_argument(
"-o",
"--output",
metavar="OUTPUT.ttf",
default=None,
help="Output instance OTF file (default: INPUT-CFF2.ttf).",
)
parser.add_argument(
"--no-recalc-timestamp",
dest="recalc_timestamp",
action="store_false",
help="Don't set the output font's timestamp to the current time.",
)
loggingGroup = parser.add_mutually_exclusive_group(required=False)
loggingGroup.add_argument(
"-v", "--verbose", action="store_true", help="Run more verbosely."
)
loggingGroup.add_argument(
"-q", "--quiet", action="store_true", help="Turn verbosity off."
)
options = parser.parse_args(args)
from fontTools import configLogger
configLogger(
level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
)
import os
infile = options.input
if not os.path.isfile(infile):
parser.error("No such file '{}'".format(infile))
outfile = (
makeOutputFileName(infile, overWrite=True, suffix="-CFF2")
if not options.output
else options.output
)
font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)
convertCFFToCFF2(font)
log.info(
"Saving %s",
outfile,
)
font.save(outfile)
if __name__ == "__main__":
import sys
sys.exit(main(sys.argv[1:]))

File diff suppressed because it is too large.


@@ -0,0 +1,924 @@
# -*- coding: utf-8 -*-
"""T2CharString operator specializer and generalizer.
PostScript glyph drawing operations can be expressed in multiple different
ways. For example, as well as the ``lineto`` operator, there is also a
``hlineto`` operator which draws a horizontal line, removing the need to
specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a
vertical line, removing the need to specify a ``dy`` coordinate. As well
as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects
into lists of operations, this module allows for conversion between general
and specific forms of the operation.
"""
from fontTools.cffLib import maxStackLimit
def stringToProgram(string):
if isinstance(string, str):
string = string.split()
program = []
for token in string:
try:
token = int(token)
except ValueError:
try:
token = float(token)
except ValueError:
pass
program.append(token)
return program
def programToString(program):
return " ".join(str(x) for x in program)
def programToCommands(program, getNumRegions=None):
"""Takes a T2CharString program list and returns list of commands.
Each command is a two-tuple of (commandname, arg-list). The commandname may be
the empty string if no command name should be emitted (used for the glyph width,
the hintmask/cntrmask argument, and stray arguments at the end of the
program 🤷).
'getNumRegions' may be None, or a callable that takes a single argument,
vsindex, and returns the number of regions for that vsindex.
The Charstring may or may not start with a width value. If the first
non-blend operator has an odd number of arguments, then the first argument is
a width, and is popped off. This is complicated with blend operators, as
there may be more than one before the first hint or moveto operator, and each
one reduces several arguments to just one list argument. We have to sum the
number of arguments that are not part of the blend arguments, and all the
'numBlends' values. We could instead have said that by definition, if there
is a blend operator, there is no width value, since CFF2 Charstrings don't
have width values. I discussed this with Behdad, and we are allowing for an
initial width value in this case because developers may assemble a CFF2
charstring from CFF Charstrings, which could have width values.
"""
seenWidthOp = False
vsIndex = 0
lenBlendStack = 0
lastBlendIndex = 0
commands = []
stack = []
it = iter(program)
for token in it:
if not isinstance(token, str):
stack.append(token)
continue
if token == "blend":
assert getNumRegions is not None
numSourceFonts = 1 + getNumRegions(vsIndex)
# replace the blend op args on the stack with a single list
# containing all the blend op args.
numBlends = stack[-1]
numBlendArgs = numBlends * numSourceFonts + 1
# replace first blend op by a list of the blend ops.
stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
lenStack = len(stack)
lenBlendStack += numBlends + lenStack - 1
lastBlendIndex = lenStack
# if a blend op exists, this is or will be a CFF2 charstring.
continue
elif token == "vsindex":
vsIndex = stack[-1]
assert type(vsIndex) is int
elif (not seenWidthOp) and token in {
"hstem",
"hstemhm",
"vstem",
"vstemhm",
"cntrmask",
"hintmask",
"hmoveto",
"vmoveto",
"rmoveto",
"endchar",
}:
seenWidthOp = True
parity = token in {"hmoveto", "vmoveto"}
if lenBlendStack:
# lenBlendStack has the number of args represented by the last blend
# arg and all the preceding args. We need to now add the number of
# args following the last blend arg.
numArgs = lenBlendStack + len(stack[lastBlendIndex:])
else:
numArgs = len(stack)
if numArgs and (numArgs % 2) ^ parity:
width = stack.pop(0)
commands.append(("", [width]))
if token in {"hintmask", "cntrmask"}:
if stack:
commands.append(("", stack))
commands.append((token, []))
commands.append(("", [next(it)]))
else:
commands.append((token, stack))
stack = []
if stack:
commands.append(("", stack))
return commands
def _flattenBlendArgs(args):
token_list = []
for arg in args:
if isinstance(arg, list):
token_list.extend(arg)
token_list.append("blend")
else:
token_list.append(arg)
return token_list
def commandsToProgram(commands):
"""Takes a commands list as returned by programToCommands() and converts
it back to a T2CharString program list."""
program = []
for op, args in commands:
if any(isinstance(arg, list) for arg in args):
args = _flattenBlendArgs(args)
program.extend(args)
if op:
program.append(op)
return program
def _everyN(el, n):
"""Group the list el into groups of size n"""
l = len(el)
if l % n != 0:
raise ValueError(el)
for i in range(0, l, n):
yield el[i : i + n]
class _GeneralizerDecombinerCommandsMap(object):
@staticmethod
def rmoveto(args):
if len(args) != 2:
raise ValueError(args)
yield ("rmoveto", args)
@staticmethod
def hmoveto(args):
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [args[0], 0])
@staticmethod
def vmoveto(args):
if len(args) != 1:
raise ValueError(args)
yield ("rmoveto", [0, args[0]])
@staticmethod
def rlineto(args):
if not args:
raise ValueError(args)
for args in _everyN(args, 2):
yield ("rlineto", args)
@staticmethod
def hlineto(args):
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ("rlineto", [next(it), 0])
yield ("rlineto", [0, next(it)])
except StopIteration:
pass
@staticmethod
def vlineto(args):
if not args:
raise ValueError(args)
it = iter(args)
try:
while True:
yield ("rlineto", [0, next(it)])
yield ("rlineto", [next(it), 0])
except StopIteration:
pass
@staticmethod
def rrcurveto(args):
if not args:
raise ValueError(args)
for args in _everyN(args, 6):
yield ("rrcurveto", args)
@staticmethod
def hhcurveto(args):
l = len(args)
if l < 4 or l % 4 > 1:
raise ValueError(args)
if l % 2 == 1:
yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
args = args[5:]
for args in _everyN(args, 4):
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])
@staticmethod
def vvcurveto(args):
l = len(args)
if l < 4 or l % 4 > 1:
raise ValueError(args)
if l % 2 == 1:
yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
args = args[5:]
for args in _everyN(args, 4):
yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])
@staticmethod
def hvcurveto(args):
l = len(args)
if l < 4 or l % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if l % 2 == 1:
lastStraight = l % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
args = next(it)
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
else:
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
@staticmethod
def vhcurveto(args):
l = len(args)
if l < 4 or l % 8 not in {0, 1, 4, 5}:
raise ValueError(args)
last_args = None
if l % 2 == 1:
lastStraight = l % 8 == 5
args, last_args = args[:-5], args[-5:]
it = _everyN(args, 4)
try:
while True:
args = next(it)
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
args = next(it)
yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
except StopIteration:
pass
if last_args:
args = last_args
if lastStraight:
yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
else:
yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
@staticmethod
def rcurveline(args):
l = len(args)
if l < 8 or l % 6 != 2:
raise ValueError(args)
args, last_args = args[:-2], args[-2:]
for args in _everyN(args, 6):
yield ("rrcurveto", args)
yield ("rlineto", last_args)
@staticmethod
def rlinecurve(args):
l = len(args)
if l < 8 or l % 2 != 0:
raise ValueError(args)
args, last_args = args[:-6], args[-6:]
for args in _everyN(args, 2):
yield ("rlineto", args)
yield ("rrcurveto", last_args)
def _convertBlendOpToArgs(blendList):
# args is list of blend op args. Since we are supporting
# recursive blend op calls, some of these args may also
# be a list of blend op args, and need to be converted before
# we convert the current list.
if any([isinstance(arg, list) for arg in blendList]):
args = [
i
for e in blendList
for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
]
else:
args = blendList
# We now know that blendList contains a blend op argument list, even if
# some of the args are lists that each contain a blend op argument list.
# Convert from:
# [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
# to:
# [ [x0] + [delta tuple for x0],
# ...,
# [xn] + [delta tuple for xn] ]
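# For instance (made-up numbers), with numBlends=2 and two regions,
#   [10, 20, 1, 2, 3, 4, 2]
# (defaults 10, 20; deltas (1, 2) and (3, 4); the trailing 2 is numBlends)
# becomes
#   [[10, 1, 2, 1], [20, 3, 4, 1]]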
numBlends = args[-1]
# Can't use args.pop() when the args are being used in a nested list
# comprehension. See calling context
args = args[:-1]
l = len(args)
numRegions = l // numBlends - 1
if not (numBlends * (numRegions + 1) == l):
raise ValueError(blendList)
defaultArgs = [[arg] for arg in args[:numBlends]]
deltaArgs = args[numBlends:]
numDeltaValues = len(deltaArgs)
deltaList = [
deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
]
blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
return blend_args
def generalizeCommands(commands, ignoreErrors=False):
result = []
mapping = _GeneralizerDecombinerCommandsMap
for op, args in commands:
# First, generalize any blend args in the arg list.
if any([isinstance(arg, list) for arg in args]):
try:
args = [
n
for arg in args
for n in (
_convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
)
]
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(("", args))
result.append(("", [op]))
else:
raise
func = getattr(mapping, op, None)
if func is None:
result.append((op, args))
continue
try:
for command in func(args):
result.append(command)
except ValueError:
if ignoreErrors:
# Store op as data, such that consumers of commands do not have to
# deal with incorrect number of arguments.
result.append(("", args))
result.append(("", [op]))
else:
raise
return result
def generalizeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(
generalizeCommands(programToCommands(program, getNumRegions), **kwargs)
)
def _categorizeVector(v):
"""
Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
it returns a single zero still.
>>> _categorizeVector((0,0))
('0', (0,))
>>> _categorizeVector((1,0))
('h', (1,))
>>> _categorizeVector((0,2))
('v', (2,))
>>> _categorizeVector((1,2))
('r', (1, 2))
"""
if not v[0]:
if not v[1]:
return "0", v[:1]
else:
return "v", v[1:]
else:
if not v[1]:
return "h", v[:1]
else:
return "r", v
def _mergeCategories(a, b):
if a == "0":
return b
if b == "0":
return a
if a == b:
return a
return None
def _negateCategory(a):
if a == "h":
return "v"
if a == "v":
return "h"
assert a in "0r"
return a
def _convertToBlendCmds(args):
# return a list of blend commands, and
# the remaining non-blended args, if any.
num_args = len(args)
stack_use = 0
new_args = []
i = 0
while i < num_args:
arg = args[i]
i += 1
if not isinstance(arg, list):
new_args.append(arg)
stack_use += 1
else:
prev_stack_use = stack_use
# The arg is a tuple of blend values.
# These are each (master 0,delta 1..delta n, 1)
# Combine as many successive tuples as we can,
# up to the max stack limit.
num_sources = len(arg) - 1
blendlist = [arg]
stack_use += 1 + num_sources # 1 for the num_blends arg
# if we are here, max stack is the CFF2 max stack.
# I use the CFF2 max stack limit here rather than
# the 'maxstack' chosen by the client, as the default
# maxstack may have been used unintentionally. For all
# the other operators, this just produces a little less
# optimization, but here it puts a hard (and low) limit
# on the number of source fonts that can be used.
#
# Make sure the stack depth does not exceed (maxstack - 1), so
# that subroutinizer can insert subroutine calls at any point.
while (
(i < num_args)
and isinstance(args[i], list)
and stack_use + num_sources < maxStackLimit
):
blendlist.append(args[i])
i += 1
stack_use += num_sources
# blendList now contains as many single blend tuples as can be
# combined without exceeding the CFF2 stack limit.
num_blends = len(blendlist)
# append the 'num_blends' default font values
blend_args = []
for arg in blendlist:
blend_args.append(arg[0])
for arg in blendlist:
assert arg[-1] == 1
blend_args.extend(arg[1:-1])
blend_args.append(num_blends)
new_args.append(blend_args)
stack_use = prev_stack_use + num_blends
return new_args
def _addArgs(a, b):
if isinstance(b, list):
if isinstance(a, list):
if len(a) != len(b) or a[-1] != b[-1]:
raise ValueError()
return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]]
else:
a, b = b, a
if isinstance(a, list):
assert a[-1] == 1
return [_addArgs(a[0], b)] + a[1:]
return a + b
def _argsStackUse(args):
stackLen = 0
maxLen = 0
for arg in args:
if type(arg) is list:
# Blended arg
maxLen = max(maxLen, stackLen + _argsStackUse(arg))
stackLen += arg[-1]
else:
stackLen += 1
return max(stackLen, maxLen)
def specializeCommands(
commands,
ignoreErrors=False,
generalizeFirst=True,
preserveTopology=False,
maxstack=48,
):
# We perform several rounds of optimizations. They are carefully ordered and are:
#
# 0. Generalize commands.
# This ensures that they are in our expected simple form, with each line/curve only
# having arguments for one segment, and using the generic form (rlineto/rrcurveto).
# If caller is sure the input is in this form, they can turn off generalization to
# save time.
#
# 1. Combine successive rmoveto operations.
#
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
# We specialize into some, made-up, variants as well, which simplifies following
# passes.
#
# 3. Merge or delete redundant operations, to the extent requested.
# OpenType spec declares point numbers in CFF undefined. As such, we happily
# change topology. If client relies on point numbers (in GPOS anchors, or for
# hinting purposes(what?)) they can turn this off.
#
# 4. Peephole optimization to revert back some of the h/v variants back into their
# original "relative" operator (rline/rrcurveto) if that saves a byte.
#
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
#
# 6. Resolve any remaining made-up operators into real operators.
#
# I have convinced myself that this produces optimal bytecode (except for, possibly
# one byte each time maxstack size prohibits combining.) YMMV, but you'd be wrong. :-)
# A dynamic-programming approach can do the same but would be significantly slower.
#
# 7. For any args which are blend lists, convert them to a blend command.
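# A small end-to-end illustration (made-up program):
#   specializeProgram([10, 0, "rlineto", 0, 20, "rlineto"])
# returns [10, 20, "hlineto"], and
#   generalizeProgram([10, 20, "hlineto"])
# turns it back into [10, 0, "rlineto", 0, 20, "rlineto"].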
# 0. Generalize commands.
if generalizeFirst:
commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
else:
commands = list(commands) # Make copy since we modify in-place later.
# 1. Combine successive rmoveto operations.
for i in range(len(commands) - 1, 0, -1):
if "rmoveto" == commands[i][0] == commands[i - 1][0]:
v1, v2 = commands[i - 1][1], commands[i][1]
commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
del commands[i]
# 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
#
# We, in fact, specialize into more, made-up, variants that special-case when both
# X and Y components are zero. This simplifies the following optimization passes.
# This case is rare, but OCD does not let me skip it.
#
# After this round, we will have four variants that use the following mnemonics:
#
# - 'r' for relative, ie. non-zero X and non-zero Y,
# - 'h' for horizontal, ie. non-zero X and zero Y,
# - 'v' for vertical, ie. zero X and non-zero Y,
# - '0' for zeros, ie. zero X and zero Y.
#
# The '0' pseudo-operators are not part of the spec, but help simplify the following
# optimization rounds. We resolve them at the end. So, after this, we will have four
# moveto and four lineto variants:
#
# - 0moveto, 0lineto
# - hmoveto, hlineto
# - vmoveto, vlineto
# - rmoveto, rlineto
#
# and sixteen curveto variants. For example, a '0hcurveto' operator means a curve
# dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dy0, and dy3 are zero but not dx3.
# An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
#
# There are nine different variants of curves without the '0'. Those nine map exactly
# to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
# vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
# arguments and one without. Eg. an hhcurveto with an extra argument (odd number of
# arguments) is in fact an rhcurveto. The operators in the spec are designed such that
# all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
#
# Of the curve types with '0', the 00curveto is equivalent to a lineto variant. The rest
# of the curve types with a 0 need to be encoded as a h or v variant. Ie. a '0' can be
# thought of a "don't care" and can be used as either an 'h' or a 'v'. As such, we always
# encode a number 0 as argument when we use a '0' variant. Later on, we can just substitute
# the '0' with either 'h' or 'v' and it works.
#
# When we get to curve splines however, things become more complicated... XXX finish this.
# There's one more complexity with splines. If one side of the spline is not horizontal or
# vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
# Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
# only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
# This limits our merge opportunities later.
#
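# For example (hypothetical command): after this pass
#   ("rrcurveto", [0, 10, 20, 30, 40, 0])
# becomes
#   ("vhcurveto", [10, 20, 30, 40])
# since its first vector is vertical and its last one is horizontal.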
for i in range(len(commands)):
op, args = commands[i]
if op in {"rmoveto", "rlineto"}:
c, args = _categorizeVector(args)
commands[i] = c + op[1:], args
continue
if op == "rrcurveto":
c1, args1 = _categorizeVector(args[:2])
c2, args2 = _categorizeVector(args[-2:])
commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
continue
# 3. Merge or delete redundant operations, to the extent requested.
#
# TODO
# A 0moveto that comes before all other path operations can be removed,
# though I find conflicting evidence for this.
#
# TODO
# "If hstem and vstem hints are both declared at the beginning of a
# CharString, and this sequence is followed directly by the hintmask or
# cntrmask operators, then the vstem hint operator (or, if applicable,
# the vstemhm operator) need not be included."
#
# "The sequence and form of a CFF2 CharString program may be represented as:
# {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
#
# https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
#
# For Type2 CharStrings the sequence is:
# w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"
# Some other redundancies change topology (point numbers).
if not preserveTopology:
for i in range(len(commands) - 1, -1, -1):
op, args = commands[i]
# A 00curveto is demoted to a (specialized) lineto.
if op == "00curveto":
assert len(args) == 4
c, args = _categorizeVector(args[1:3])
op = c + "lineto"
commands[i] = op, args
# and then...
# A 0lineto can be deleted.
if op == "0lineto":
del commands[i]
continue
# Merge adjacent hlineto's and vlineto's.
# In CFF2 charstrings from variable fonts, each
# arg item may be a list of blendable values, one from
# each source font.
if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
_, other_args = commands[i - 1]
assert len(args) == 1 and len(other_args) == 1
try:
new_args = [_addArgs(args[0], other_args[0])]
except ValueError:
continue
commands[i - 1] = (op, new_args)
del commands[i]
continue
# 4. Peephole optimization to revert back some of the h/v variants back into their
# original "relative" operator (rline/rrcurveto) if that saves a byte.
for i in range(1, len(commands) - 1):
op, args = commands[i]
prv, nxt = commands[i - 1][0], commands[i + 1][0]
if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
assert len(args) == 1
args = [0, args[0]] if op[0] == "v" else [args[0], 0]
commands[i] = ("rlineto", args)
continue
if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
assert (op[0] == "r") ^ (op[1] == "r")
if op[0] == "v":
pos = 0
elif op[0] != "r":
pos = 1
elif op[1] == "v":
pos = 4
else:
pos = 5
# Insert, while maintaining the type of args (can be tuple or list).
args = args[:pos] + type(args)((0,)) + args[pos:]
commands[i] = ("rrcurveto", args)
continue
# 5. Combine adjacent operators when possible, minding not to go over max stack size.
stackUse = _argsStackUse(commands[-1][1]) if commands else 0
for i in range(len(commands) - 1, 0, -1):
op1, args1 = commands[i - 1]
op2, args2 = commands[i]
new_op = None
# Merge logic...
if {op1, op2} <= {"rlineto", "rrcurveto"}:
if op1 == op2:
new_op = op1
else:
l = len(args2)
if op2 == "rrcurveto" and l == 6:
new_op = "rlinecurve"
elif l == 2:
new_op = "rcurveline"
elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
new_op = op2
elif {op1, op2} == {"vlineto", "hlineto"}:
new_op = op1
elif "curveto" == op1[2:] == op2[2:]:
d0, d1 = op1[:2]
d2, d3 = op2[:2]
if d1 == "r" or d2 == "r" or d0 == d3 == "r":
continue
d = _mergeCategories(d1, d2)
if d is None:
continue
if d0 == "r":
d = _mergeCategories(d, d3)
if d is None:
continue
new_op = "r" + d + "curveto"
elif d3 == "r":
d0 = _mergeCategories(d0, _negateCategory(d))
if d0 is None:
continue
new_op = d0 + "r" + "curveto"
else:
d0 = _mergeCategories(d0, d3)
if d0 is None:
continue
new_op = d0 + d + "curveto"
# Make sure the stack depth does not exceed (maxstack - 1), so
# that subroutinizer can insert subroutine calls at any point.
args1StackUse = _argsStackUse(args1)
combinedStackUse = max(args1StackUse, len(args1) + stackUse)
if new_op and combinedStackUse < maxstack:
commands[i - 1] = (new_op, args1 + args2)
del commands[i]
stackUse = combinedStackUse
else:
stackUse = args1StackUse
# 6. Resolve any remaining made-up operators into real operators.
for i in range(len(commands)):
op, args = commands[i]
if op in {"0moveto", "0lineto"}:
commands[i] = "h" + op[1:], args
continue
if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
l = len(args)
op0, op1 = op[:2]
if (op0 == "r") ^ (op1 == "r"):
assert l % 2 == 1
if op0 == "0":
op0 = "h"
if op1 == "0":
op1 = "h"
if op0 == "r":
op0 = op1
if op1 == "r":
op1 = _negateCategory(op0)
assert {op0, op1} <= {"h", "v"}, (op0, op1)
if l % 2:
if op0 != op1: # vhcurveto / hvcurveto
if (op0 == "h") ^ (l % 8 == 1):
# Swap last two args order
args = args[:-2] + args[-1:] + args[-2:-1]
else: # hhcurveto / vvcurveto
if op0 == "h": # hhcurveto
# Swap first two args order
args = args[1:2] + args[:1] + args[2:]
commands[i] = op0 + op1 + "curveto", args
continue
# 7. For any series of args which are blend lists, convert the series to a single blend arg.
for i in range(len(commands)):
op, args = commands[i]
if any(isinstance(arg, list) for arg in args):
commands[i] = op, _convertToBlendCmds(args)
return commands
def specializeProgram(program, getNumRegions=None, **kwargs):
return commandsToProgram(
specializeCommands(programToCommands(program, getNumRegions), **kwargs)
)
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.specializer",
description="CFF CharString generalizer/specializer",
)
parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
parser.add_argument(
"--num-regions",
metavar="NumRegions",
nargs="*",
default=None,
help="Number of variable-font regions for blend opertaions.",
)
parser.add_argument(
"--font",
metavar="FONTFILE",
default=None,
help="CFF2 font to specialize.",
)
parser.add_argument(
"-o",
"--output-file",
type=str,
help="Output font file name.",
)
options = parser.parse_args(sys.argv[1:])
if options.program:
getNumRegions = (
None
if options.num_regions is None
else lambda vsIndex: int(
options.num_regions[0 if vsIndex is None else vsIndex]
)
)
program = stringToProgram(options.program)
print("Program:")
print(programToString(program))
commands = programToCommands(program, getNumRegions)
print("Commands:")
print(commands)
program2 = commandsToProgram(commands)
print("Program from commands:")
print(programToString(program2))
assert program == program2
print("Generalized program:")
print(programToString(generalizeProgram(program, getNumRegions)))
print("Specialized program:")
print(programToString(specializeProgram(program, getNumRegions)))
if options.font:
from fontTools.ttLib import TTFont
font = TTFont(options.font)
cff2 = font["CFF2"].cff.topDictIndex[0]
charstrings = cff2.CharStrings
for glyphName in charstrings.keys():
charstring = charstrings[glyphName]
charstring.decompile()
getNumRegions = charstring.private.getNumRegions
charstring.program = specializeProgram(
charstring.program, getNumRegions, maxstack=maxStackLimit
)
if options.output_file is None:
from fontTools.misc.cliTools import makeOutputFileName
outfile = makeOutputFileName(
options.font, overWrite=True, suffix=".specialized"
)
else:
outfile = options.output_file
if outfile:
print("Saving", outfile)
font.save(outfile)


@@ -0,0 +1,485 @@
from fontTools.misc.psCharStrings import (
SimpleT2Decompiler,
T2WidthExtractor,
calcSubrBias,
)
def _uniq_sort(l):
return sorted(set(l))
class StopHintCountEvent(Exception):
pass
class _DesubroutinizingT2Decompiler(SimpleT2Decompiler):
stop_hintcount_ops = (
"op_hintmask",
"op_cntrmask",
"op_rmoveto",
"op_hmoveto",
"op_vmoveto",
)
def __init__(self, localSubrs, globalSubrs, private=None):
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
def execute(self, charString):
self.need_hintcount = True # until proven otherwise
for op_name in self.stop_hintcount_ops:
setattr(self, op_name, self.stop_hint_count)
if hasattr(charString, "_desubroutinized"):
# If a charstring has already been desubroutinized, we will still
# need to execute it if we need to count hints in order to
# compute the byte length for mask arguments, and haven't finished
# counting hints pairs.
if self.need_hintcount and self.callingStack:
try:
SimpleT2Decompiler.execute(self, charString)
except StopHintCountEvent:
del self.callingStack[-1]
return
charString._patches = []
SimpleT2Decompiler.execute(self, charString)
desubroutinized = charString.program[:]
for idx, expansion in reversed(charString._patches):
assert idx >= 2
assert desubroutinized[idx - 1] in [
"callsubr",
"callgsubr",
], desubroutinized[idx - 1]
assert isinstance(desubroutinized[idx - 2], int)
if expansion[-1] == "return":
expansion = expansion[:-1]
desubroutinized[idx - 2 : idx] = expansion
if not self.private.in_cff2:
if "endchar" in desubroutinized:
# Cut off after first endchar
desubroutinized = desubroutinized[
: desubroutinized.index("endchar") + 1
]
charString._desubroutinized = desubroutinized
del charString._patches
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1] + self.localBias]
SimpleT2Decompiler.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
SimpleT2Decompiler.op_callgsubr(self, index)
self.processSubr(index, subr)
def stop_hint_count(self, *args):
self.need_hintcount = False
for op_name in self.stop_hintcount_ops:
setattr(self, op_name, None)
cs = self.callingStack[-1]
if hasattr(cs, "_desubroutinized"):
raise StopHintCountEvent()
def op_hintmask(self, index):
SimpleT2Decompiler.op_hintmask(self, index)
if self.need_hintcount:
self.stop_hint_count()
def processSubr(self, index, subr):
cs = self.callingStack[-1]
if not hasattr(cs, "_desubroutinized"):
cs._patches.append((index, subr._desubroutinized))
def desubroutinize(cff):
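# In effect (illustrative): every "callsubr"/"callgsubr" in each charstring's
# program is replaced in place by the body of the referenced subroutine
# (minus its trailing "return"); the local Subrs and GlobalSubrs indexes are
# then emptied, since nothing refers to them anymore.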
for fontName in cff.fontNames:
font = cff[fontName]
cs = font.CharStrings
for c in cs.values():
c.decompile()
subrs = getattr(c.private, "Subrs", [])
decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
decompiler.execute(c)
c.program = c._desubroutinized
del c._desubroutinized
# Delete all the local subrs
if hasattr(font, "FDArray"):
for fd in font.FDArray:
pd = fd.Private
if hasattr(pd, "Subrs"):
del pd.Subrs
if "Subrs" in pd.rawDict:
del pd.rawDict["Subrs"]
else:
pd = font.Private
if hasattr(pd, "Subrs"):
del pd.Subrs
if "Subrs" in pd.rawDict:
del pd.rawDict["Subrs"]
# as well as the global subrs
cff.GlobalSubrs.clear()
class _MarkingT2Decompiler(SimpleT2Decompiler):
def __init__(self, localSubrs, globalSubrs, private):
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
for subrs in [localSubrs, globalSubrs]:
if subrs and not hasattr(subrs, "_used"):
subrs._used = set()
def op_callsubr(self, index):
self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
SimpleT2Decompiler.op_callsubr(self, index)
def op_callgsubr(self, index):
self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
SimpleT2Decompiler.op_callgsubr(self, index)
class _DehintingT2Decompiler(T2WidthExtractor):
class Hints(object):
def __init__(self):
# Whether calling this charstring produces any hint stems
# Note that if a charstring starts with hintmask, it will
# have has_hint set to True, because it *might* produce an
# implicit vstem if called under certain conditions.
self.has_hint = False
# Index to start at to drop all hints
self.last_hint = 0
# Index up to which we know more hints are possible.
# Only relevant if status is 0 or 1.
self.last_checked = 0
# The status means:
# 0: after dropping hints, this charstring is empty
# 1: after dropping hints, there may be more hints
# continuing after this, or there might be
# other things. Not clear yet.
# 2: no more hints possible after this charstring
self.status = 0
# Has hintmask instructions; not recursive
self.has_hintmask = False
# List of indices of calls to empty subroutines to remove.
self.deletions = []
def __init__(
self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
):
self._css = css
T2WidthExtractor.__init__(
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
)
self.private = private
def execute(self, charString):
old_hints = charString._hints if hasattr(charString, "_hints") else None
charString._hints = self.Hints()
T2WidthExtractor.execute(self, charString)
hints = charString._hints
if hints.has_hint or hints.has_hintmask:
self._css.add(charString)
if hints.status != 2:
# Check from last_check, make sure we didn't have any operators.
for i in range(hints.last_checked, len(charString.program) - 1):
if isinstance(charString.program[i], str):
hints.status = 2
break
else:
hints.status = 1 # There's *something* here
hints.last_checked = len(charString.program)
if old_hints:
assert hints.__dict__ == old_hints.__dict__
def op_callsubr(self, index):
subr = self.localSubrs[self.operandStack[-1] + self.localBias]
T2WidthExtractor.op_callsubr(self, index)
self.processSubr(index, subr)
def op_callgsubr(self, index):
subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
T2WidthExtractor.op_callgsubr(self, index)
self.processSubr(index, subr)
def op_hstem(self, index):
T2WidthExtractor.op_hstem(self, index)
self.processHint(index)
def op_vstem(self, index):
T2WidthExtractor.op_vstem(self, index)
self.processHint(index)
def op_hstemhm(self, index):
T2WidthExtractor.op_hstemhm(self, index)
self.processHint(index)
def op_vstemhm(self, index):
T2WidthExtractor.op_vstemhm(self, index)
self.processHint(index)
def op_hintmask(self, index):
rv = T2WidthExtractor.op_hintmask(self, index)
self.processHintmask(index)
return rv
def op_cntrmask(self, index):
rv = T2WidthExtractor.op_cntrmask(self, index)
self.processHintmask(index)
return rv
def processHintmask(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hintmask = True
if hints.status != 2:
# Check from last_check, see if we may be an implicit vstem
for i in range(hints.last_checked, index - 1):
if isinstance(cs.program[i], str):
hints.status = 2
break
else:
# We are an implicit vstem
hints.has_hint = True
hints.last_hint = index + 1
hints.status = 0
hints.last_checked = index + 1
def processHint(self, index):
cs = self.callingStack[-1]
hints = cs._hints
hints.has_hint = True
hints.last_hint = index
hints.last_checked = index
def processSubr(self, index, subr):
cs = self.callingStack[-1]
hints = cs._hints
subr_hints = subr._hints
# Check from last_check, make sure we didn't have
# any operators.
if hints.status != 2:
for i in range(hints.last_checked, index - 1):
if isinstance(cs.program[i], str):
hints.status = 2
break
hints.last_checked = index
if hints.status != 2:
if subr_hints.has_hint:
hints.has_hint = True
# Decide where to chop off from
if subr_hints.status == 0:
hints.last_hint = index
else:
hints.last_hint = index - 2 # Leave the subr call in
elif subr_hints.status == 0:
hints.deletions.append(index)
hints.status = max(hints.status, subr_hints.status)
def _cs_subset_subroutines(charstring, subrs, gsubrs):
p = charstring.program
for i in range(1, len(p)):
if p[i] == "callsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
elif p[i] == "callgsubr":
assert isinstance(p[i - 1], int)
p[i - 1] = (
gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
)
def _cs_drop_hints(charstring):
hints = charstring._hints
if hints.deletions:
p = charstring.program
for idx in reversed(hints.deletions):
del p[idx - 2 : idx]
if hints.has_hint:
assert not hints.deletions or hints.last_hint <= hints.deletions[0]
charstring.program = charstring.program[hints.last_hint :]
if not charstring.program:
# TODO CFF2 no need for endchar.
charstring.program.append("endchar")
if hasattr(charstring, "width"):
# Insert width back if needed
if charstring.width != charstring.private.defaultWidthX:
# For CFF2 charstrings, this should never happen
assert (
charstring.private.defaultWidthX is not None
), "CFF2 CharStrings must not have an initial width value"
charstring.program.insert(
0, charstring.width - charstring.private.nominalWidthX
)
if hints.has_hintmask:
i = 0
p = charstring.program
while i < len(p):
if p[i] in ["hintmask", "cntrmask"]:
assert i + 1 <= len(p)
del p[i : i + 2]
continue
i += 1
assert len(charstring.program)
del charstring._hints
def remove_hints(cff, *, removeUnusedSubrs: bool = True):
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# This can be tricky, but doesn't have to be. What we do is:
#
# - Run all used glyph charstrings and recurse into subroutines,
# - For each charstring (including subroutines), if it has any
# of the hint stem operators, we mark it as such.
# Upon returning, for each charstring we note all the
# subroutine calls it makes that (recursively) contain a stem,
# - Dropping hinting then consists of the following two ops:
# * Drop the piece of the program in each charstring before the
# last call to a stem op or a stem-calling subroutine,
# * Drop all hintmask operations.
# - It's trickier... A hintmask right after hints and a few numbers
# will act as an implicit vstemhm. As such, we track whether
# we have seen any non-hint operators so far and do the right
# thing, recursively... Good luck understanding that :(
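# Rough illustration (hypothetical charstring): a decompiled program like
#   [25, 10, 50, "hstem", "hintmask", <mask bytes>, 100, 200, "rmoveto", "endchar"]
# comes out of hint dropping as
#   [25, 100, 200, "rmoveto", "endchar"]
# with the leading 25 re-inserted only if the width differs from defaultWidthX.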
css = set()
for c in cs.values():
c.decompile()
subrs = getattr(c.private, "Subrs", [])
decompiler = _DehintingT2Decompiler(
css,
subrs,
c.globalSubrs,
c.private.nominalWidthX,
c.private.defaultWidthX,
c.private,
)
decompiler.execute(c)
c.width = decompiler.width
for charstring in css:
_cs_drop_hints(charstring)
del css
# Drop font-wide hinting values
all_privs = []
if hasattr(font, "FDArray"):
all_privs.extend(fd.Private for fd in font.FDArray)
else:
all_privs.append(font.Private)
for priv in all_privs:
for k in [
"BlueValues",
"OtherBlues",
"FamilyBlues",
"FamilyOtherBlues",
"BlueScale",
"BlueShift",
"BlueFuzz",
"StemSnapH",
"StemSnapV",
"StdHW",
"StdVW",
"ForceBold",
"LanguageGroup",
"ExpansionFactor",
]:
if hasattr(priv, k):
setattr(priv, k, None)
if removeUnusedSubrs:
remove_unused_subroutines(cff)
def _pd_delete_empty_subrs(private_dict):
if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
if "Subrs" in private_dict.rawDict:
del private_dict.rawDict["Subrs"]
del private_dict.Subrs
def remove_unused_subroutines(cff):
for fontname in cff.keys():
font = cff[fontname]
cs = font.CharStrings
# Renumber subroutines to remove unused ones
# Mark all used subroutines
for c in cs.values():
subrs = getattr(c.private, "Subrs", [])
decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
decompiler.execute(c)
all_subrs = [font.GlobalSubrs]
if hasattr(font, "FDArray"):
all_subrs.extend(
fd.Private.Subrs
for fd in font.FDArray
if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
)
elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
all_subrs.append(font.Private.Subrs)
subrs = set(subrs) # Remove duplicates
# Prepare
for subrs in all_subrs:
if not hasattr(subrs, "_used"):
subrs._used = set()
subrs._used = _uniq_sort(subrs._used)
subrs._old_bias = calcSubrBias(subrs)
subrs._new_bias = calcSubrBias(subrs._used)
# Renumber glyph charstrings
for c in cs.values():
subrs = getattr(c.private, "Subrs", None)
_cs_subset_subroutines(c, subrs, font.GlobalSubrs)
# Renumber subroutines themselves
for subrs in all_subrs:
if subrs == font.GlobalSubrs:
if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
local_subrs = font.Private.Subrs
elif hasattr(font, "FDArray") and len(font.FDArray) == 1:
local_subrs = font.FDArray[0].Private.Subrs
else:
local_subrs = None
else:
local_subrs = subrs
subrs.items = [subrs.items[i] for i in subrs._used]
if hasattr(subrs, "file"):
del subrs.file
if hasattr(subrs, "offsets"):
del subrs.offsets
for subr in subrs.items:
_cs_subset_subroutines(subr, local_subrs, font.GlobalSubrs)
# Delete local SubrsIndex if empty
if hasattr(font, "FDArray"):
for fd in font.FDArray:
_pd_delete_empty_subrs(fd.Private)
else:
_pd_delete_empty_subrs(font.Private)
# Cleanup
for subrs in all_subrs:
del subrs._used, subrs._old_bias, subrs._new_bias


@@ -0,0 +1,210 @@
# -*- coding: utf-8 -*-
"""T2CharString glyph width optimizer.
CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
value do not need to specify their width in their charstring, saving bytes.
This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
values for a font, when provided with a list of glyph widths."""
from fontTools.ttLib import TTFont
from collections import defaultdict
from operator import add
from functools import reduce
__all__ = ["optimizeWidths", "main"]
class missingdict(dict):
def __init__(self, missing_func):
self.missing_func = missing_func
def __missing__(self, v):
return self.missing_func(v)
def cumSum(f, op=add, start=0, decreasing=False):
keys = sorted(f.keys())
minx, maxx = keys[0], keys[-1]
total = reduce(op, f.values(), start)
if decreasing:
missing = lambda x: start if x > maxx else total
domain = range(maxx, minx - 1, -1)
else:
missing = lambda x: start if x < minx else total
domain = range(minx, maxx + 1)
out = missingdict(missing)
v = start
for x in domain:
v = op(v, f[x])
out[x] = v
return out
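# e.g. (illustrative): cumSum(defaultdict(int, {1: 2, 3: 1})) maps
# 1 -> 2, 2 -> 2, 3 -> 3; keys below 1 fall back to 0 and keys above 3
# fall back to the running total (3) via the missingdict.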
def byteCost(widths, default, nominal):
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
cost = 0
for w, freq in widths.items():
if w == default:
continue
diff = abs(w - nominal)
if diff <= 107:
cost += freq
elif diff <= 1131:
cost += freq * 2
else:
cost += freq * 5
return cost
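# e.g. (illustrative): byteCost([500, 500, 600], default=500, nominal=600) == 1
# (the 500s match the default and cost nothing; |600 - 600| <= 107 costs 1 byte),
# while byteCost([500, 500, 900], default=500, nominal=600) == 2
# (|900 - 600| = 300 falls in the 2-byte range).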
def optimizeWidthsBruteforce(widths):
"""Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
d = defaultdict(int)
for w in widths:
d[w] += 1
# Maximum number of bytes using default can possibly save
maxDefaultAdvantage = 5 * max(d.values())
minw, maxw = min(widths), max(widths)
domain = list(range(minw, maxw + 1))
bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
bestCost = len(widths) * 5 + 1
for nominal in domain:
if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
continue
for default in domain:
cost = byteCost(widths, default, nominal)
if cost < bestCost:
bestCost = cost
bestDefault = default
bestNominal = nominal
return bestDefault, bestNominal
def optimizeWidths(widths):
"""Given a list of glyph widths, or dictionary mapping glyph width to number of
glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
This algorithm is linear in UPEM+numGlyphs."""
if not hasattr(widths, "items"):
d = defaultdict(int)
for w in widths:
d[w] += 1
widths = d
keys = sorted(widths.keys())
minw, maxw = keys[0], keys[-1]
domain = list(range(minw, maxw + 1))
# Cumulative sum/max forward/backward.
cumFrqU = cumSum(widths, op=add)
cumMaxU = cumSum(widths, op=max)
cumFrqD = cumSum(widths, op=add, decreasing=True)
cumMaxD = cumSum(widths, op=max, decreasing=True)
# Cost per nominal choice, without default consideration.
nomnCostU = missingdict(
lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
)
nomnCostD = missingdict(
lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
)
nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
# Cost-saving per nominal choice, by best default choice.
dfltCostU = missingdict(
lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
)
dfltCostD = missingdict(
lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
)
dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
# Combined cost per nominal choice.
bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
# Best nominal.
nominal = min(domain, key=lambda x: bestCost[x])
# Work back the best default.
bestC = bestCost[nominal]
dfltC = nomnCost[nominal] - bestCost[nominal]
ends = []
if dfltC == dfltCostU[nominal]:
starts = [nominal, nominal - 108, nominal - 1132]
for start in starts:
while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
start -= 1
ends.append(start)
else:
starts = [nominal, nominal + 108, nominal + 1132]
for start in starts:
while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
start += 1
ends.append(start)
default = min(ends, key=lambda default: byteCost(widths, default, nominal))
return default, nominal
def main(args=None):
"""Calculate optimum defaultWidthX/nominalWidthX values"""
import argparse
parser = argparse.ArgumentParser(
"fonttools cffLib.width",
description=main.__doc__,
)
parser.add_argument(
"inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
)
parser.add_argument(
"-b",
"--brute-force",
dest="brute",
action="store_true",
help="Use brute-force approach (VERY slow)",
)
args = parser.parse_args(args)
for fontfile in args.inputs:
font = TTFont(fontfile)
hmtx = font["hmtx"]
widths = [m[0] for m in hmtx.metrics.values()]
if args.brute:
default, nominal = optimizeWidthsBruteforce(widths)
else:
default, nominal = optimizeWidths(widths)
print(
"glyphs=%d default=%d nominal=%d byteCost=%d"
% (len(widths), default, nominal, byteCost(widths, default, nominal))
)
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
import doctest
sys.exit(doctest.testmod().failed)
main()