asd
This commit is contained in:
8
venv/lib/python3.12/site-packages/fontTools/__init__.py
Normal file
8
venv/lib/python3.12/site-packages/fontTools/__init__.py
Normal file
@ -0,0 +1,8 @@
|
||||
import logging
|
||||
from fontTools.misc.loggingTools import configLogger
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
version = __version__ = "4.55.0"
|
||||
|
||||
__all__ = ["version", "log", "configLogger"]
|
||||
35
venv/lib/python3.12/site-packages/fontTools/__main__.py
Normal file
35
venv/lib/python3.12/site-packages/fontTools/__main__.py
Normal file
@ -0,0 +1,35 @@
|
||||
import sys
|
||||
|
||||
|
||||
def main(args=None):
    """Dispatch ``fonttools <subcommand> ...`` to the ``fontTools.<subcommand>``
    module, executing it as if run with ``python -m fontTools.<subcommand>``.

    NOTE(review): the *args* parameter is accepted but effectively unused —
    everything below reads and mutates ``sys.argv`` directly, so arguments
    passed explicitly to :func:`main` are ignored. Confirm whether this is
    intentional before relying on programmatic invocation.
    """
    if args is None:
        args = sys.argv[1:]

    # TODO Handle library-wide options. Eg.:
    # --unicodedata
    # --verbose / other logging stuff

    # TODO Allow a way to run arbitrary modules? Useful for setting
    # library-wide options and calling another library. Eg.:
    #
    # $ fonttools --unicodedata=... fontmake ...
    #
    # This allows for a git-like command where thirdparty commands
    # can be added. Should we just try importing the fonttools
    # module first and try without if it fails?

    # No subcommand given: fall back to the "help" subcommand.
    if len(sys.argv) < 2:
        sys.argv.append("help")
    if sys.argv[1] == "-h" or sys.argv[1] == "--help":
        sys.argv[1] = "help"
    mod = "fontTools." + sys.argv[1]
    # Rewrite argv so the subcommand sees "fonttools <sub>" as its program
    # name and the remaining arguments as its own.
    sys.argv[1] = sys.argv[0] + " " + sys.argv[1]
    del sys.argv[0]

    import runpy

    runpy.run_module(mod, run_name="__main__")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
439
venv/lib/python3.12/site-packages/fontTools/afmLib.py
Normal file
439
venv/lib/python3.12/site-packages/fontTools/afmLib.py
Normal file
@ -0,0 +1,439 @@
|
||||
"""Module for reading and writing AFM (Adobe Font Metrics) files.
|
||||
|
||||
Note that this has been designed to read in AFM files generated by Fontographer
|
||||
and has not been tested on many other files. In particular, it does not
|
||||
implement the whole Adobe AFM specification [#f1]_ but, it should read most
|
||||
"common" AFM files.
|
||||
|
||||
Here is an example of using `afmLib` to read, modify and write an AFM file:
|
||||
|
||||
>>> from fontTools.afmLib import AFM
|
||||
>>> f = AFM("Tests/afmLib/data/TestAFM.afm")
|
||||
>>>
|
||||
>>> # Accessing a pair gets you the kern value
|
||||
>>> f[("V","A")]
|
||||
-60
|
||||
>>>
|
||||
>>> # Accessing a glyph name gets you metrics
|
||||
>>> f["A"]
|
||||
(65, 668, (8, -25, 660, 666))
|
||||
>>> # (charnum, width, bounding box)
|
||||
>>>
|
||||
>>> # Accessing an attribute gets you metadata
|
||||
>>> f.FontName
|
||||
'TestFont-Regular'
|
||||
>>> f.FamilyName
|
||||
'TestFont'
|
||||
>>> f.Weight
|
||||
'Regular'
|
||||
>>> f.XHeight
|
||||
500
|
||||
>>> f.Ascender
|
||||
750
|
||||
>>>
|
||||
>>> # Attributes and items can also be set
|
||||
>>> f[("A","V")] = -150 # Tighten kerning
|
||||
>>> f.FontName = "TestFont Squished"
|
||||
>>>
|
||||
>>> # And the font written out again (remove the # in front)
|
||||
>>> #f.write("testfont-squished.afm")
|
||||
|
||||
.. rubric:: Footnotes
|
||||
|
||||
.. [#f1] `Adobe Technote 5004 <https://www.adobe.com/content/dam/acom/en/devnet/font/pdfs/5004.AFM_Spec.pdf>`_,
|
||||
Adobe Font Metrics File Format Specification.
|
||||
|
||||
"""
|
||||
|
||||
import re
|
||||
|
||||
# every single line starts with a "word"
identifierRE = re.compile(r"^([A-Za-z]+).*")

# regular expression to parse char lines
charRE = re.compile(
    r"(-?\d+)"  # charnum
    r"\s*;\s*WX\s+"  # ; WX
    r"(-?\d+)"  # width
    r"\s*;\s*N\s+"  # ; N
    r"([.A-Za-z0-9_]+)"  # charname
    r"\s*;\s*B\s+"  # ; B
    r"(-?\d+)"  # left
    r"\s+"
    r"(-?\d+)"  # bottom
    r"\s+"
    r"(-?\d+)"  # right
    r"\s+"
    r"(-?\d+)"  # top
    r"\s*;\s*"  # ;
)

# regular expression to parse kerning lines
kernRE = re.compile(
    r"([.A-Za-z0-9_]+)"  # leftchar
    r"\s+"
    r"([.A-Za-z0-9_]+)"  # rightchar
    r"\s+"
    r"(-?\d+)"  # value
    r"\s*"
)

# regular expressions to parse composite info lines of the form:
# Aacute 2 ; PCC A 0 0 ; PCC acute 182 211 ;
compositeRE = re.compile(
    r"([.A-Za-z0-9_]+)"  # char name
    r"\s+"
    r"(\d+)"  # number of parts
    r"\s*;\s*"
)
componentRE = re.compile(
    r"PCC\s+"  # PCC
    r"([.A-Za-z0-9_]+)"  # base char name
    r"\s+"
    r"(-?\d+)"  # x offset
    r"\s+"
    r"(-?\d+)"  # y offset
    r"\s*;\s*"
)

# Attributes written first (in this order) by AFM.write(); any attributes
# not listed here are written afterwards in alphabetical order.
preferredAttributeOrder = [
    "FontName",
    "FullName",
    "FamilyName",
    "Weight",
    "ItalicAngle",
    "IsFixedPitch",
    "FontBBox",
    "UnderlinePosition",
    "UnderlineThickness",
    "Version",
    "Notice",
    "EncodingScheme",
    "CapHeight",
    "XHeight",
    "Ascender",
    "Descender",
]
|
||||
|
||||
|
||||
class error(Exception):
    """Exception raised when an AFM file cannot be parsed.

    Kept lowercase for backward compatibility with the historical API.
    """

    pass
|
||||
|
||||
|
||||
class AFM(object):
    """Parsed, in-memory representation of an AFM (Adobe Font Metrics) file.

    Char metrics, kerning pairs and composites are reachable through the
    mapping protocol (``afm["A"]``, ``afm[("A", "V")]``), while file-level
    keywords such as ``FontName`` or ``Weight`` are exposed as plain
    attributes via ``__getattr__``/``__setattr__``.
    """

    # Class-level default so that looking up "_attrs" always succeeds even
    # before __init__ has run (it is replaced by a dict per instance).
    _attrs = None

    # Structural keywords that delimit sections of the file; they carry no
    # data of their own and are skipped while parsing.
    _keywords = [
        "StartFontMetrics",
        "EndFontMetrics",
        "StartCharMetrics",
        "EndCharMetrics",
        "StartKernData",
        "StartKernPairs",
        "EndKernPairs",
        "EndKernData",
        "StartComposites",
        "EndComposites",
    ]

    def __init__(self, path=None):
        """AFM file reader.

        Instantiating an object with a path name will cause the file to be opened,
        read, and parsed. Alternatively the path can be left unspecified, and a
        file can be parsed later with the :meth:`read` method."""
        self._attrs = {}
        self._chars = {}
        self._kerning = {}
        self._index = {}
        self._comments = []
        self._composites = {}
        if path is not None:
            self.read(path)

    def read(self, path):
        """Opens, reads and parses a file."""
        lines = readlines(path)
        for line in lines:
            if not line.strip():
                continue
            m = identifierRE.match(line)
            if m is None:
                raise error("syntax error in AFM file: " + repr(line))

            # Split the line into the leading keyword and the remainder.
            pos = m.regs[1][1]
            word = line[:pos]
            rest = line[pos:].strip()
            if word in self._keywords:
                continue
            if word == "C":
                self.parsechar(rest)
            elif word == "KPX":
                self.parsekernpair(rest)
            elif word == "CC":
                self.parsecomposite(rest)
            else:
                self.parseattr(word, rest)

    def parsechar(self, rest):
        """Parses one char-metrics record (the part after the ``C`` keyword)
        and stores it in ``self._chars`` as ``charnum, width, (l, b, r, t)``."""
        m = charRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        things = []
        for fr, to in m.regs[1:]:
            things.append(rest[fr:to])
        # Group 3 is the glyph name; everything else is numeric.
        charname = things[2]
        del things[2]
        charnum, width, l, b, r, t = (int(thing) for thing in things)
        self._chars[charname] = charnum, width, (l, b, r, t)

    def parsekernpair(self, rest):
        """Parses one kerning record (the part after the ``KPX`` keyword)
        and stores it in ``self._kerning`` keyed by the glyph-name pair."""
        m = kernRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        things = []
        for fr, to in m.regs[1:]:
            things.append(rest[fr:to])
        leftchar, rightchar, value = things
        value = int(value)
        self._kerning[(leftchar, rightchar)] = value

    def parseattr(self, word, rest):
        """Stores a file-level attribute line. Integer values are converted;
        everything else is kept as a string. ``Comment`` lines are collected
        separately and ``FontBBox`` becomes a 4-tuple of ints."""
        if word == "FontBBox":
            l, b, r, t = [int(thing) for thing in rest.split()]
            self._attrs[word] = l, b, r, t
        elif word == "Comment":
            self._comments.append(rest)
        else:
            try:
                value = int(rest)
            except (ValueError, OverflowError):
                self._attrs[word] = rest
            else:
                self._attrs[word] = value

    def parsecomposite(self, rest):
        """Parses one composite record (the part after the ``CC`` keyword)
        and stores the component list in ``self._composites``."""
        m = compositeRE.match(rest)
        if m is None:
            raise error("syntax error in AFM file: " + repr(rest))
        charname = m.group(1)
        ncomponents = int(m.group(2))
        rest = rest[m.regs[0][1] :]
        components = []
        while True:
            m = componentRE.match(rest)
            if m is None:
                raise error("syntax error in AFM file: " + repr(rest))
            basechar = m.group(1)
            xoffset = int(m.group(2))
            yoffset = int(m.group(3))
            components.append((basechar, xoffset, yoffset))
            rest = rest[m.regs[0][1] :]
            if not rest:
                break
        # Sanity-check against the declared component count.
        assert len(components) == ncomponents
        self._composites[charname] = components

    def write(self, path, sep="\r"):
        """Writes out an AFM font to the given path."""
        import time

        lines = [
            "StartFontMetrics 2.0",
            "Comment Generated by afmLib; at %s"
            % (time.strftime("%m/%d/%Y %H:%M:%S", time.localtime(time.time()))),
        ]

        # write comments, assuming (possibly wrongly!) they should
        # all appear at the top
        for comment in self._comments:
            lines.append("Comment " + comment)

        # write attributes, first the ones we know about, in
        # a preferred order
        attrs = self._attrs
        for attr in preferredAttributeOrder:
            if attr in attrs:
                value = attrs[attr]
                if attr == "FontBBox":
                    value = "%s %s %s %s" % value
                lines.append(attr + " " + str(value))
        # then write the attributes we don't know about,
        # in alphabetical order
        items = sorted(attrs.items())
        for attr, value in items:
            if attr in preferredAttributeOrder:
                continue
            lines.append(attr + " " + str(value))

        # write char metrics
        lines.append("StartCharMetrics " + repr(len(self._chars)))
        items = [
            (charnum, (charname, width, box))
            for charname, (charnum, width, box) in self._chars.items()
        ]

        def myKey(a):
            """Custom key function to make sure unencoded chars (-1)
            end up at the end of the list after sorting."""
            if a[0] == -1:
                a = (0xFFFF,) + a[1:]  # 0xffff is an arbitrary large number
            return a

        items.sort(key=myKey)

        for charnum, (charname, width, (l, b, r, t)) in items:
            lines.append(
                "C %d ; WX %d ; N %s ; B %d %d %d %d ;"
                % (charnum, width, charname, l, b, r, t)
            )
        lines.append("EndCharMetrics")

        # write kerning info
        lines.append("StartKernData")
        lines.append("StartKernPairs " + repr(len(self._kerning)))
        items = sorted(self._kerning.items())
        for (leftchar, rightchar), value in items:
            lines.append("KPX %s %s %d" % (leftchar, rightchar, value))
        lines.append("EndKernPairs")
        lines.append("EndKernData")

        if self._composites:
            composites = sorted(self._composites.items())
            lines.append("StartComposites %s" % len(self._composites))
            for charname, components in composites:
                line = "CC %s %s ;" % (charname, len(components))
                for basechar, xoffset, yoffset in components:
                    line = line + " PCC %s %s %s ;" % (basechar, xoffset, yoffset)
                lines.append(line)
            lines.append("EndComposites")

        lines.append("EndFontMetrics")

        writelines(path, lines, sep)

    def has_kernpair(self, pair):
        """Returns `True` if the given glyph pair (specified as a tuple) exists
        in the kerning dictionary."""
        return pair in self._kerning

    def kernpairs(self):
        """Returns a list of all kern pairs in the kerning dictionary."""
        return list(self._kerning.keys())

    def has_char(self, char):
        """Returns `True` if the given glyph exists in the font."""
        return char in self._chars

    def chars(self):
        """Returns a list of all glyph names in the font."""
        return list(self._chars.keys())

    def comments(self):
        """Returns all comments from the file."""
        return self._comments

    def addComment(self, comment):
        """Adds a new comment to the file."""
        self._comments.append(comment)

    def addComposite(self, glyphName, components):
        """Specifies that the glyph `glyphName` is made up of the given components.
        The components list should be of the following form::

                [
                        (glyphname, xOffset, yOffset),
                        ...
                ]

        """
        self._composites[glyphName] = components

    def __getattr__(self, attr):
        # Only called when normal attribute lookup fails: fall back to the
        # parsed AFM keyword attributes.
        if attr in self._attrs:
            return self._attrs[attr]
        else:
            raise AttributeError(attr)

    def __setattr__(self, attr, value):
        # all attrs *not* starting with "_" are consider to be AFM keywords
        if attr[:1] == "_":
            self.__dict__[attr] = value
        else:
            self._attrs[attr] = value

    def __delattr__(self, attr):
        # all attrs *not* starting with "_" are consider to be AFM keywords
        if attr[:1] == "_":
            try:
                del self.__dict__[attr]
            except KeyError:
                raise AttributeError(attr)
        else:
            try:
                del self._attrs[attr]
            except KeyError:
                raise AttributeError(attr)

    def __getitem__(self, key):
        if isinstance(key, tuple):
            # key is a tuple, return the kernpair
            return self._kerning[key]
        else:
            # return the metrics instead
            return self._chars[key]

    def __setitem__(self, key, value):
        if isinstance(key, tuple):
            # key is a tuple, set kernpair
            self._kerning[key] = value
        else:
            # set char metrics
            self._chars[key] = value

    def __delitem__(self, key):
        if isinstance(key, tuple):
            # key is a tuple, del kernpair
            del self._kerning[key]
        else:
            # del char metrics
            del self._chars[key]

    def __repr__(self):
        if hasattr(self, "FullName"):
            return "<AFM object for %s>" % self.FullName
        else:
            return "<AFM object at %x>" % id(self)
|
||||
|
||||
|
||||
def readlines(path):
    """Read the ASCII file at *path* and return its contents as a list of
    lines, with line endings stripped."""
    with open(path, "r", encoding="ascii") as fp:
        return fp.read().splitlines()
|
||||
|
||||
|
||||
def writelines(path, lines, sep="\r"):
    """Write *lines* to *path* as ASCII, each line terminated by *sep*
    (classic Mac ``\\r`` by default, matching historical AFM files)."""
    content = "\n".join(lines) + "\n"
    # newline=sep makes the text layer translate every "\n" into *sep*.
    with open(path, "w", encoding="ascii", newline=sep) as fp:
        fp.write(content)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import EasyDialogs
|
||||
|
||||
path = EasyDialogs.AskFileForOpen()
|
||||
if path:
|
||||
afm = AFM(path)
|
||||
char = "A"
|
||||
if afm.has_char(char):
|
||||
print(afm[char]) # print charnum, width and boundingbox
|
||||
pair = ("A", "V")
|
||||
if afm.has_kernpair(pair):
|
||||
print(afm[pair]) # print kerning value for pair
|
||||
print(afm.Version) # various other afm entries have become attributes
|
||||
print(afm.Weight)
|
||||
# afm.comments() returns a list of all Comment lines found in the AFM
|
||||
print(afm.comments())
|
||||
# print afm.chars()
|
||||
# print afm.kernpairs()
|
||||
print(afm)
|
||||
afm.write(path + ".muck")
|
||||
5233
venv/lib/python3.12/site-packages/fontTools/agl.py
Normal file
5233
venv/lib/python3.12/site-packages/fontTools/agl.py
Normal file
File diff suppressed because it is too large
Load Diff
203
venv/lib/python3.12/site-packages/fontTools/cffLib/CFF2ToCFF.py
Normal file
203
venv/lib/python3.12/site-packages/fontTools/cffLib/CFF2ToCFF.py
Normal file
@ -0,0 +1,203 @@
|
||||
"""CFF2 to CFF converter."""
|
||||
|
||||
from fontTools.ttLib import TTFont, newTable
|
||||
from fontTools.misc.cliTools import makeOutputFileName
|
||||
from fontTools.cffLib import (
|
||||
TopDictIndex,
|
||||
buildOrder,
|
||||
buildDefaults,
|
||||
topDictOperators,
|
||||
privateDictOperators,
|
||||
)
|
||||
from .width import optimizeWidths
|
||||
from collections import defaultdict
|
||||
import logging
|
||||
|
||||
|
||||
__all__ = ["convertCFF2ToCFF", "main"]
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.cffLib")
|
||||
|
||||
|
||||
def _convertCFF2ToCFF(cff, otFont):
    """Converts this object from CFF2 format to CFF format. This conversion
    is done 'in-place'. The conversion cannot be reversed.

    The CFF2 font cannot be variable. (TODO Accept those and convert to the
    default instance?)

    This assumes a decompiled CFF table. (i.e. that the object has been
    filled via :meth:`decompile` and e.g. not loaded from XML.)"""

    cff.major = 1

    topDictData = TopDictIndex(None)
    for item in cff.topDictIndex:
        # Iterate over, such that all are decompiled
        item.cff2GetGlyphOrder = None
        topDictData.append(item)
    cff.topDictIndex = topDictData
    topDict = topDictData[0]

    if hasattr(topDict, "VarStore"):
        raise ValueError("Variable CFF2 font cannot be converted to CFF format.")

    opOrder = buildOrder(topDictOperators)
    topDict.order = opOrder
    # Snapshot the keys before deleting: removing entries while iterating
    # the live .keys() view raises RuntimeError in Python 3.
    for key in list(topDict.rawDict.keys()):
        if key not in opOrder:
            del topDict.rawDict[key]
            if hasattr(topDict, key):
                delattr(topDict, key)

    fdArray = topDict.FDArray
    charStrings = topDict.CharStrings

    # Fill in CFF1 PrivateDict defaults, then drop CFF2-only entries.
    defaults = buildDefaults(privateDictOperators)
    order = buildOrder(privateDictOperators)
    for fd in fdArray:
        fd.setCFF2(False)
        privateDict = fd.Private
        privateDict.order = order
        for key in order:
            if key not in privateDict.rawDict and key in defaults:
                privateDict.rawDict[key] = defaults[key]
        # Same snapshot-before-delete precaution as above.
        for key in list(privateDict.rawDict.keys()):
            if key not in order:
                del privateDict.rawDict[key]
                if hasattr(privateDict, key):
                    delattr(privateDict, key)

    # CFF1 charstrings/subroutines must be explicitly terminated.
    for cs in charStrings.values():
        cs.decompile()
        cs.program.append("endchar")
    for subrSets in [cff.GlobalSubrs] + [
        getattr(fd.Private, "Subrs", []) for fd in fdArray
    ]:
        for cs in subrSets:
            cs.program.append("return")

    # Add (optimal) width to CharStrings that need it.
    widths = defaultdict(list)
    metrics = otFont["hmtx"].metrics
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        if fdIndex is None:
            fdIndex = 0
        widths[fdIndex].append(metrics[glyphName][0])
    for fdIndex, widthList in widths.items():
        bestDefault, bestNominal = optimizeWidths(widthList)
        private = fdArray[fdIndex].Private
        private.defaultWidthX = bestDefault
        private.nominalWidthX = bestNominal
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        if fdIndex is None:
            fdIndex = 0
        private = fdArray[fdIndex].Private
        width = metrics[glyphName][0]
        if width != private.defaultWidthX:
            # Widths are encoded relative to nominalWidthX, prepended to
            # the charstring program.
            cs.program.insert(0, width - private.nominalWidthX)

    # Rename glyphs to a CID-keyed charset ("cid1", "cid2", ..., ".notdef").
    mapping = {
        name: ("cid" + str(n) if n else ".notdef")
        for n, name in enumerate(topDict.charset)
    }
    topDict.charset = [
        "cid" + str(n) if n else ".notdef" for n in range(len(topDict.charset))
    ]
    charStrings.charStrings = {
        mapping[name]: v for name, v in charStrings.charStrings.items()
    }

    # I'm not sure why the following is *not* necessary. And it breaks
    # the output if I add it.
    # topDict.ROS = ("Adobe", "Identity", 0)
|
||||
|
||||
|
||||
def convertCFF2ToCFF(font, *, updatePostTable=True):
    """Replace the font's ``CFF2`` table with an equivalent ``CFF `` table,
    in place.

    When *updatePostTable* is true, a format-2.0 ``post`` table is bumped to
    3.0, since 0x00030000 is the only post version supported with a CFF table
    (not 0x20000).
    """
    cffData = font["CFF2"].cff
    _convertCFF2ToCFF(cffData, font)
    del font["CFF2"]
    cffTable = font["CFF "] = newTable("CFF ")
    cffTable.cff = cffData

    if not updatePostTable or "post" not in font:
        return
    postTable = font["post"]
    if postTable.formatType == 2.0:
        postTable.formatType = 3.0
|
||||
|
||||
|
||||
def main(args=None):
    """Convert a CFF2 OTF font to a CFF OTF font (command-line entry point).

    Note: the docstring, parser name and help strings previously described
    the opposite (CFF->CFF2) direction — copy-paste from CFFToCFF2.main —
    while the code converts CFF2 to CFF and writes INPUT-CFF.ttf by default.
    """
    if args is None:
        import sys

        args = sys.argv[1:]

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools cffLib.CFF2ToCFF",
        description="Convert a CFF2 font to CFF.",
    )
    parser.add_argument(
        "input", metavar="INPUT.ttf", help="Input OTF file with CFF2 table."
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT.ttf",
        default=None,
        help="Output instance OTF file (default: INPUT-CFF.ttf).",
    )
    parser.add_argument(
        "--no-recalc-timestamp",
        dest="recalc_timestamp",
        action="store_false",
        help="Don't set the output font's timestamp to the current time.",
    )
    loggingGroup = parser.add_mutually_exclusive_group(required=False)
    loggingGroup.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely."
    )
    loggingGroup.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off."
    )
    options = parser.parse_args(args)

    from fontTools import configLogger

    configLogger(
        level=("DEBUG" if options.verbose else "ERROR" if options.quiet else "INFO")
    )

    import os

    infile = options.input
    if not os.path.isfile(infile):
        parser.error("No such file '{}'".format(infile))

    outfile = (
        makeOutputFileName(infile, overWrite=True, suffix="-CFF")
        if not options.output
        else options.output
    )

    font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)

    convertCFF2ToCFF(font)

    log.info(
        "Saving %s",
        outfile,
    )
    font.save(outfile)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
|
||||
sys.exit(main(sys.argv[1:]))
|
||||
305
venv/lib/python3.12/site-packages/fontTools/cffLib/CFFToCFF2.py
Normal file
305
venv/lib/python3.12/site-packages/fontTools/cffLib/CFFToCFF2.py
Normal file
@ -0,0 +1,305 @@
|
||||
"""CFF to CFF2 converter."""
|
||||
|
||||
from fontTools.ttLib import TTFont, newTable
|
||||
from fontTools.misc.cliTools import makeOutputFileName
|
||||
from fontTools.misc.psCharStrings import T2WidthExtractor
|
||||
from fontTools.cffLib import (
|
||||
TopDictIndex,
|
||||
FDArrayIndex,
|
||||
FontDict,
|
||||
buildOrder,
|
||||
topDictOperators,
|
||||
privateDictOperators,
|
||||
topDictOperators2,
|
||||
privateDictOperators2,
|
||||
)
|
||||
from io import BytesIO
|
||||
import logging
|
||||
|
||||
__all__ = ["convertCFFToCFF2", "main"]
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.cffLib")
|
||||
|
||||
|
||||
class _NominalWidthUsedError(Exception):
    """Sentinel exception object used in place of ``nominalWidthX``.

    Any attempt to add to it (i.e. a CharString that encodes an explicit
    width) raises the instance itself, signalling the caller that the
    width must be stripped.
    """

    def __add__(self, other):
        raise self

    def __radd__(self, other):
        raise self
|
||||
|
||||
|
||||
def _convertCFFToCFF2(cff, otFont):
    """Converts this object from CFF format to CFF2 format. This conversion
    is done 'in-place'. The conversion cannot be reversed.

    This assumes a decompiled CFF table. (i.e. that the object has been
    filled via :meth:`decompile` and e.g. not loaded from XML.)"""

    # Clean up T2CharStrings

    topDict = cff.topDictIndex[0]
    fdArray = topDict.FDArray if hasattr(topDict, "FDArray") else None
    charStrings = topDict.CharStrings
    globalSubrs = cff.GlobalSubrs
    # Collect local subr sets: one per FD for CID fonts, otherwise the
    # (optional) single set on the top dict's Private.
    localSubrs = (
        [getattr(fd.Private, "Subrs", []) for fd in fdArray]
        if fdArray
        else (
            [topDict.Private.Subrs]
            if hasattr(topDict, "Private") and hasattr(topDict.Private, "Subrs")
            else []
        )
    )

    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        cs.decompile()

    # Clean up subroutines first
    # CFF2 has no "return"/"endchar" operators; truncate each subr program
    # at the first occurrence of either.
    for subrs in [globalSubrs] + localSubrs:
        for subr in subrs:
            program = subr.program
            i = j = len(program)
            try:
                i = program.index("return")
            except ValueError:
                pass
            try:
                j = program.index("endchar")
            except ValueError:
                pass
            program[min(i, j) :] = []

    # Clean up glyph charstrings
    removeUnusedSubrs = False
    nominalWidthXError = _NominalWidthUsedError()
    for glyphName in charStrings.keys():
        cs, fdIndex = charStrings.getItemAndSelector(glyphName)
        program = cs.program

        thisLocalSubrs = (
            localSubrs[fdIndex]
            if fdIndex is not None
            else (
                getattr(topDict.Private, "Subrs", [])
                if hasattr(topDict, "Private")
                else []
            )
        )

        # Intentionally use custom type for nominalWidthX, such that any
        # CharString that has an explicit width encoded will throw back to us.
        extractor = T2WidthExtractor(
            thisLocalSubrs,
            globalSubrs,
            nominalWidthXError,
            0,
        )
        try:
            extractor.execute(cs)
        except _NominalWidthUsedError:
            # Program has explicit width. We want to drop it, but can't
            # just pop the first number since it may be a subroutine call.
            # Instead, when seeing that, we embed the subroutine and recurse.
            # If this ever happened, we later prune unused subroutines.
            while len(program) >= 2 and program[1] in ["callsubr", "callgsubr"]:
                removeUnusedSubrs = True
                subrNumber = program.pop(0)
                assert isinstance(subrNumber, int), subrNumber
                op = program.pop(0)
                # Subr indices are stored biased; see CFF spec.
                bias = extractor.localBias if op == "callsubr" else extractor.globalBias
                subrNumber += bias
                subrSet = thisLocalSubrs if op == "callsubr" else globalSubrs
                subrProgram = subrSet[subrNumber].program
                program[:0] = subrProgram
            # Now pop the actual width
            assert len(program) >= 1, program
            program.pop(0)

        if program and program[-1] == "endchar":
            program.pop()

    if removeUnusedSubrs:
        cff.remove_unused_subroutines()

    # Upconvert TopDict

    cff.major = 2
    cff2GetGlyphOrder = cff.otFont.getGlyphOrder
    topDictData = TopDictIndex(None, cff2GetGlyphOrder)
    for item in cff.topDictIndex:
        # Iterate over, such that all are decompiled
        topDictData.append(item)
    cff.topDictIndex = topDictData
    topDict = topDictData[0]
    if hasattr(topDict, "Private"):
        privateDict = topDict.Private
    else:
        privateDict = None
    opOrder = buildOrder(topDictOperators2)
    topDict.order = opOrder
    topDict.cff2GetGlyphOrder = cff2GetGlyphOrder

    if not hasattr(topDict, "FDArray"):
        # Non-CID font: synthesize a one-element FDArray, as CFF2 requires.
        fdArray = topDict.FDArray = FDArrayIndex()
        fdArray.strings = None
        fdArray.GlobalSubrs = topDict.GlobalSubrs
        topDict.GlobalSubrs.fdArray = fdArray
        charStrings = topDict.CharStrings
        if charStrings.charStringsAreIndexed:
            charStrings.charStringsIndex.fdArray = fdArray
        else:
            charStrings.fdArray = fdArray
        fontDict = FontDict()
        fontDict.setCFF2(True)
        fdArray.append(fontDict)
        fontDict.Private = privateDict
        privateOpOrder = buildOrder(privateDictOperators2)
        if privateDict is not None:
            for entry in privateDictOperators:
                key = entry[1]
                if key not in privateOpOrder:
                    if key in privateDict.rawDict:
                        # print "Removing private dict", key
                        del privateDict.rawDict[key]
                    if hasattr(privateDict, key):
                        delattr(privateDict, key)
                        # print "Removing privateDict attr", key
    else:
        # clean up the PrivateDicts in the fdArray
        fdArray = topDict.FDArray
        privateOpOrder = buildOrder(privateDictOperators2)
        for fontDict in fdArray:
            fontDict.setCFF2(True)
            for key in list(fontDict.rawDict.keys()):
                if key not in fontDict.order:
                    del fontDict.rawDict[key]
                    if hasattr(fontDict, key):
                        delattr(fontDict, key)

            privateDict = fontDict.Private
            for entry in privateDictOperators:
                key = entry[1]
                if key not in privateOpOrder:
                    if key in list(privateDict.rawDict.keys()):
                        # print "Removing private dict", key
                        del privateDict.rawDict[key]
                    if hasattr(privateDict, key):
                        delattr(privateDict, key)
                        # print "Removing privateDict attr", key

    # Now delete the deprecated topDict operators from CFF 1.0
    for entry in topDictOperators:
        key = entry[1]
        # We seem to need to keep the charset operator for now,
        # or we fail to compile with some fonts, like AdditionFont.otf.
        # I don't know which kind of CFF font those are. But keeping
        # charset seems to work. It will be removed when we save and
        # read the font again.
        #
        # AdditionFont.otf has <Encoding name="StandardEncoding"/>.
        if key == "charset":
            continue
        if key not in opOrder:
            if key in topDict.rawDict:
                del topDict.rawDict[key]
            if hasattr(topDict, key):
                delattr(topDict, key)

    # TODO(behdad): What does the following comment even mean? Both CFF and CFF2
    # use the same T2Charstring class. I *think* what it means is that the CharStrings
    # were loaded for CFF1, and we need to reload them for CFF2 to set varstore, etc
    # on them. At least that's what I understand. It's probably safe to remove this
    # and just set vstore where needed.
    #
    # See comment above about charset as well.

    # At this point, the Subrs and Charstrings are all still T2Charstring class
    # easiest to fix this by compiling, then decompiling again
    file = BytesIO()
    cff.compile(file, otFont, isCFF2=True)
    file.seek(0)
    cff.decompile(file, otFont, isCFF2=True)
|
||||
|
||||
|
||||
def convertCFFToCFF2(font):
    """Replace the font's ``CFF `` table with an equivalent ``CFF2`` table,
    in place. The conversion cannot be reversed."""
    cffData = font["CFF "].cff
    del font["CFF "]
    _convertCFFToCFF2(cffData, font)
    cff2Table = font["CFF2"] = newTable("CFF2")
    cff2Table.cff = cffData
|
||||
|
||||
|
||||
def main(args=None):
    """Upgrade a CFF OTF font to a CFF2 OTF font (command-line entry point)."""
    if args is None:
        import sys

        args = sys.argv[1:]

    import argparse
    import os

    parser = argparse.ArgumentParser(
        "fonttools cffLib.CFFToCFF2",
        description="Upgrade a CFF font to CFF2.",
    )
    parser.add_argument(
        "input", metavar="INPUT.ttf", help="Input OTF file with CFF table."
    )
    parser.add_argument(
        "-o",
        "--output",
        metavar="OUTPUT.ttf",
        default=None,
        help="Output instance OTF file (default: INPUT-CFF2.ttf).",
    )
    parser.add_argument(
        "--no-recalc-timestamp",
        dest="recalc_timestamp",
        action="store_false",
        help="Don't set the output font's timestamp to the current time.",
    )
    verbosityGroup = parser.add_mutually_exclusive_group(required=False)
    verbosityGroup.add_argument(
        "-v", "--verbose", action="store_true", help="Run more verbosely."
    )
    verbosityGroup.add_argument(
        "-q", "--quiet", action="store_true", help="Turn verbosity off."
    )
    options = parser.parse_args(args)

    from fontTools import configLogger

    # -v wins over default, -q silences everything below ERROR.
    if options.verbose:
        logLevel = "DEBUG"
    elif options.quiet:
        logLevel = "ERROR"
    else:
        logLevel = "INFO"
    configLogger(level=logLevel)

    infile = options.input
    if not os.path.isfile(infile):
        parser.error("No such file '{}'".format(infile))

    if options.output:
        outfile = options.output
    else:
        outfile = makeOutputFileName(infile, overWrite=True, suffix="-CFF2")

    font = TTFont(infile, recalcTimestamp=options.recalc_timestamp, recalcBBoxes=False)

    convertCFFToCFF2(font)

    log.info("Saving %s", outfile)
    font.save(outfile)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import sys

    # Script entry point: forward command-line arguments to main() and use
    # its return value as the process exit status.
    sys.exit(main(sys.argv[1:]))
|
||||
3659
venv/lib/python3.12/site-packages/fontTools/cffLib/__init__.py
Normal file
3659
venv/lib/python3.12/site-packages/fontTools/cffLib/__init__.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,924 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""T2CharString operator specializer and generalizer.
|
||||
|
||||
PostScript glyph drawing operations can be expressed in multiple different
|
||||
ways. For example, as well as the ``lineto`` operator, there is also a
|
||||
``hlineto`` operator which draws a horizontal line, removing the need to
|
||||
specify a ``dx`` coordinate, and a ``vlineto`` operator which draws a
|
||||
vertical line, removing the need to specify a ``dy`` coordinate. As well
|
||||
as decompiling :class:`fontTools.misc.psCharStrings.T2CharString` objects
|
||||
into lists of operations, this module allows for conversion between general
|
||||
and specific forms of the operation.
|
||||
|
||||
"""
|
||||
|
||||
from fontTools.cffLib import maxStackLimit
|
||||
|
||||
|
||||
def stringToProgram(string):
    """Parse a charstring program from text.

    Accepts either a whitespace-separated string or an already-split
    sequence of tokens.  Tokens that parse as int or float are converted;
    anything else (operator names) is kept as-is.
    """
    tokens = string.split() if isinstance(string, str) else string
    program = []
    for tok in tokens:
        # Prefer int over float so "5" stays integral.
        for cast in (int, float):
            try:
                tok = cast(tok)
                break
            except ValueError:
                pass
        program.append(tok)
    return program
|
||||
|
||||
|
||||
def programToString(program):
    """Render a charstring program list as one space-separated string."""
    return " ".join(map(str, program))
|
||||
|
||||
|
||||
def programToCommands(program, getNumRegions=None):
    """Takes a T2CharString program list and returns list of commands.
    Each command is a two-tuple of commandname,arg-list.  The commandname might
    be empty string if no commandname shall be emitted (used for glyph width,
    hintmask/cntrmask argument, as well as stray arguments at the end of the
    program (🤷).
    'getNumRegions' may be None, or a callable object. It must return the
    number of regions. 'getNumRegions' takes a single argument, vsindex. It
    returns the numRegions for the vsindex.
    The Charstring may or may not start with a width value. If the first
    non-blend operator has an odd number of arguments, then the first argument is
    a width, and is popped off. This is complicated with blend operators, as
    there may be more than one before the first hint or moveto operator, and each
    one reduces several arguments to just one list argument. We have to sum the
    number of arguments that are not part of the blend arguments, and all the
    'numBlends' values. We could instead have said that by definition, if there
    is a blend operator, there is no width value, since CFF2 Charstrings don't
    have width values. I discussed this with Behdad, and we are allowing for an
    initial width value in this case because developers may assemble a CFF2
    charstring from CFF Charstrings, which could have width values.
    """

    seenWidthOp = False          # becomes True once the width (if any) was handled
    vsIndex = 0                  # current variation-store index for blend region counts
    lenBlendStack = 0            # args represented by blend lists seen so far
    lastBlendIndex = 0           # stack position just after the last blend list
    commands = []
    stack = []
    it = iter(program)

    for token in it:
        # Numeric tokens just accumulate on the stack until an operator.
        if not isinstance(token, str):
            stack.append(token)
            continue

        if token == "blend":
            assert getNumRegions is not None
            numSourceFonts = 1 + getNumRegions(vsIndex)
            # replace the blend op args on the stack with a single list
            # containing all the blend op args.
            numBlends = stack[-1]
            numBlendArgs = numBlends * numSourceFonts + 1
            # replace first blend op by a list of the blend ops.
            stack[-numBlendArgs:] = [stack[-numBlendArgs:]]
            lenStack = len(stack)
            lenBlendStack += numBlends + lenStack - 1
            lastBlendIndex = lenStack
            # if a blend op exists, this is or will be a CFF2 charstring.
            continue

        elif token == "vsindex":
            vsIndex = stack[-1]
            assert type(vsIndex) is int

        elif (not seenWidthOp) and token in {
            "hstem",
            "hstemhm",
            "vstem",
            "vstemhm",
            "cntrmask",
            "hintmask",
            "hmoveto",
            "vmoveto",
            "rmoveto",
            "endchar",
        }:
            seenWidthOp = True
            # h/vmoveto take one coordinate, so a width flips their parity.
            parity = token in {"hmoveto", "vmoveto"}
            if lenBlendStack:
                # lenBlendStack has the number of args represented by the last blend
                # arg and all the preceding args. We need to now add the number of
                # args following the last blend arg.
                numArgs = lenBlendStack + len(stack[lastBlendIndex:])
            else:
                numArgs = len(stack)
            if numArgs and (numArgs % 2) ^ parity:
                # Odd argument: the first value is the glyph width; emit it
                # as a nameless command.
                width = stack.pop(0)
                commands.append(("", [width]))

        if token in {"hintmask", "cntrmask"}:
            if stack:
                commands.append(("", stack))
            commands.append((token, []))
            # The mask byte string follows the operator in the program.
            commands.append(("", [next(it)]))
        else:
            commands.append((token, stack))
        stack = []
    # Stray trailing arguments are emitted as a final nameless command.
    if stack:
        commands.append(("", stack))
    return commands
|
||||
|
||||
|
||||
def _flattenBlendArgs(args):
|
||||
token_list = []
|
||||
for arg in args:
|
||||
if isinstance(arg, list):
|
||||
token_list.extend(arg)
|
||||
token_list.append("blend")
|
||||
else:
|
||||
token_list.append(arg)
|
||||
return token_list
|
||||
|
||||
|
||||
def commandsToProgram(commands):
    """Takes a commands list as returned by programToCommands() and converts
    it back to a T2CharString program list."""
    program = []
    for op, args in commands:
        # Blend lists must be unrolled back into values + 'blend' tokens.
        has_blends = any(isinstance(arg, list) for arg in args)
        program.extend(_flattenBlendArgs(args) if has_blends else args)
        if op:
            program.append(op)
    return program
|
||||
|
||||
|
||||
def _everyN(el, n):
|
||||
"""Group the list el into groups of size n"""
|
||||
l = len(el)
|
||||
if l % n != 0:
|
||||
raise ValueError(el)
|
||||
for i in range(0, l, n):
|
||||
yield el[i : i + n]
|
||||
|
||||
|
||||
class _GeneralizerDecombinerCommandsMap(object):
    """Namespace of static generators mapping each specialized T2 operator
    to a stream of equivalent generic commands ('rmoveto' / 'rlineto' /
    'rrcurveto'), one drawing segment per yielded command.

    Each generator validates the argument count of its operator and raises
    ValueError on a malformed argument list.
    """

    @staticmethod
    def rmoveto(args):
        # Already general; just validate arity.
        if len(args) != 2:
            raise ValueError(args)
        yield ("rmoveto", args)

    @staticmethod
    def hmoveto(args):
        if len(args) != 1:
            raise ValueError(args)
        yield ("rmoveto", [args[0], 0])

    @staticmethod
    def vmoveto(args):
        if len(args) != 1:
            raise ValueError(args)
        yield ("rmoveto", [0, args[0]])

    @staticmethod
    def rlineto(args):
        if not args:
            raise ValueError(args)
        for args in _everyN(args, 2):
            yield ("rlineto", args)

    @staticmethod
    def hlineto(args):
        if not args:
            raise ValueError(args)
        it = iter(args)
        try:
            # Alternating horizontal / vertical segments, starting horizontal.
            while True:
                yield ("rlineto", [next(it), 0])
                yield ("rlineto", [0, next(it)])
        except StopIteration:
            pass

    @staticmethod
    def vlineto(args):
        if not args:
            raise ValueError(args)
        it = iter(args)
        try:
            # Alternating vertical / horizontal segments, starting vertical.
            while True:
                yield ("rlineto", [0, next(it)])
                yield ("rlineto", [next(it), 0])
        except StopIteration:
            pass

    @staticmethod
    def rrcurveto(args):
        if not args:
            raise ValueError(args)
        for args in _everyN(args, 6):
            yield ("rrcurveto", args)

    @staticmethod
    def hhcurveto(args):
        l = len(args)
        if l < 4 or l % 4 > 1:
            raise ValueError(args)
        if l % 2 == 1:
            # Odd count: the leading extra value is the first curve's dy1
            # (hence the args[1], args[0] swap below).
            yield ("rrcurveto", [args[1], args[0], args[2], args[3], args[4], 0])
            args = args[5:]
        for args in _everyN(args, 4):
            yield ("rrcurveto", [args[0], 0, args[1], args[2], args[3], 0])

    @staticmethod
    def vvcurveto(args):
        l = len(args)
        if l < 4 or l % 4 > 1:
            raise ValueError(args)
        if l % 2 == 1:
            # Odd count: the leading extra value is the first curve's dx1.
            yield ("rrcurveto", [args[0], args[1], args[2], args[3], 0, args[4]])
            args = args[5:]
        for args in _everyN(args, 4):
            yield ("rrcurveto", [0, args[0], args[1], args[2], 0, args[3]])

    @staticmethod
    def hvcurveto(args):
        l = len(args)
        if l < 4 or l % 8 not in {0, 1, 4, 5}:
            raise ValueError(args)
        last_args = None
        if l % 2 == 1:
            # Odd count: the final five values describe the last curve,
            # whose end tangent carries the extra coordinate.
            lastStraight = l % 8 == 5
            args, last_args = args[:-5], args[-5:]
        it = _everyN(args, 4)
        try:
            # Curves alternate: start-horizontal then start-vertical.
            while True:
                args = next(it)
                yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
                args = next(it)
                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
        except StopIteration:
            pass
        if last_args:
            args = last_args
            if lastStraight:
                yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])
            else:
                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])

    @staticmethod
    def vhcurveto(args):
        l = len(args)
        if l < 4 or l % 8 not in {0, 1, 4, 5}:
            raise ValueError(args)
        last_args = None
        if l % 2 == 1:
            lastStraight = l % 8 == 5
            args, last_args = args[:-5], args[-5:]
        it = _everyN(args, 4)
        try:
            # Mirror of hvcurveto: start-vertical then start-horizontal.
            while True:
                args = next(it)
                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], 0])
                args = next(it)
                yield ("rrcurveto", [args[0], 0, args[1], args[2], 0, args[3]])
        except StopIteration:
            pass
        if last_args:
            args = last_args
            if lastStraight:
                yield ("rrcurveto", [0, args[0], args[1], args[2], args[3], args[4]])
            else:
                yield ("rrcurveto", [args[0], 0, args[1], args[2], args[4], args[3]])

    @staticmethod
    def rcurveline(args):
        # One or more curves followed by a single line (the last two args).
        l = len(args)
        if l < 8 or l % 6 != 2:
            raise ValueError(args)
        args, last_args = args[:-2], args[-2:]
        for args in _everyN(args, 6):
            yield ("rrcurveto", args)
        yield ("rlineto", last_args)

    @staticmethod
    def rlinecurve(args):
        # One or more lines followed by a single curve (the last six args).
        l = len(args)
        if l < 8 or l % 2 != 0:
            raise ValueError(args)
        args, last_args = args[:-6], args[-6:]
        for args in _everyN(args, 2):
            yield ("rlineto", args)
        yield ("rrcurveto", last_args)
|
||||
|
||||
|
||||
def _convertBlendOpToArgs(blendList):
    """Convert a raw blend-op argument list into a list of per-value blend
    lists, each of the form [default, delta_1, ..., delta_numRegions, 1].

    Raises ValueError if the argument count is inconsistent with the
    trailing numBlends value.
    """
    # args is list of blend op args. Since we are supporting
    # recursive blend op calls, some of these args may also
    # be a list of blend op args, and need to be converted before
    # we convert the current list.
    if any([isinstance(arg, list) for arg in blendList]):
        args = [
            i
            for e in blendList
            for i in (_convertBlendOpToArgs(e) if isinstance(e, list) else [e])
        ]
    else:
        args = blendList

    # We now know that blendList contains a blend op argument list, even if
    # some of the args are lists that each contain a blend op argument list.
    # Convert from:
    # [default font arg sequence x0,...,xn] + [delta tuple for x0] + ... + [delta tuple for xn]
    # to:
    # [ [x0] + [delta tuple for x0],
    #   ...,
    #   [xn] + [delta tuple for xn] ]
    numBlends = args[-1]
    # Can't use args.pop() when the args are being used in a nested list
    # comprehension. See calling context
    args = args[:-1]

    l = len(args)
    numRegions = l // numBlends - 1
    if not (numBlends * (numRegions + 1) == l):
        raise ValueError(blendList)

    # First numBlends values are the default-font values; the rest are
    # the concatenated per-value delta tuples.
    defaultArgs = [[arg] for arg in args[:numBlends]]
    deltaArgs = args[numBlends:]
    numDeltaValues = len(deltaArgs)
    deltaList = [
        deltaArgs[i : i + numRegions] for i in range(0, numDeltaValues, numRegions)
    ]
    # The trailing 1 marks each entry as a single-value blend list.
    blend_args = [a + b + [1] for a, b in zip(defaultArgs, deltaList)]
    return blend_args
|
||||
|
||||
|
||||
def generalizeCommands(commands, ignoreErrors=False):
    """Convert specialized commands (hlineto, vvcurveto, ...) into the
    generic rmoveto/rlineto/rrcurveto forms, one segment per command.

    Blend-op argument lists are first rewritten into per-value blend lists
    via _convertBlendOpToArgs.  With ignoreErrors=True, a command with an
    invalid argument count is emitted as data (two ''-commands) instead of
    raising ValueError.
    """
    result = []
    mapping = _GeneralizerDecombinerCommandsMap
    for op, args in commands:
        # First, generalize any blend args in the arg list.
        if any([isinstance(arg, list) for arg in args]):
            try:
                args = [
                    n
                    for arg in args
                    for n in (
                        _convertBlendOpToArgs(arg) if isinstance(arg, list) else [arg]
                    )
                ]
            except ValueError:
                if ignoreErrors:
                    # Store op as data, such that consumers of commands do not have to
                    # deal with incorrect number of arguments.
                    result.append(("", args))
                    result.append(("", [op]))
                    # NOTE(review): control falls through here and the op is
                    # still processed below with the unconverted args —
                    # confirm whether a `continue` is intended.
                else:
                    raise

        # Ops without a generalizer (hintmask, endchar, '' data, ...) pass
        # through unchanged.
        func = getattr(mapping, op, None)
        if func is None:
            result.append((op, args))
            continue
        try:
            for command in func(args):
                result.append(command)
        except ValueError:
            if ignoreErrors:
                # Store op as data, such that consumers of commands do not have to
                # deal with incorrect number of arguments.
                result.append(("", args))
                result.append(("", [op]))
            else:
                raise
    return result
|
||||
|
||||
|
||||
def generalizeProgram(program, getNumRegions=None, **kwargs):
    """Decompile *program* into commands, generalize them, and recompile
    back to a T2CharString program list."""
    commands = programToCommands(program, getNumRegions)
    return commandsToProgram(generalizeCommands(commands, **kwargs))
|
||||
|
||||
|
||||
def _categorizeVector(v):
|
||||
"""
|
||||
Takes X,Y vector v and returns one of r, h, v, or 0 depending on which
|
||||
of X and/or Y are zero, plus tuple of nonzero ones. If both are zero,
|
||||
it returns a single zero still.
|
||||
|
||||
>>> _categorizeVector((0,0))
|
||||
('0', (0,))
|
||||
>>> _categorizeVector((1,0))
|
||||
('h', (1,))
|
||||
>>> _categorizeVector((0,2))
|
||||
('v', (2,))
|
||||
>>> _categorizeVector((1,2))
|
||||
('r', (1, 2))
|
||||
"""
|
||||
if not v[0]:
|
||||
if not v[1]:
|
||||
return "0", v[:1]
|
||||
else:
|
||||
return "v", v[1:]
|
||||
else:
|
||||
if not v[1]:
|
||||
return "h", v[:1]
|
||||
else:
|
||||
return "r", v
|
||||
|
||||
|
||||
def _mergeCategories(a, b):
|
||||
if a == "0":
|
||||
return b
|
||||
if b == "0":
|
||||
return a
|
||||
if a == b:
|
||||
return a
|
||||
return None
|
||||
|
||||
|
||||
def _negateCategory(a):
|
||||
if a == "h":
|
||||
return "v"
|
||||
if a == "v":
|
||||
return "h"
|
||||
assert a in "0r"
|
||||
return a
|
||||
|
||||
|
||||
def _convertToBlendCmds(args):
    """Pack runs of per-value blend lists in *args* into combined blend
    arguments (values, deltas, numBlends), respecting the CFF2 stack limit.
    Returns the rewritten argument list; non-blend args pass through."""
    # return a list of blend commands, and
    # the remaining non-blended args, if any.
    num_args = len(args)
    stack_use = 0
    new_args = []
    i = 0
    while i < num_args:
        arg = args[i]
        i += 1
        if not isinstance(arg, list):
            new_args.append(arg)
            stack_use += 1
        else:
            prev_stack_use = stack_use
            # The arg is a tuple of blend values.
            # These are each (master 0,delta 1..delta n, 1)
            # Combine as many successive tuples as we can,
            # up to the max stack limit.
            num_sources = len(arg) - 1
            blendlist = [arg]
            stack_use += 1 + num_sources  # 1 for the num_blends arg

            # if we are here, max stack is the CFF2 max stack.
            # I use the CFF2 max stack limit here rather than
            # the 'maxstack' chosen by the client, as the default
            # maxstack may have been used unintentionally. For all
            # the other operators, this just produces a little less
            # optimization, but here it puts a hard (and low) limit
            # on the number of source fonts that can be used.
            #
            # Make sure the stack depth does not exceed (maxstack - 1), so
            # that subroutinizer can insert subroutine calls at any point.
            while (
                (i < num_args)
                and isinstance(args[i], list)
                and stack_use + num_sources < maxStackLimit
            ):
                blendlist.append(args[i])
                i += 1
                stack_use += num_sources
            # blendList now contains as many single blend tuples as can be
            # combined without exceeding the CFF2 stack limit.
            num_blends = len(blendlist)
            # append the 'num_blends' default font values
            blend_args = []
            for arg in blendlist:
                blend_args.append(arg[0])
            for arg in blendlist:
                # Each single blend list carries a trailing 1 marker.
                assert arg[-1] == 1
                blend_args.extend(arg[1:-1])
            blend_args.append(num_blends)
            new_args.append(blend_args)
            # After the blend executes, only num_blends values remain on
            # the stack.
            stack_use = prev_stack_use + num_blends

    return new_args
|
||||
|
||||
|
||||
def _addArgs(a, b):
|
||||
if isinstance(b, list):
|
||||
if isinstance(a, list):
|
||||
if len(a) != len(b) or a[-1] != b[-1]:
|
||||
raise ValueError()
|
||||
return [_addArgs(va, vb) for va, vb in zip(a[:-1], b[:-1])] + [a[-1]]
|
||||
else:
|
||||
a, b = b, a
|
||||
if isinstance(a, list):
|
||||
assert a[-1] == 1
|
||||
return [_addArgs(a[0], b)] + a[1:]
|
||||
return a + b
|
||||
|
||||
|
||||
def _argsStackUse(args):
|
||||
stackLen = 0
|
||||
maxLen = 0
|
||||
for arg in args:
|
||||
if type(arg) is list:
|
||||
# Blended arg
|
||||
maxLen = max(maxLen, stackLen + _argsStackUse(arg))
|
||||
stackLen += arg[-1]
|
||||
else:
|
||||
stackLen += 1
|
||||
return max(stackLen, maxLen)
|
||||
|
||||
|
||||
def specializeCommands(
    commands,
    ignoreErrors=False,
    generalizeFirst=True,
    preserveTopology=False,
    maxstack=48,
):
    """Rewrite a command list into its compact, specialized form (hlineto,
    vvcurveto, merged runs, ...) and return the modified list.

    ignoreErrors: pass malformed commands through as data instead of raising.
    generalizeFirst: run generalizeCommands() first; disable only if the
        input is already in generalized (one-segment-per-command) form.
    preserveTopology: skip the optimizations that change point numbers.
    maxstack: operator merging keeps stack use strictly below this value.
    """
    # We perform several rounds of optimizations.  They are carefully ordered and are:
    #
    # 0. Generalize commands.
    #    This ensures that they are in our expected simple form, with each line/curve only
    #    having arguments for one segment, and using the generic form (rlineto/rrcurveto).
    #    If caller is sure the input is in this form, they can turn off generalization to
    #    save time.
    #
    # 1. Combine successive rmoveto operations.
    #
    # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
    #    We specialize into some, made-up, variants as well, which simplifies following
    #    passes.
    #
    # 3. Merge or delete redundant operations, to the extent requested.
    #    OpenType spec declares point numbers in CFF undefined.  As such, we happily
    #    change topology.  If client relies on point numbers (in GPOS anchors, or for
    #    hinting purposes(what?)) they can turn this off.
    #
    # 4. Peephole optimization to revert back some of the h/v variants back into their
    #    original "relative" operator (rline/rrcurveto) if that saves a byte.
    #
    # 5. Combine adjacent operators when possible, minding not to go over max stack size.
    #
    # 6. Resolve any remaining made-up operators into real operators.
    #
    # I have convinced myself that this produces optimal bytecode (except for, possibly
    # one byte each time maxstack size prohibits combining.)  YMMV, but you'd be wrong. :-)
    # A dynamic-programming approach can do the same but would be significantly slower.
    #
    # 7. For any args which are blend lists, convert them to a blend command.

    # 0. Generalize commands.
    if generalizeFirst:
        commands = generalizeCommands(commands, ignoreErrors=ignoreErrors)
    else:
        commands = list(commands)  # Make copy since we modify in-place later.

    # 1. Combine successive rmoveto operations.
    for i in range(len(commands) - 1, 0, -1):
        if "rmoveto" == commands[i][0] == commands[i - 1][0]:
            v1, v2 = commands[i - 1][1], commands[i][1]
            commands[i - 1] = ("rmoveto", [v1[0] + v2[0], v1[1] + v2[1]])
            del commands[i]

    # 2. Specialize rmoveto/rlineto/rrcurveto operators into horizontal/vertical variants.
    #
    # We, in fact, specialize into more, made-up, variants that special-case when both
    # X and Y components are zero.  This simplifies the following optimization passes.
    # This case is rare, but OCD does not let me skip it.
    #
    # After this round, we will have four variants that use the following mnemonics:
    #
    #  - 'r' for relative,   ie. non-zero X and non-zero Y,
    #  - 'h' for horizontal, ie. zero X and non-zero Y,
    #  - 'v' for vertical,   ie. non-zero X and zero Y,
    #  - '0' for zeros,      ie. zero X and zero Y.
    #
    # The '0' pseudo-operators are not part of the spec, but help simplify the following
    # optimization rounds.  We resolve them at the end.  So, after this, we will have four
    # moveto and four lineto variants:
    #
    #  - 0moveto, 0lineto
    #  - hmoveto, hlineto
    #  - vmoveto, vlineto
    #  - rmoveto, rlineto
    #
    # and sixteen curveto variants.  For example, a '0hcurveto' operator means a curve
    # dx0,dy0,dx1,dy1,dx2,dy2,dx3,dy3 where dx0, dx1, and dy3 are zero but not dx3.
    # An 'rvcurveto' means dx3 is zero but not dx0,dy0,dy3.
    #
    # There are nine different variants of curves without the '0'.  Those nine map exactly
    # to the existing curve variants in the spec: rrcurveto, and the four variants hhcurveto,
    # vvcurveto, hvcurveto, and vhcurveto each cover two cases, one with an odd number of
    # arguments and one without.  Eg. an hhcurveto with an extra argument (odd number of
    # arguments) is in fact an rhcurveto.  The operators in the spec are designed such that
    # all four of rhcurveto, rvcurveto, hrcurveto, and vrcurveto are encodable for one curve.
    #
    # Of the curve types with '0', the 00curveto is equivalent to a lineto variant.  The rest
    # of the curve types with a 0 need to be encoded as a h or v variant.  Ie. a '0' can be
    # thought of a "don't care" and can be used as either an 'h' or a 'v'.  As such, we always
    # encode a number 0 as argument when we use a '0' variant.  Later on, we can just substitute
    # the '0' with either 'h' or 'v' and it works.
    #
    # When we get to curve splines however, things become more complicated...  XXX finish this.
    # There's one more complexity with splines.  If one side of the spline is not horizontal or
    # vertical (or zero), ie. if it's 'r', then it limits which spline types we can encode.
    # Only hhcurveto and vvcurveto operators can encode a spline starting with 'r', and
    # only hvcurveto and vhcurveto operators can encode a spline ending with 'r'.
    # This limits our merge opportunities later.
    #
    for i in range(len(commands)):
        op, args = commands[i]

        if op in {"rmoveto", "rlineto"}:
            c, args = _categorizeVector(args)
            commands[i] = c + op[1:], args
            continue

        if op == "rrcurveto":
            c1, args1 = _categorizeVector(args[:2])
            c2, args2 = _categorizeVector(args[-2:])
            commands[i] = c1 + c2 + "curveto", args1 + args[2:4] + args2
            continue

    # 3. Merge or delete redundant operations, to the extent requested.
    #
    # TODO
    # A 0moveto that comes before all other path operations can be removed.
    # though I find conflicting evidence for this.
    #
    # TODO
    # "If hstem and vstem hints are both declared at the beginning of a
    # CharString, and this sequence is followed directly by the hintmask or
    # cntrmask operators, then the vstem hint operator (or, if applicable,
    # the vstemhm operator) need not be included."
    #
    # "The sequence and form of a CFF2 CharString program may be represented as:
    # {hs* vs* cm* hm* mt subpath}? {mt subpath}*"
    #
    # https://www.microsoft.com/typography/otspec/cff2charstr.htm#section3.1
    #
    # For Type2 CharStrings the sequence is:
    # w? {hs* vs* cm* hm* mt subpath}? {mt subpath}* endchar"

    # Some other redundancies change topology (point numbers).
    if not preserveTopology:
        for i in range(len(commands) - 1, -1, -1):
            op, args = commands[i]

            # A 00curveto is demoted to a (specialized) lineto.
            if op == "00curveto":
                assert len(args) == 4
                c, args = _categorizeVector(args[1:3])
                op = c + "lineto"
                commands[i] = op, args
                # and then...

            # A 0lineto can be deleted.
            if op == "0lineto":
                del commands[i]
                continue

            # Merge adjacent hlineto's and vlineto's.
            # In CFF2 charstrings from variable fonts, each
            # arg item may be a list of blendable values, one from
            # each source font.
            if i and op in {"hlineto", "vlineto"} and (op == commands[i - 1][0]):
                _, other_args = commands[i - 1]
                assert len(args) == 1 and len(other_args) == 1
                try:
                    new_args = [_addArgs(args[0], other_args[0])]
                except ValueError:
                    continue
                commands[i - 1] = (op, new_args)
                del commands[i]
                continue

    # 4. Peephole optimization to revert back some of the h/v variants back into their
    #    original "relative" operator (rline/rrcurveto) if that saves a byte.
    for i in range(1, len(commands) - 1):
        op, args = commands[i]
        prv, nxt = commands[i - 1][0], commands[i + 1][0]

        if op in {"0lineto", "hlineto", "vlineto"} and prv == nxt == "rlineto":
            assert len(args) == 1
            args = [0, args[0]] if op[0] == "v" else [args[0], 0]
            commands[i] = ("rlineto", args)
            continue

        if op[2:] == "curveto" and len(args) == 5 and prv == nxt == "rrcurveto":
            assert (op[0] == "r") ^ (op[1] == "r")
            if op[0] == "v":
                pos = 0
            elif op[0] != "r":
                pos = 1
            elif op[1] == "v":
                pos = 4
            else:
                pos = 5
            # Insert, while maintaining the type of args (can be tuple or list).
            args = args[:pos] + type(args)((0,)) + args[pos:]
            commands[i] = ("rrcurveto", args)
            continue

    # 5. Combine adjacent operators when possible, minding not to go over max stack size.
    stackUse = _argsStackUse(commands[-1][1]) if commands else 0
    for i in range(len(commands) - 1, 0, -1):
        op1, args1 = commands[i - 1]
        op2, args2 = commands[i]
        new_op = None

        # Merge logic...
        if {op1, op2} <= {"rlineto", "rrcurveto"}:
            if op1 == op2:
                new_op = op1
            else:
                l = len(args2)
                if op2 == "rrcurveto" and l == 6:
                    new_op = "rlinecurve"
                elif l == 2:
                    new_op = "rcurveline"

        elif (op1, op2) in {("rlineto", "rlinecurve"), ("rrcurveto", "rcurveline")}:
            new_op = op2

        elif {op1, op2} == {"vlineto", "hlineto"}:
            new_op = op1

        elif "curveto" == op1[2:] == op2[2:]:
            d0, d1 = op1[:2]
            d2, d3 = op2[:2]

            if d1 == "r" or d2 == "r" or d0 == d3 == "r":
                continue

            d = _mergeCategories(d1, d2)
            if d is None:
                continue
            if d0 == "r":
                d = _mergeCategories(d, d3)
                if d is None:
                    continue
                new_op = "r" + d + "curveto"
            elif d3 == "r":
                d0 = _mergeCategories(d0, _negateCategory(d))
                if d0 is None:
                    continue
                new_op = d0 + "r" + "curveto"
            else:
                d0 = _mergeCategories(d0, d3)
                if d0 is None:
                    continue
                new_op = d0 + d + "curveto"

        # Make sure the stack depth does not exceed (maxstack - 1), so
        # that subroutinizer can insert subroutine calls at any point.
        args1StackUse = _argsStackUse(args1)
        combinedStackUse = max(args1StackUse, len(args1) + stackUse)
        if new_op and combinedStackUse < maxstack:
            commands[i - 1] = (new_op, args1 + args2)
            del commands[i]
            stackUse = combinedStackUse
        else:
            stackUse = args1StackUse

    # 6. Resolve any remaining made-up operators into real operators.
    for i in range(len(commands)):
        op, args = commands[i]

        if op in {"0moveto", "0lineto"}:
            commands[i] = "h" + op[1:], args
            continue

        if op[2:] == "curveto" and op[:2] not in {"rr", "hh", "vv", "vh", "hv"}:
            l = len(args)

            op0, op1 = op[:2]
            if (op0 == "r") ^ (op1 == "r"):
                assert l % 2 == 1
            if op0 == "0":
                op0 = "h"
            if op1 == "0":
                op1 = "h"
            if op0 == "r":
                op0 = op1
            if op1 == "r":
                op1 = _negateCategory(op0)
            assert {op0, op1} <= {"h", "v"}, (op0, op1)

            if l % 2:
                if op0 != op1:  # vhcurveto / hvcurveto
                    if (op0 == "h") ^ (l % 8 == 1):
                        # Swap last two args order
                        args = args[:-2] + args[-1:] + args[-2:-1]
                else:  # hhcurveto / vvcurveto
                    if op0 == "h":  # hhcurveto
                        # Swap first two args order
                        args = args[1:2] + args[:1] + args[2:]

            commands[i] = op0 + op1 + "curveto", args
            continue

    # 7. For any series of args which are blend lists, convert the series to a single blend arg.
    for i in range(len(commands)):
        op, args = commands[i]
        if any(isinstance(arg, list) for arg in args):
            commands[i] = op, _convertToBlendCmds(args)

    return commands
|
||||
|
||||
|
||||
def specializeProgram(program, getNumRegions=None, **kwargs):
    """Decompile *program* into commands, specialize them, and recompile
    back to a T2CharString program list."""
    commands = programToCommands(program, getNumRegions)
    return commandsToProgram(specializeCommands(commands, **kwargs))
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import sys

    # With no arguments, run the module's doctests and exit with the
    # number of failures as the status code.
    if len(sys.argv) == 1:
        import doctest

        sys.exit(doctest.testmod().failed)

    import argparse

    parser = argparse.ArgumentParser(
        "fonttools cffLib.specializer",
        description="CFF CharString generalizer/specializer",
    )
    parser.add_argument("program", metavar="command", nargs="*", help="Commands.")
    parser.add_argument(
        "--num-regions",
        metavar="NumRegions",
        nargs="*",
        default=None,
        # Fixed typo: "opertaions" -> "operations".
        help="Number of variable-font regions for blend operations.",
    )
    parser.add_argument(
        "--font",
        metavar="FONTFILE",
        default=None,
        help="CFF2 font to specialize.",
    )
    parser.add_argument(
        "-o",
        "--output-file",
        type=str,
        help="Output font file name.",
    )

    options = parser.parse_args(sys.argv[1:])

    if options.program:
        # Build a vsindex -> numRegions lookup from the --num-regions
        # values; a None vsindex selects the first entry.
        getNumRegions = (
            None
            if options.num_regions is None
            else lambda vsIndex: int(
                options.num_regions[0 if vsIndex is None else vsIndex]
            )
        )

        program = stringToProgram(options.program)
        print("Program:")
        print(programToString(program))
        commands = programToCommands(program, getNumRegions)
        print("Commands:")
        print(commands)
        program2 = commandsToProgram(commands)
        print("Program from commands:")
        print(programToString(program2))
        # Round-trip through commands must be lossless.
        assert program == program2
        print("Generalized program:")
        print(programToString(generalizeProgram(program, getNumRegions)))
        print("Specialized program:")
        print(programToString(specializeProgram(program, getNumRegions)))

    if options.font:
        from fontTools.ttLib import TTFont

        font = TTFont(options.font)
        cff2 = font["CFF2"].cff.topDictIndex[0]
        charstrings = cff2.CharStrings
        # Specialize every glyph's charstring in place.
        for glyphName in charstrings.keys():
            charstring = charstrings[glyphName]
            charstring.decompile()
            getNumRegions = charstring.private.getNumRegions
            charstring.program = specializeProgram(
                charstring.program, getNumRegions, maxstack=maxStackLimit
            )

        if options.output_file is None:
            from fontTools.misc.cliTools import makeOutputFileName

            outfile = makeOutputFileName(
                options.font, overWrite=True, suffix=".specialized"
            )
        else:
            outfile = options.output_file
        if outfile:
            print("Saving", outfile)
            font.save(outfile)
|
||||
485
venv/lib/python3.12/site-packages/fontTools/cffLib/transforms.py
Normal file
485
venv/lib/python3.12/site-packages/fontTools/cffLib/transforms.py
Normal file
@ -0,0 +1,485 @@
|
||||
from fontTools.misc.psCharStrings import (
|
||||
SimpleT2Decompiler,
|
||||
T2WidthExtractor,
|
||||
calcSubrBias,
|
||||
)
|
||||
|
||||
|
||||
def _uniq_sort(l):
|
||||
return sorted(set(l))
|
||||
|
||||
|
||||
class StopHintCountEvent(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class _DesubroutinizingT2Decompiler(SimpleT2Decompiler):
|
||||
stop_hintcount_ops = (
|
||||
"op_hintmask",
|
||||
"op_cntrmask",
|
||||
"op_rmoveto",
|
||||
"op_hmoveto",
|
||||
"op_vmoveto",
|
||||
)
|
||||
|
||||
def __init__(self, localSubrs, globalSubrs, private=None):
|
||||
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
|
||||
|
||||
def execute(self, charString):
|
||||
self.need_hintcount = True # until proven otherwise
|
||||
for op_name in self.stop_hintcount_ops:
|
||||
setattr(self, op_name, self.stop_hint_count)
|
||||
|
||||
if hasattr(charString, "_desubroutinized"):
|
||||
# If a charstring has already been desubroutinized, we will still
|
||||
# need to execute it if we need to count hints in order to
|
||||
# compute the byte length for mask arguments, and haven't finished
|
||||
# counting hints pairs.
|
||||
if self.need_hintcount and self.callingStack:
|
||||
try:
|
||||
SimpleT2Decompiler.execute(self, charString)
|
||||
except StopHintCountEvent:
|
||||
del self.callingStack[-1]
|
||||
return
|
||||
|
||||
charString._patches = []
|
||||
SimpleT2Decompiler.execute(self, charString)
|
||||
desubroutinized = charString.program[:]
|
||||
for idx, expansion in reversed(charString._patches):
|
||||
assert idx >= 2
|
||||
assert desubroutinized[idx - 1] in [
|
||||
"callsubr",
|
||||
"callgsubr",
|
||||
], desubroutinized[idx - 1]
|
||||
assert type(desubroutinized[idx - 2]) == int
|
||||
if expansion[-1] == "return":
|
||||
expansion = expansion[:-1]
|
||||
desubroutinized[idx - 2 : idx] = expansion
|
||||
if not self.private.in_cff2:
|
||||
if "endchar" in desubroutinized:
|
||||
# Cut off after first endchar
|
||||
desubroutinized = desubroutinized[
|
||||
: desubroutinized.index("endchar") + 1
|
||||
]
|
||||
|
||||
charString._desubroutinized = desubroutinized
|
||||
del charString._patches
|
||||
|
||||
def op_callsubr(self, index):
|
||||
subr = self.localSubrs[self.operandStack[-1] + self.localBias]
|
||||
SimpleT2Decompiler.op_callsubr(self, index)
|
||||
self.processSubr(index, subr)
|
||||
|
||||
def op_callgsubr(self, index):
|
||||
subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
|
||||
SimpleT2Decompiler.op_callgsubr(self, index)
|
||||
self.processSubr(index, subr)
|
||||
|
||||
def stop_hint_count(self, *args):
|
||||
self.need_hintcount = False
|
||||
for op_name in self.stop_hintcount_ops:
|
||||
setattr(self, op_name, None)
|
||||
cs = self.callingStack[-1]
|
||||
if hasattr(cs, "_desubroutinized"):
|
||||
raise StopHintCountEvent()
|
||||
|
||||
def op_hintmask(self, index):
|
||||
SimpleT2Decompiler.op_hintmask(self, index)
|
||||
if self.need_hintcount:
|
||||
self.stop_hint_count()
|
||||
|
||||
def processSubr(self, index, subr):
|
||||
cs = self.callingStack[-1]
|
||||
if not hasattr(cs, "_desubroutinized"):
|
||||
cs._patches.append((index, subr._desubroutinized))
|
||||
|
||||
|
||||
def desubroutinize(cff):
|
||||
for fontName in cff.fontNames:
|
||||
font = cff[fontName]
|
||||
cs = font.CharStrings
|
||||
for c in cs.values():
|
||||
c.decompile()
|
||||
subrs = getattr(c.private, "Subrs", [])
|
||||
decompiler = _DesubroutinizingT2Decompiler(subrs, c.globalSubrs, c.private)
|
||||
decompiler.execute(c)
|
||||
c.program = c._desubroutinized
|
||||
del c._desubroutinized
|
||||
# Delete all the local subrs
|
||||
if hasattr(font, "FDArray"):
|
||||
for fd in font.FDArray:
|
||||
pd = fd.Private
|
||||
if hasattr(pd, "Subrs"):
|
||||
del pd.Subrs
|
||||
if "Subrs" in pd.rawDict:
|
||||
del pd.rawDict["Subrs"]
|
||||
else:
|
||||
pd = font.Private
|
||||
if hasattr(pd, "Subrs"):
|
||||
del pd.Subrs
|
||||
if "Subrs" in pd.rawDict:
|
||||
del pd.rawDict["Subrs"]
|
||||
# as well as the global subrs
|
||||
cff.GlobalSubrs.clear()
|
||||
|
||||
|
||||
class _MarkingT2Decompiler(SimpleT2Decompiler):
|
||||
def __init__(self, localSubrs, globalSubrs, private):
|
||||
SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs, private)
|
||||
for subrs in [localSubrs, globalSubrs]:
|
||||
if subrs and not hasattr(subrs, "_used"):
|
||||
subrs._used = set()
|
||||
|
||||
def op_callsubr(self, index):
|
||||
self.localSubrs._used.add(self.operandStack[-1] + self.localBias)
|
||||
SimpleT2Decompiler.op_callsubr(self, index)
|
||||
|
||||
def op_callgsubr(self, index):
|
||||
self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias)
|
||||
SimpleT2Decompiler.op_callgsubr(self, index)
|
||||
|
||||
|
||||
class _DehintingT2Decompiler(T2WidthExtractor):
|
||||
class Hints(object):
|
||||
def __init__(self):
|
||||
# Whether calling this charstring produces any hint stems
|
||||
# Note that if a charstring starts with hintmask, it will
|
||||
# have has_hint set to True, because it *might* produce an
|
||||
# implicit vstem if called under certain conditions.
|
||||
self.has_hint = False
|
||||
# Index to start at to drop all hints
|
||||
self.last_hint = 0
|
||||
# Index up to which we know more hints are possible.
|
||||
# Only relevant if status is 0 or 1.
|
||||
self.last_checked = 0
|
||||
# The status means:
|
||||
# 0: after dropping hints, this charstring is empty
|
||||
# 1: after dropping hints, there may be more hints
|
||||
# continuing after this, or there might be
|
||||
# other things. Not clear yet.
|
||||
# 2: no more hints possible after this charstring
|
||||
self.status = 0
|
||||
# Has hintmask instructions; not recursive
|
||||
self.has_hintmask = False
|
||||
# List of indices of calls to empty subroutines to remove.
|
||||
self.deletions = []
|
||||
|
||||
pass
|
||||
|
||||
def __init__(
|
||||
self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None
|
||||
):
|
||||
self._css = css
|
||||
T2WidthExtractor.__init__(
|
||||
self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX
|
||||
)
|
||||
self.private = private
|
||||
|
||||
def execute(self, charString):
|
||||
old_hints = charString._hints if hasattr(charString, "_hints") else None
|
||||
charString._hints = self.Hints()
|
||||
|
||||
T2WidthExtractor.execute(self, charString)
|
||||
|
||||
hints = charString._hints
|
||||
|
||||
if hints.has_hint or hints.has_hintmask:
|
||||
self._css.add(charString)
|
||||
|
||||
if hints.status != 2:
|
||||
# Check from last_check, make sure we didn't have any operators.
|
||||
for i in range(hints.last_checked, len(charString.program) - 1):
|
||||
if isinstance(charString.program[i], str):
|
||||
hints.status = 2
|
||||
break
|
||||
else:
|
||||
hints.status = 1 # There's *something* here
|
||||
hints.last_checked = len(charString.program)
|
||||
|
||||
if old_hints:
|
||||
assert hints.__dict__ == old_hints.__dict__
|
||||
|
||||
def op_callsubr(self, index):
|
||||
subr = self.localSubrs[self.operandStack[-1] + self.localBias]
|
||||
T2WidthExtractor.op_callsubr(self, index)
|
||||
self.processSubr(index, subr)
|
||||
|
||||
def op_callgsubr(self, index):
|
||||
subr = self.globalSubrs[self.operandStack[-1] + self.globalBias]
|
||||
T2WidthExtractor.op_callgsubr(self, index)
|
||||
self.processSubr(index, subr)
|
||||
|
||||
def op_hstem(self, index):
|
||||
T2WidthExtractor.op_hstem(self, index)
|
||||
self.processHint(index)
|
||||
|
||||
def op_vstem(self, index):
|
||||
T2WidthExtractor.op_vstem(self, index)
|
||||
self.processHint(index)
|
||||
|
||||
def op_hstemhm(self, index):
|
||||
T2WidthExtractor.op_hstemhm(self, index)
|
||||
self.processHint(index)
|
||||
|
||||
def op_vstemhm(self, index):
|
||||
T2WidthExtractor.op_vstemhm(self, index)
|
||||
self.processHint(index)
|
||||
|
||||
def op_hintmask(self, index):
|
||||
rv = T2WidthExtractor.op_hintmask(self, index)
|
||||
self.processHintmask(index)
|
||||
return rv
|
||||
|
||||
def op_cntrmask(self, index):
|
||||
rv = T2WidthExtractor.op_cntrmask(self, index)
|
||||
self.processHintmask(index)
|
||||
return rv
|
||||
|
||||
def processHintmask(self, index):
|
||||
cs = self.callingStack[-1]
|
||||
hints = cs._hints
|
||||
hints.has_hintmask = True
|
||||
if hints.status != 2:
|
||||
# Check from last_check, see if we may be an implicit vstem
|
||||
for i in range(hints.last_checked, index - 1):
|
||||
if isinstance(cs.program[i], str):
|
||||
hints.status = 2
|
||||
break
|
||||
else:
|
||||
# We are an implicit vstem
|
||||
hints.has_hint = True
|
||||
hints.last_hint = index + 1
|
||||
hints.status = 0
|
||||
hints.last_checked = index + 1
|
||||
|
||||
def processHint(self, index):
|
||||
cs = self.callingStack[-1]
|
||||
hints = cs._hints
|
||||
hints.has_hint = True
|
||||
hints.last_hint = index
|
||||
hints.last_checked = index
|
||||
|
||||
def processSubr(self, index, subr):
|
||||
cs = self.callingStack[-1]
|
||||
hints = cs._hints
|
||||
subr_hints = subr._hints
|
||||
|
||||
# Check from last_check, make sure we didn't have
|
||||
# any operators.
|
||||
if hints.status != 2:
|
||||
for i in range(hints.last_checked, index - 1):
|
||||
if isinstance(cs.program[i], str):
|
||||
hints.status = 2
|
||||
break
|
||||
hints.last_checked = index
|
||||
|
||||
if hints.status != 2:
|
||||
if subr_hints.has_hint:
|
||||
hints.has_hint = True
|
||||
|
||||
# Decide where to chop off from
|
||||
if subr_hints.status == 0:
|
||||
hints.last_hint = index
|
||||
else:
|
||||
hints.last_hint = index - 2 # Leave the subr call in
|
||||
|
||||
elif subr_hints.status == 0:
|
||||
hints.deletions.append(index)
|
||||
|
||||
hints.status = max(hints.status, subr_hints.status)
|
||||
|
||||
|
||||
def _cs_subset_subroutines(charstring, subrs, gsubrs):
|
||||
p = charstring.program
|
||||
for i in range(1, len(p)):
|
||||
if p[i] == "callsubr":
|
||||
assert isinstance(p[i - 1], int)
|
||||
p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias
|
||||
elif p[i] == "callgsubr":
|
||||
assert isinstance(p[i - 1], int)
|
||||
p[i - 1] = (
|
||||
gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias
|
||||
)
|
||||
|
||||
|
||||
def _cs_drop_hints(charstring):
|
||||
hints = charstring._hints
|
||||
|
||||
if hints.deletions:
|
||||
p = charstring.program
|
||||
for idx in reversed(hints.deletions):
|
||||
del p[idx - 2 : idx]
|
||||
|
||||
if hints.has_hint:
|
||||
assert not hints.deletions or hints.last_hint <= hints.deletions[0]
|
||||
charstring.program = charstring.program[hints.last_hint :]
|
||||
if not charstring.program:
|
||||
# TODO CFF2 no need for endchar.
|
||||
charstring.program.append("endchar")
|
||||
if hasattr(charstring, "width"):
|
||||
# Insert width back if needed
|
||||
if charstring.width != charstring.private.defaultWidthX:
|
||||
# For CFF2 charstrings, this should never happen
|
||||
assert (
|
||||
charstring.private.defaultWidthX is not None
|
||||
), "CFF2 CharStrings must not have an initial width value"
|
||||
charstring.program.insert(
|
||||
0, charstring.width - charstring.private.nominalWidthX
|
||||
)
|
||||
|
||||
if hints.has_hintmask:
|
||||
i = 0
|
||||
p = charstring.program
|
||||
while i < len(p):
|
||||
if p[i] in ["hintmask", "cntrmask"]:
|
||||
assert i + 1 <= len(p)
|
||||
del p[i : i + 2]
|
||||
continue
|
||||
i += 1
|
||||
|
||||
assert len(charstring.program)
|
||||
|
||||
del charstring._hints
|
||||
|
||||
|
||||
def remove_hints(cff, *, removeUnusedSubrs: bool = True):
|
||||
for fontname in cff.keys():
|
||||
font = cff[fontname]
|
||||
cs = font.CharStrings
|
||||
# This can be tricky, but doesn't have to. What we do is:
|
||||
#
|
||||
# - Run all used glyph charstrings and recurse into subroutines,
|
||||
# - For each charstring (including subroutines), if it has any
|
||||
# of the hint stem operators, we mark it as such.
|
||||
# Upon returning, for each charstring we note all the
|
||||
# subroutine calls it makes that (recursively) contain a stem,
|
||||
# - Dropping hinting then consists of the following two ops:
|
||||
# * Drop the piece of the program in each charstring before the
|
||||
# last call to a stem op or a stem-calling subroutine,
|
||||
# * Drop all hintmask operations.
|
||||
# - It's trickier... A hintmask right after hints and a few numbers
|
||||
# will act as an implicit vstemhm. As such, we track whether
|
||||
# we have seen any non-hint operators so far and do the right
|
||||
# thing, recursively... Good luck understanding that :(
|
||||
css = set()
|
||||
for c in cs.values():
|
||||
c.decompile()
|
||||
subrs = getattr(c.private, "Subrs", [])
|
||||
decompiler = _DehintingT2Decompiler(
|
||||
css,
|
||||
subrs,
|
||||
c.globalSubrs,
|
||||
c.private.nominalWidthX,
|
||||
c.private.defaultWidthX,
|
||||
c.private,
|
||||
)
|
||||
decompiler.execute(c)
|
||||
c.width = decompiler.width
|
||||
for charstring in css:
|
||||
_cs_drop_hints(charstring)
|
||||
del css
|
||||
|
||||
# Drop font-wide hinting values
|
||||
all_privs = []
|
||||
if hasattr(font, "FDArray"):
|
||||
all_privs.extend(fd.Private for fd in font.FDArray)
|
||||
else:
|
||||
all_privs.append(font.Private)
|
||||
for priv in all_privs:
|
||||
for k in [
|
||||
"BlueValues",
|
||||
"OtherBlues",
|
||||
"FamilyBlues",
|
||||
"FamilyOtherBlues",
|
||||
"BlueScale",
|
||||
"BlueShift",
|
||||
"BlueFuzz",
|
||||
"StemSnapH",
|
||||
"StemSnapV",
|
||||
"StdHW",
|
||||
"StdVW",
|
||||
"ForceBold",
|
||||
"LanguageGroup",
|
||||
"ExpansionFactor",
|
||||
]:
|
||||
if hasattr(priv, k):
|
||||
setattr(priv, k, None)
|
||||
if removeUnusedSubrs:
|
||||
remove_unused_subroutines(cff)
|
||||
|
||||
|
||||
def _pd_delete_empty_subrs(private_dict):
|
||||
if hasattr(private_dict, "Subrs") and not private_dict.Subrs:
|
||||
if "Subrs" in private_dict.rawDict:
|
||||
del private_dict.rawDict["Subrs"]
|
||||
del private_dict.Subrs
|
||||
|
||||
|
||||
def remove_unused_subroutines(cff):
|
||||
for fontname in cff.keys():
|
||||
font = cff[fontname]
|
||||
cs = font.CharStrings
|
||||
# Renumber subroutines to remove unused ones
|
||||
|
||||
# Mark all used subroutines
|
||||
for c in cs.values():
|
||||
subrs = getattr(c.private, "Subrs", [])
|
||||
decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private)
|
||||
decompiler.execute(c)
|
||||
|
||||
all_subrs = [font.GlobalSubrs]
|
||||
if hasattr(font, "FDArray"):
|
||||
all_subrs.extend(
|
||||
fd.Private.Subrs
|
||||
for fd in font.FDArray
|
||||
if hasattr(fd.Private, "Subrs") and fd.Private.Subrs
|
||||
)
|
||||
elif hasattr(font.Private, "Subrs") and font.Private.Subrs:
|
||||
all_subrs.append(font.Private.Subrs)
|
||||
|
||||
subrs = set(subrs) # Remove duplicates
|
||||
|
||||
# Prepare
|
||||
for subrs in all_subrs:
|
||||
if not hasattr(subrs, "_used"):
|
||||
subrs._used = set()
|
||||
subrs._used = _uniq_sort(subrs._used)
|
||||
subrs._old_bias = calcSubrBias(subrs)
|
||||
subrs._new_bias = calcSubrBias(subrs._used)
|
||||
|
||||
# Renumber glyph charstrings
|
||||
for c in cs.values():
|
||||
subrs = getattr(c.private, "Subrs", None)
|
||||
_cs_subset_subroutines(c, subrs, font.GlobalSubrs)
|
||||
|
||||
# Renumber subroutines themselves
|
||||
for subrs in all_subrs:
|
||||
if subrs == font.GlobalSubrs:
|
||||
if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"):
|
||||
local_subrs = font.Private.Subrs
|
||||
elif hasattr(font, "FDArray") and len(font.FDArray) == 1:
|
||||
local_subrs = font.FDArray[0].Private.Subrs
|
||||
else:
|
||||
local_subrs = None
|
||||
else:
|
||||
local_subrs = subrs
|
||||
|
||||
subrs.items = [subrs.items[i] for i in subrs._used]
|
||||
if hasattr(subrs, "file"):
|
||||
del subrs.file
|
||||
if hasattr(subrs, "offsets"):
|
||||
del subrs.offsets
|
||||
|
||||
for subr in subrs.items:
|
||||
_cs_subset_subroutines(subr, local_subrs, font.GlobalSubrs)
|
||||
|
||||
# Delete local SubrsIndex if empty
|
||||
if hasattr(font, "FDArray"):
|
||||
for fd in font.FDArray:
|
||||
_pd_delete_empty_subrs(fd.Private)
|
||||
else:
|
||||
_pd_delete_empty_subrs(font.Private)
|
||||
|
||||
# Cleanup
|
||||
for subrs in all_subrs:
|
||||
del subrs._used, subrs._old_bias, subrs._new_bias
|
||||
210
venv/lib/python3.12/site-packages/fontTools/cffLib/width.py
Normal file
210
venv/lib/python3.12/site-packages/fontTools/cffLib/width.py
Normal file
@ -0,0 +1,210 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""T2CharString glyph width optimizer.
|
||||
|
||||
CFF glyphs whose width equals the CFF Private dictionary's ``defaultWidthX``
|
||||
value do not need to specify their width in their charstring, saving bytes.
|
||||
This module determines the optimum ``defaultWidthX`` and ``nominalWidthX``
|
||||
values for a font, when provided with a list of glyph widths."""
|
||||
|
||||
from fontTools.ttLib import TTFont
|
||||
from collections import defaultdict
|
||||
from operator import add
|
||||
from functools import reduce
|
||||
|
||||
|
||||
__all__ = ["optimizeWidths", "main"]
|
||||
|
||||
|
||||
class missingdict(dict):
|
||||
def __init__(self, missing_func):
|
||||
self.missing_func = missing_func
|
||||
|
||||
def __missing__(self, v):
|
||||
return self.missing_func(v)
|
||||
|
||||
|
||||
def cumSum(f, op=add, start=0, decreasing=False):
|
||||
keys = sorted(f.keys())
|
||||
minx, maxx = keys[0], keys[-1]
|
||||
|
||||
total = reduce(op, f.values(), start)
|
||||
|
||||
if decreasing:
|
||||
missing = lambda x: start if x > maxx else total
|
||||
domain = range(maxx, minx - 1, -1)
|
||||
else:
|
||||
missing = lambda x: start if x < minx else total
|
||||
domain = range(minx, maxx + 1)
|
||||
|
||||
out = missingdict(missing)
|
||||
|
||||
v = start
|
||||
for x in domain:
|
||||
v = op(v, f[x])
|
||||
out[x] = v
|
||||
|
||||
return out
|
||||
|
||||
|
||||
def byteCost(widths, default, nominal):
|
||||
if not hasattr(widths, "items"):
|
||||
d = defaultdict(int)
|
||||
for w in widths:
|
||||
d[w] += 1
|
||||
widths = d
|
||||
|
||||
cost = 0
|
||||
for w, freq in widths.items():
|
||||
if w == default:
|
||||
continue
|
||||
diff = abs(w - nominal)
|
||||
if diff <= 107:
|
||||
cost += freq
|
||||
elif diff <= 1131:
|
||||
cost += freq * 2
|
||||
else:
|
||||
cost += freq * 5
|
||||
return cost
|
||||
|
||||
|
||||
def optimizeWidthsBruteforce(widths):
|
||||
"""Bruteforce version. Veeeeeeeeeeeeeeeeery slow. Only works for smallests of fonts."""
|
||||
|
||||
d = defaultdict(int)
|
||||
for w in widths:
|
||||
d[w] += 1
|
||||
|
||||
# Maximum number of bytes using default can possibly save
|
||||
maxDefaultAdvantage = 5 * max(d.values())
|
||||
|
||||
minw, maxw = min(widths), max(widths)
|
||||
domain = list(range(minw, maxw + 1))
|
||||
|
||||
bestCostWithoutDefault = min(byteCost(widths, None, nominal) for nominal in domain)
|
||||
|
||||
bestCost = len(widths) * 5 + 1
|
||||
for nominal in domain:
|
||||
if byteCost(widths, None, nominal) > bestCost + maxDefaultAdvantage:
|
||||
continue
|
||||
for default in domain:
|
||||
cost = byteCost(widths, default, nominal)
|
||||
if cost < bestCost:
|
||||
bestCost = cost
|
||||
bestDefault = default
|
||||
bestNominal = nominal
|
||||
|
||||
return bestDefault, bestNominal
|
||||
|
||||
|
||||
def optimizeWidths(widths):
|
||||
"""Given a list of glyph widths, or dictionary mapping glyph width to number of
|
||||
glyphs having that, returns a tuple of best CFF default and nominal glyph widths.
|
||||
|
||||
This algorithm is linear in UPEM+numGlyphs."""
|
||||
|
||||
if not hasattr(widths, "items"):
|
||||
d = defaultdict(int)
|
||||
for w in widths:
|
||||
d[w] += 1
|
||||
widths = d
|
||||
|
||||
keys = sorted(widths.keys())
|
||||
minw, maxw = keys[0], keys[-1]
|
||||
domain = list(range(minw, maxw + 1))
|
||||
|
||||
# Cumulative sum/max forward/backward.
|
||||
cumFrqU = cumSum(widths, op=add)
|
||||
cumMaxU = cumSum(widths, op=max)
|
||||
cumFrqD = cumSum(widths, op=add, decreasing=True)
|
||||
cumMaxD = cumSum(widths, op=max, decreasing=True)
|
||||
|
||||
# Cost per nominal choice, without default consideration.
|
||||
nomnCostU = missingdict(
|
||||
lambda x: cumFrqU[x] + cumFrqU[x - 108] + cumFrqU[x - 1132] * 3
|
||||
)
|
||||
nomnCostD = missingdict(
|
||||
lambda x: cumFrqD[x] + cumFrqD[x + 108] + cumFrqD[x + 1132] * 3
|
||||
)
|
||||
nomnCost = missingdict(lambda x: nomnCostU[x] + nomnCostD[x] - widths[x])
|
||||
|
||||
# Cost-saving per nominal choice, by best default choice.
|
||||
dfltCostU = missingdict(
|
||||
lambda x: max(cumMaxU[x], cumMaxU[x - 108] * 2, cumMaxU[x - 1132] * 5)
|
||||
)
|
||||
dfltCostD = missingdict(
|
||||
lambda x: max(cumMaxD[x], cumMaxD[x + 108] * 2, cumMaxD[x + 1132] * 5)
|
||||
)
|
||||
dfltCost = missingdict(lambda x: max(dfltCostU[x], dfltCostD[x]))
|
||||
|
||||
# Combined cost per nominal choice.
|
||||
bestCost = missingdict(lambda x: nomnCost[x] - dfltCost[x])
|
||||
|
||||
# Best nominal.
|
||||
nominal = min(domain, key=lambda x: bestCost[x])
|
||||
|
||||
# Work back the best default.
|
||||
bestC = bestCost[nominal]
|
||||
dfltC = nomnCost[nominal] - bestCost[nominal]
|
||||
ends = []
|
||||
if dfltC == dfltCostU[nominal]:
|
||||
starts = [nominal, nominal - 108, nominal - 1132]
|
||||
for start in starts:
|
||||
while cumMaxU[start] and cumMaxU[start] == cumMaxU[start - 1]:
|
||||
start -= 1
|
||||
ends.append(start)
|
||||
else:
|
||||
starts = [nominal, nominal + 108, nominal + 1132]
|
||||
for start in starts:
|
||||
while cumMaxD[start] and cumMaxD[start] == cumMaxD[start + 1]:
|
||||
start += 1
|
||||
ends.append(start)
|
||||
default = min(ends, key=lambda default: byteCost(widths, default, nominal))
|
||||
|
||||
return default, nominal
|
||||
|
||||
|
||||
def main(args=None):
|
||||
"""Calculate optimum defaultWidthX/nominalWidthX values"""
|
||||
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(
|
||||
"fonttools cffLib.width",
|
||||
description=main.__doc__,
|
||||
)
|
||||
parser.add_argument(
|
||||
"inputs", metavar="FILE", type=str, nargs="+", help="Input TTF files"
|
||||
)
|
||||
parser.add_argument(
|
||||
"-b",
|
||||
"--brute-force",
|
||||
dest="brute",
|
||||
action="store_true",
|
||||
help="Use brute-force approach (VERY slow)",
|
||||
)
|
||||
|
||||
args = parser.parse_args(args)
|
||||
|
||||
for fontfile in args.inputs:
|
||||
font = TTFont(fontfile)
|
||||
hmtx = font["hmtx"]
|
||||
widths = [m[0] for m in hmtx.metrics.values()]
|
||||
if args.brute:
|
||||
default, nominal = optimizeWidthsBruteforce(widths)
|
||||
else:
|
||||
default, nominal = optimizeWidths(widths)
|
||||
print(
|
||||
"glyphs=%d default=%d nominal=%d byteCost=%d"
|
||||
% (len(widths), default, nominal, byteCost(widths, default, nominal))
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
|
||||
if len(sys.argv) == 1:
|
||||
import doctest
|
||||
|
||||
sys.exit(doctest.testmod().failed)
|
||||
main()
|
||||
664
venv/lib/python3.12/site-packages/fontTools/colorLib/builder.py
Normal file
664
venv/lib/python3.12/site-packages/fontTools/colorLib/builder.py
Normal file
@ -0,0 +1,664 @@
|
||||
"""
|
||||
colorLib.builder: Build COLR/CPAL tables from scratch
|
||||
|
||||
"""
|
||||
|
||||
import collections
|
||||
import copy
|
||||
import enum
|
||||
from functools import partial
|
||||
from math import ceil, log
|
||||
from typing import (
|
||||
Any,
|
||||
Dict,
|
||||
Generator,
|
||||
Iterable,
|
||||
List,
|
||||
Mapping,
|
||||
Optional,
|
||||
Sequence,
|
||||
Tuple,
|
||||
Type,
|
||||
TypeVar,
|
||||
Union,
|
||||
)
|
||||
from fontTools.misc.arrayTools import intRect
|
||||
from fontTools.misc.fixedTools import fixedToFloat
|
||||
from fontTools.misc.treeTools import build_n_ary_tree
|
||||
from fontTools.ttLib.tables import C_O_L_R_
|
||||
from fontTools.ttLib.tables import C_P_A_L_
|
||||
from fontTools.ttLib.tables import _n_a_m_e
|
||||
from fontTools.ttLib.tables import otTables as ot
|
||||
from fontTools.ttLib.tables.otTables import ExtendMode, CompositeMode
|
||||
from .errors import ColorLibError
|
||||
from .geometry import round_start_circle_stable_containment
|
||||
from .table_builder import BuildCallback, TableBuilder
|
||||
|
||||
|
||||
# TODO move type aliases to colorLib.types?
|
||||
T = TypeVar("T")
|
||||
_Kwargs = Mapping[str, Any]
|
||||
_PaintInput = Union[int, _Kwargs, ot.Paint, Tuple[str, "_PaintInput"]]
|
||||
_PaintInputList = Sequence[_PaintInput]
|
||||
_ColorGlyphsDict = Dict[str, Union[_PaintInputList, _PaintInput]]
|
||||
_ColorGlyphsV0Dict = Dict[str, Sequence[Tuple[str, int]]]
|
||||
_ClipBoxInput = Union[
|
||||
Tuple[int, int, int, int, int], # format 1, variable
|
||||
Tuple[int, int, int, int], # format 0, non-variable
|
||||
ot.ClipBox,
|
||||
]
|
||||
|
||||
|
||||
MAX_PAINT_COLR_LAYER_COUNT = 255
|
||||
_DEFAULT_ALPHA = 1.0
|
||||
_MAX_REUSE_LEN = 32
|
||||
|
||||
|
||||
def _beforeBuildPaintRadialGradient(paint, source):
|
||||
x0 = source["x0"]
|
||||
y0 = source["y0"]
|
||||
r0 = source["r0"]
|
||||
x1 = source["x1"]
|
||||
y1 = source["y1"]
|
||||
r1 = source["r1"]
|
||||
|
||||
# TODO apparently no builder_test confirms this works (?)
|
||||
|
||||
# avoid abrupt change after rounding when c0 is near c1's perimeter
|
||||
c = round_start_circle_stable_containment((x0, y0), r0, (x1, y1), r1)
|
||||
x0, y0 = c.centre
|
||||
r0 = c.radius
|
||||
|
||||
# update source to ensure paint is built with corrected values
|
||||
source["x0"] = x0
|
||||
source["y0"] = y0
|
||||
source["r0"] = r0
|
||||
source["x1"] = x1
|
||||
source["y1"] = y1
|
||||
source["r1"] = r1
|
||||
|
||||
return paint, source
|
||||
|
||||
|
||||
def _defaultColorStop():
|
||||
colorStop = ot.ColorStop()
|
||||
colorStop.Alpha = _DEFAULT_ALPHA
|
||||
return colorStop
|
||||
|
||||
|
||||
def _defaultVarColorStop():
|
||||
colorStop = ot.VarColorStop()
|
||||
colorStop.Alpha = _DEFAULT_ALPHA
|
||||
return colorStop
|
||||
|
||||
|
||||
def _defaultColorLine():
|
||||
colorLine = ot.ColorLine()
|
||||
colorLine.Extend = ExtendMode.PAD
|
||||
return colorLine
|
||||
|
||||
|
||||
def _defaultVarColorLine():
|
||||
colorLine = ot.VarColorLine()
|
||||
colorLine.Extend = ExtendMode.PAD
|
||||
return colorLine
|
||||
|
||||
|
||||
def _defaultPaintSolid():
|
||||
paint = ot.Paint()
|
||||
paint.Alpha = _DEFAULT_ALPHA
|
||||
return paint
|
||||
|
||||
|
||||
def _buildPaintCallbacks():
|
||||
return {
|
||||
(
|
||||
BuildCallback.BEFORE_BUILD,
|
||||
ot.Paint,
|
||||
ot.PaintFormat.PaintRadialGradient,
|
||||
): _beforeBuildPaintRadialGradient,
|
||||
(
|
||||
BuildCallback.BEFORE_BUILD,
|
||||
ot.Paint,
|
||||
ot.PaintFormat.PaintVarRadialGradient,
|
||||
): _beforeBuildPaintRadialGradient,
|
||||
(BuildCallback.CREATE_DEFAULT, ot.ColorStop): _defaultColorStop,
|
||||
(BuildCallback.CREATE_DEFAULT, ot.VarColorStop): _defaultVarColorStop,
|
||||
(BuildCallback.CREATE_DEFAULT, ot.ColorLine): _defaultColorLine,
|
||||
(BuildCallback.CREATE_DEFAULT, ot.VarColorLine): _defaultVarColorLine,
|
||||
(
|
||||
BuildCallback.CREATE_DEFAULT,
|
||||
ot.Paint,
|
||||
ot.PaintFormat.PaintSolid,
|
||||
): _defaultPaintSolid,
|
||||
(
|
||||
BuildCallback.CREATE_DEFAULT,
|
||||
ot.Paint,
|
||||
ot.PaintFormat.PaintVarSolid,
|
||||
): _defaultPaintSolid,
|
||||
}
|
||||
|
||||
|
||||
def populateCOLRv0(
|
||||
table: ot.COLR,
|
||||
colorGlyphsV0: _ColorGlyphsV0Dict,
|
||||
glyphMap: Optional[Mapping[str, int]] = None,
|
||||
):
|
||||
"""Build v0 color layers and add to existing COLR table.
|
||||
|
||||
Args:
|
||||
table: a raw ``otTables.COLR()`` object (not ttLib's ``table_C_O_L_R_``).
|
||||
colorGlyphsV0: map of base glyph names to lists of (layer glyph names,
|
||||
color palette index) tuples. Can be empty.
|
||||
glyphMap: a map from glyph names to glyph indices, as returned from
|
||||
``TTFont.getReverseGlyphMap()``, to optionally sort base records by GID.
|
||||
"""
|
||||
if glyphMap is not None:
|
||||
colorGlyphItems = sorted(
|
||||
colorGlyphsV0.items(), key=lambda item: glyphMap[item[0]]
|
||||
)
|
||||
else:
|
||||
colorGlyphItems = colorGlyphsV0.items()
|
||||
baseGlyphRecords = []
|
||||
layerRecords = []
|
||||
for baseGlyph, layers in colorGlyphItems:
|
||||
baseRec = ot.BaseGlyphRecord()
|
||||
baseRec.BaseGlyph = baseGlyph
|
||||
baseRec.FirstLayerIndex = len(layerRecords)
|
||||
baseRec.NumLayers = len(layers)
|
||||
baseGlyphRecords.append(baseRec)
|
||||
|
||||
for layerGlyph, paletteIndex in layers:
|
||||
layerRec = ot.LayerRecord()
|
||||
layerRec.LayerGlyph = layerGlyph
|
||||
layerRec.PaletteIndex = paletteIndex
|
||||
layerRecords.append(layerRec)
|
||||
|
||||
table.BaseGlyphRecordArray = table.LayerRecordArray = None
|
||||
if baseGlyphRecords:
|
||||
table.BaseGlyphRecordArray = ot.BaseGlyphRecordArray()
|
||||
table.BaseGlyphRecordArray.BaseGlyphRecord = baseGlyphRecords
|
||||
if layerRecords:
|
||||
table.LayerRecordArray = ot.LayerRecordArray()
|
||||
table.LayerRecordArray.LayerRecord = layerRecords
|
||||
table.BaseGlyphRecordCount = len(baseGlyphRecords)
|
||||
table.LayerRecordCount = len(layerRecords)
|
||||
|
||||
|
||||
def buildCOLR(
|
||||
colorGlyphs: _ColorGlyphsDict,
|
||||
version: Optional[int] = None,
|
||||
*,
|
||||
glyphMap: Optional[Mapping[str, int]] = None,
|
||||
varStore: Optional[ot.VarStore] = None,
|
||||
varIndexMap: Optional[ot.DeltaSetIndexMap] = None,
|
||||
clipBoxes: Optional[Dict[str, _ClipBoxInput]] = None,
|
||||
allowLayerReuse: bool = True,
|
||||
) -> C_O_L_R_.table_C_O_L_R_:
|
||||
"""Build COLR table from color layers mapping.
|
||||
|
||||
Args:
|
||||
|
||||
colorGlyphs: map of base glyph name to, either list of (layer glyph name,
|
||||
color palette index) tuples for COLRv0; or a single ``Paint`` (dict) or
|
||||
list of ``Paint`` for COLRv1.
|
||||
version: the version of COLR table. If None, the version is determined
|
||||
by the presence of COLRv1 paints or variation data (varStore), which
|
||||
require version 1; otherwise, if all base glyphs use only simple color
|
||||
layers, version 0 is used.
|
||||
glyphMap: a map from glyph names to glyph indices, as returned from
|
||||
TTFont.getReverseGlyphMap(), to optionally sort base records by GID.
|
||||
varStore: Optional ItemVarationStore for deltas associated with v1 layer.
|
||||
varIndexMap: Optional DeltaSetIndexMap for deltas associated with v1 layer.
|
||||
clipBoxes: Optional map of base glyph name to clip box 4- or 5-tuples:
|
||||
(xMin, yMin, xMax, yMax) or (xMin, yMin, xMax, yMax, varIndexBase).
|
||||
|
||||
Returns:
|
||||
A new COLR table.
|
||||
"""
|
||||
self = C_O_L_R_.table_C_O_L_R_()
|
||||
|
||||
if varStore is not None and version == 0:
|
||||
raise ValueError("Can't add VarStore to COLRv0")
|
||||
|
||||
if version in (None, 0) and not varStore:
|
||||
# split color glyphs into v0 and v1 and encode separately
|
||||
colorGlyphsV0, colorGlyphsV1 = _split_color_glyphs_by_version(colorGlyphs)
|
||||
if version == 0 and colorGlyphsV1:
|
||||
raise ValueError("Can't encode COLRv1 glyphs in COLRv0")
|
||||
else:
|
||||
# unless explicitly requested for v1 or have variations, in which case
|
||||
# we encode all color glyph as v1
|
||||
colorGlyphsV0, colorGlyphsV1 = {}, colorGlyphs
|
||||
|
||||
colr = ot.COLR()
|
||||
|
||||
populateCOLRv0(colr, colorGlyphsV0, glyphMap)
|
||||
|
||||
colr.LayerList, colr.BaseGlyphList = buildColrV1(
|
||||
colorGlyphsV1,
|
||||
glyphMap,
|
||||
allowLayerReuse=allowLayerReuse,
|
||||
)
|
||||
|
||||
if version is None:
|
||||
version = 1 if (varStore or colorGlyphsV1) else 0
|
||||
elif version not in (0, 1):
|
||||
raise NotImplementedError(version)
|
||||
self.version = colr.Version = version
|
||||
|
||||
if version == 0:
|
||||
self.ColorLayers = self._decompileColorLayersV0(colr)
|
||||
else:
|
||||
colr.ClipList = buildClipList(clipBoxes) if clipBoxes else None
|
||||
colr.VarIndexMap = varIndexMap
|
||||
colr.VarStore = varStore
|
||||
self.table = colr
|
||||
|
||||
return self
|
||||
|
||||
|
||||
def buildClipList(clipBoxes: Dict[str, _ClipBoxInput]) -> ot.ClipList:
    """Build a Format-1 ClipList mapping base glyph names to ClipBoxes."""
    result = ot.ClipList()
    result.Format = 1
    clips = {}
    for glyphName, boxInput in clipBoxes.items():
        clips[glyphName] = buildClipBox(boxInput)
    result.clips = clips
    return result
|
||||
|
||||
|
||||
def buildClipBox(clipBox: _ClipBoxInput) -> ot.ClipBox:
    """Build a ClipBox from a 4-tuple (Format 1) or 5-tuple (Format 2).

    The values are (xMin, yMin, xMax, yMax) plus, for Format 2, a trailing
    varIndexBase. A pre-built ot.ClipBox is passed through unchanged.
    """
    if isinstance(clipBox, ot.ClipBox):
        return clipBox
    numValues = len(clipBox)
    box = ot.ClipBox()
    if numValues not in (4, 5):
        raise ValueError(f"Invalid ClipBox: expected 4 or 5 values, found {numValues}")
    box.xMin, box.yMin, box.xMax, box.yMax = intRect(clipBox[:4])
    if numValues == 5:
        # Format 2 carries a variation index base for variable clip boxes.
        box.Format = 2
        box.VarIndexBase = int(clipBox[4])
    else:
        box.Format = 1
    return box
|
||||
|
||||
|
||||
class ColorPaletteType(enum.IntFlag):
    """CPAL palette type flags; bits 2..15 are reserved and must be zero."""

    USABLE_WITH_LIGHT_BACKGROUND = 0x0001
    USABLE_WITH_DARK_BACKGROUND = 0x0002

    @classmethod
    def _missing_(cls, value):
        # Reject negative values and any value with a reserved bit set,
        # otherwise defer to the default IntFlag composite-member handling.
        if isinstance(value, int) and (value < 0 or value & 0xFFFC):
            raise ValueError(f"{value} is not a valid {cls.__name__}")
        return super()._missing_(value)
|
||||
|
||||
|
||||
# A palette (entry) label value: None (no label), 'abc' (a plain default
# string), or {'en': 'abc', 'de': 'xyz'} (localized, keyed by language code).
_OptionalLocalizedString = Union[None, str, Dict[str, str]]
|
||||
|
||||
|
||||
def buildPaletteLabels(
    labels: Iterable[_OptionalLocalizedString], nameTable: _n_a_m_e.table__n_a_m_e
) -> List[Optional[int]]:
    """Add one name-table record per label and return the resulting name IDs.

    A None label maps to NO_NAME_ID; a plain string is stored as the default
    English name; a dict is stored as a multilingual name keyed by language.
    """
    nameIDs = []
    for label in labels:
        if label is None:
            nameIDs.append(C_P_A_L_.table_C_P_A_L_.NO_NAME_ID)
        elif isinstance(label, dict):
            nameIDs.append(nameTable.addMultilingualName(label, mac=False))
        else:
            nameIDs.append(nameTable.addMultilingualName({"en": label}, mac=False))
    return nameIDs
|
||||
|
||||
|
||||
def buildCPAL(
    palettes: Sequence[Sequence[Tuple[float, float, float, float]]],
    paletteTypes: Optional[Sequence[ColorPaletteType]] = None,
    paletteLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
    paletteEntryLabels: Optional[Sequence[_OptionalLocalizedString]] = None,
    nameTable: Optional[_n_a_m_e.table__n_a_m_e] = None,
) -> C_P_A_L_.table_C_P_A_L_:
    """Build CPAL table from list of color palettes.

    Args:
        palettes: list of lists of colors encoded as tuples of (R, G, B, A) floats
            in the range [0..1].
        paletteTypes: optional list of ColorPaletteType, one for each palette.
        paletteLabels: optional list of palette labels. Each label can be either:
            None (no label), a string (for default English labels), or a
            localized string (as a dict keyed with BCP47 language codes).
        paletteEntryLabels: optional list of palette entry labels, one for each
            palette entry (see paletteLabels).
        nameTable: optional name table where to store palette and palette entry
            labels. Required if either paletteLabels or paletteEntryLabels is set.

    Raises:
        ColorLibError: if palettes have mismatched lengths or contain colors
            that are not 4-tuples of floats within [0..1].
        TypeError: if labels are given without a nameTable.

    Return:
        A new CPAL v0 or v1 table, if custom palette types or labels are specified.
    """
    # All palettes must contain the same number of entries.
    if len({len(p) for p in palettes}) != 1:
        raise ColorLibError("color palettes have different lengths")

    if (paletteLabels or paletteEntryLabels) and not nameTable:
        raise TypeError(
            "nameTable is required if palette or palette entries have labels"
        )

    cpal = C_P_A_L_.table_C_P_A_L_()
    cpal.numPaletteEntries = len(palettes[0])

    cpal.palettes = []
    for i, palette in enumerate(palettes):
        colors = []
        for j, color in enumerate(palette):
            if not isinstance(color, tuple) or len(color) != 4:
                raise ColorLibError(
                    f"In palette[{i}][{j}]: expected (R, G, B, A) tuple, got {color!r}"
                )
            if any(v > 1 or v < 0 for v in color):
                raise ColorLibError(
                    f"palette[{i}][{j}] has invalid out-of-range [0..1] color: {color!r}"
                )
            # input colors are RGBA, CPAL encodes them as BGRA
            red, green, blue, alpha = color
            colors.append(
                C_P_A_L_.Color(*(round(v * 255) for v in (blue, green, red, alpha)))
            )
        cpal.palettes.append(colors)

    # Any custom palette type or label requires the extended CPAL version 1.
    if any(v is not None for v in (paletteTypes, paletteLabels, paletteEntryLabels)):
        cpal.version = 1

        if paletteTypes is not None:
            if len(paletteTypes) != len(palettes):
                raise ColorLibError(
                    f"Expected {len(palettes)} paletteTypes, got {len(paletteTypes)}"
                )
            cpal.paletteTypes = [ColorPaletteType(t).value for t in paletteTypes]
        else:
            cpal.paletteTypes = [C_P_A_L_.table_C_P_A_L_.DEFAULT_PALETTE_TYPE] * len(
                palettes
            )

        if paletteLabels is not None:
            if len(paletteLabels) != len(palettes):
                raise ColorLibError(
                    f"Expected {len(palettes)} paletteLabels, got {len(paletteLabels)}"
                )
            cpal.paletteLabels = buildPaletteLabels(paletteLabels, nameTable)
        else:
            cpal.paletteLabels = [C_P_A_L_.table_C_P_A_L_.NO_NAME_ID] * len(palettes)

        if paletteEntryLabels is not None:
            if len(paletteEntryLabels) != cpal.numPaletteEntries:
                raise ColorLibError(
                    f"Expected {cpal.numPaletteEntries} paletteEntryLabels, "
                    f"got {len(paletteEntryLabels)}"
                )
            cpal.paletteEntryLabels = buildPaletteLabels(paletteEntryLabels, nameTable)
        else:
            cpal.paletteEntryLabels = [
                C_P_A_L_.table_C_P_A_L_.NO_NAME_ID
            ] * cpal.numPaletteEntries
    else:
        cpal.version = 0

    return cpal
|
||||
|
||||
|
||||
# COLR v1 tables
|
||||
# See draft proposal at: https://github.com/googlefonts/colr-gradients-spec
|
||||
|
||||
|
||||
def _is_colrv0_layer(layer: Any) -> bool:
|
||||
# Consider as COLRv0 layer any sequence of length 2 (be it tuple or list) in which
|
||||
# the first element is a str (the layerGlyph) and the second element is an int
|
||||
# (CPAL paletteIndex).
|
||||
# https://github.com/googlefonts/ufo2ft/issues/426
|
||||
try:
|
||||
layerGlyph, paletteIndex = layer
|
||||
except (TypeError, ValueError):
|
||||
return False
|
||||
else:
|
||||
return isinstance(layerGlyph, str) and isinstance(paletteIndex, int)
|
||||
|
||||
|
||||
def _split_color_glyphs_by_version(
    colorGlyphs: _ColorGlyphsDict,
) -> Tuple[_ColorGlyphsV0Dict, _ColorGlyphsDict]:
    """Partition color glyphs into (v0-encodable, v1-only) dicts.

    A glyph is v0-encodable only when every one of its layers is a simple
    (glyphName, paletteIndex) pair; anything else goes into the v1 bucket.
    """
    v0Glyphs = {}
    v1Glyphs = {}
    for glyphName, glyphLayers in colorGlyphs.items():
        if all(_is_colrv0_layer(layer) for layer in glyphLayers):
            v0Glyphs[glyphName] = glyphLayers
        else:
            v1Glyphs[glyphName] = glyphLayers

    # sanity check: every input glyph landed in exactly one bucket
    assert set(colorGlyphs) == (set(v0Glyphs) | set(v1Glyphs))

    return v0Glyphs, v1Glyphs
|
||||
|
||||
|
||||
def _reuse_ranges(num_layers: int) -> Generator[Tuple[int, int], None, None]:
    """Yield (lbound, ubound) slice bounds eligible for layer reuse.

    Only spans of at least 2 layers are produced (single-record duplication is
    handled by otData), and span length is capped at _MAX_REUSE_LEN because
    reuse of very large numbers of layers is relatively unlikely.
    """
    # TODO feels like something itertools might have already
    for start in range(num_layers):
        stop_limit = min(num_layers + 1, start + 2 + _MAX_REUSE_LEN)
        for stop in range(start + 2, stop_limit):
            yield (start, stop)
|
||||
|
||||
|
||||
class LayerReuseCache:
    """Tracks previously-emitted layer runs so identical slices can be reused.

    Paint sequences are keyed by a hashable tuple form; matches are replaced
    with a PaintColrLayers record pointing back into the shared layer list.
    """

    # maps a tuple-encoded run of Paints to its FirstLayerIndex in the list
    reusePool: Mapping[Tuple[Any, ...], int]
    # memoized tuple form of individual Paints, keyed by id(paint)
    tuples: Mapping[int, Tuple[Any, ...]]
    keepAlive: List[ot.Paint]  # we need id to remain valid

    def __init__(self):
        self.reusePool = {}
        self.tuples = {}
        self.keepAlive = []

    def _paint_tuple(self, paint: ot.Paint):
        """Return a hashable tuple encoding of `paint`, memoized by identity."""

        # start simple, who even cares about cyclic graphs or interesting field types
        def _tuple_safe(value):
            if isinstance(value, enum.Enum):
                return value
            elif hasattr(value, "__dict__"):
                # encode objects as sorted (attr, value) pairs for determinism
                return tuple(
                    (k, _tuple_safe(v)) for k, v in sorted(value.__dict__.items())
                )
            elif isinstance(value, collections.abc.MutableSequence):
                return tuple(_tuple_safe(e) for e in value)
            return value

        # Cache the tuples for individual Paint instead of the whole sequence
        # because the seq could be a transient slice
        result = self.tuples.get(id(paint), None)
        if result is None:
            result = _tuple_safe(paint)
            self.tuples[id(paint)] = result
            # keep the paint alive so its id() cannot be recycled
            self.keepAlive.append(paint)
        return result

    def _as_tuple(self, paints: Sequence[ot.Paint]) -> Tuple[Any, ...]:
        """Encode a sequence of Paints as a hashable tuple (the reuse-pool key)."""
        return tuple(self._paint_tuple(p) for p in paints)

    def try_reuse(self, layers: List[ot.Paint]) -> List[ot.Paint]:
        """Repeatedly replace the longest reusable run in `layers` with a
        PaintColrLayers reference; returns the (possibly shorter) new list."""
        found_reuse = True
        while found_reuse:
            found_reuse = False

            # prefer longer runs, then later/earlier position as tie-breakers
            ranges = sorted(
                _reuse_ranges(len(layers)),
                key=lambda t: (t[1] - t[0], t[1], t[0]),
                reverse=True,
            )
            for lbound, ubound in ranges:
                reuse_lbound = self.reusePool.get(
                    self._as_tuple(layers[lbound:ubound]), -1
                )
                if reuse_lbound == -1:
                    continue
                # replace the matched slice with a single indirection record
                new_slice = ot.Paint()
                new_slice.Format = int(ot.PaintFormat.PaintColrLayers)
                new_slice.NumLayers = ubound - lbound
                new_slice.FirstLayerIndex = reuse_lbound
                layers = layers[:lbound] + [new_slice] + layers[ubound:]
                found_reuse = True
                break
        return layers

    def add(self, layers: List[ot.Paint], first_layer_index: int):
        """Register every eligible sub-run of `layers` (which starts at
        `first_layer_index` in the shared layer list) for future reuse."""
        for lbound, ubound in _reuse_ranges(len(layers)):
            self.reusePool[self._as_tuple(layers[lbound:ubound])] = (
                lbound + first_layer_index
            )
|
||||
|
||||
|
||||
class LayerListBuilder:
    """Builds ot.Paint graphs and accumulates a shared COLRv1 LayerList.

    PaintColrLayers construction is intercepted via a BEFORE_BUILD callback so
    that child layers land in the shared list (with optional reuse/tree-ing).
    """

    layers: List[ot.Paint]  # the shared, flat layer list being accumulated
    cache: LayerReuseCache  # None when layer reuse is disabled
    allowLayerReuse: bool

    def __init__(self, *, allowLayerReuse=True):
        self.layers = []
        if allowLayerReuse:
            self.cache = LayerReuseCache()
        else:
            self.cache = None

        # We need to intercept construction of PaintColrLayers
        callbacks = _buildPaintCallbacks()
        callbacks[
            (
                BuildCallback.BEFORE_BUILD,
                ot.Paint,
                ot.PaintFormat.PaintColrLayers,
            )
        ] = self._beforeBuildPaintColrLayers
        self.tableBuilder = TableBuilder(callbacks)

    # COLR layers is unusual in that it modifies shared state
    # so we need a callback into an object
    def _beforeBuildPaintColrLayers(self, dest, source):
        """BEFORE_BUILD hook: materialize child layers into the shared list and
        return a finished PaintColrLayers (with an emptied source dict)."""
        # Sketchy gymnastics: a sequence input will have dropped its layers
        # into NumLayers; get it back
        if isinstance(source.get("NumLayers", None), collections.abc.Sequence):
            layers = source["NumLayers"]
        else:
            layers = source["Layers"]

        # Convert maps seqs or whatever into typed objects
        layers = [self.buildPaint(l) for l in layers]

        # No reason to have a colr layers with just one entry
        if len(layers) == 1:
            return layers[0], {}

        if self.cache is not None:
            # Look for reuse, with preference to longer sequences
            # This may make the layer list smaller
            layers = self.cache.try_reuse(layers)

        # The layer list is now final; if it's too big we need to tree it
        is_tree = len(layers) > MAX_PAINT_COLR_LAYER_COUNT
        layers = build_n_ary_tree(layers, n=MAX_PAINT_COLR_LAYER_COUNT)

        # We now have a tree of sequences with Paint leaves.
        # Convert the sequences into PaintColrLayers.
        def listToColrLayers(layer):
            if isinstance(layer, collections.abc.Sequence):
                return self.buildPaint(
                    {
                        "Format": ot.PaintFormat.PaintColrLayers,
                        "Layers": [listToColrLayers(l) for l in layer],
                    }
                )
            return layer

        layers = [listToColrLayers(l) for l in layers]

        # No reason to have a colr layers with just one entry
        if len(layers) == 1:
            return layers[0], {}

        paint = ot.Paint()
        paint.Format = int(ot.PaintFormat.PaintColrLayers)
        paint.NumLayers = len(layers)
        paint.FirstLayerIndex = len(self.layers)
        self.layers.extend(layers)

        # Register our parts for reuse provided we aren't a tree
        # If we are a tree the leaves registered for reuse and that will suffice
        if self.cache is not None and not is_tree:
            self.cache.add(layers, paint.FirstLayerIndex)

        # we've fully built dest; empty source prevents generalized build from kicking in
        return paint, {}

    def buildPaint(self, paint: _PaintInput) -> ot.Paint:
        """Convert a paint input (dict/tuple/ot.Paint) into a typed ot.Paint."""
        return self.tableBuilder.build(ot.Paint, paint)

    def build(self) -> Optional[ot.LayerList]:
        """Return the accumulated LayerList, or None if no layers were added."""
        if not self.layers:
            return None
        layers = ot.LayerList()
        layers.LayerCount = len(self.layers)
        layers.Paint = self.layers
        return layers
|
||||
|
||||
|
||||
def buildBaseGlyphPaintRecord(
    baseGlyph: str, layerBuilder: LayerListBuilder, paint: _PaintInput
) -> ot.BaseGlyphPaintRecord:
    """Build a single BaseGlyphPaintRecord binding `baseGlyph` to its root paint.

    Args:
        baseGlyph: the base glyph name.
        layerBuilder: builder used to convert `paint` and register any child
            layers in the shared LayerList.
        paint: the paint input (dict, tuple, or ot.Paint) for the glyph.

    Returns:
        A populated ot.BaseGlyphPaintRecord.
    """
    # Fixed the return annotation: this constructs and returns a
    # BaseGlyphPaintRecord, not a BaseGlyphList.
    self = ot.BaseGlyphPaintRecord()
    self.BaseGlyph = baseGlyph
    self.Paint = layerBuilder.buildPaint(paint)
    return self
|
||||
|
||||
|
||||
def _format_glyph_errors(errors: Mapping[str, Exception]) -> str:
|
||||
lines = []
|
||||
for baseGlyph, error in sorted(errors.items()):
|
||||
lines.append(f" {baseGlyph} => {type(error).__name__}: {error}")
|
||||
return "\n".join(lines)
|
||||
|
||||
|
||||
def buildColrV1(
    colorGlyphs: _ColorGlyphsDict,
    glyphMap: Optional[Mapping[str, int]] = None,
    *,
    allowLayerReuse: bool = True,
) -> Tuple[Optional[ot.LayerList], ot.BaseGlyphList]:
    """Build COLRv1 (LayerList, BaseGlyphList) from a map of glyph name to paint.

    Args:
        colorGlyphs: map of base glyph name to its paint input.
        glyphMap: optional name-to-GID map used to sort base glyph records.
        allowLayerReuse: whether identical layer runs may be shared.

    Raises:
        ColorLibError: if any glyph's paint fails to build; the per-glyph
            errors are attached to the exception's `errors` attribute.
    """
    # Sort records by glyph ID when a glyph map is supplied.
    if glyphMap is not None:
        colorGlyphItems = sorted(
            colorGlyphs.items(), key=lambda item: glyphMap[item[0]]
        )
    else:
        colorGlyphItems = colorGlyphs.items()

    errors = {}
    baseGlyphs = []
    layerBuilder = LayerListBuilder(allowLayerReuse=allowLayerReuse)
    for baseGlyph, paint in colorGlyphItems:
        try:
            baseGlyphs.append(buildBaseGlyphPaintRecord(baseGlyph, layerBuilder, paint))

        except (ColorLibError, OverflowError, ValueError, TypeError) as e:
            # collect all failures so they can be reported together
            errors[baseGlyph] = e

    if errors:
        failed_glyphs = _format_glyph_errors(errors)
        exc = ColorLibError(f"Failed to build BaseGlyphList:\n{failed_glyphs}")
        exc.errors = errors
        raise exc from next(iter(errors.values()))

    layers = layerBuilder.build()
    glyphs = ot.BaseGlyphList()
    glyphs.BaseGlyphCount = len(baseGlyphs)
    glyphs.BaseGlyphPaintRecord = baseGlyphs
    return (layers, glyphs)
|
||||
@ -0,0 +1,2 @@
|
||||
class ColorLibError(Exception):
    """Error raised while building color font tables (COLR/CPAL)."""
|
||||
143
venv/lib/python3.12/site-packages/fontTools/colorLib/geometry.py
Normal file
143
venv/lib/python3.12/site-packages/fontTools/colorLib/geometry.py
Normal file
@ -0,0 +1,143 @@
|
||||
"""Helpers for manipulating 2D points and vectors in COLR table."""
|
||||
|
||||
from math import copysign, cos, hypot, isclose, pi
|
||||
from fontTools.misc.roundTools import otRound
|
||||
|
||||
|
||||
def _vector_between(origin, target):
|
||||
return (target[0] - origin[0], target[1] - origin[1])
|
||||
|
||||
|
||||
def _round_point(pt):
    """Round both coordinates of a 2D point using otRound."""
    x, y = pt
    return (otRound(x), otRound(y))
|
||||
|
||||
|
||||
def _unit_vector(vec):
|
||||
length = hypot(*vec)
|
||||
if length == 0:
|
||||
return None
|
||||
return (vec[0] / length, vec[1] / length)
|
||||
|
||||
|
||||
# Relative tolerance used by Circle.inside when deciding whether a circle that
# just touches the outer circle's boundary still counts as contained.
_CIRCLE_INSIDE_TOLERANCE = 1e-4


# The unit vector's X and Y components are respectively
#   U = (cos(α), sin(α))
# where α is the angle between the unit vector and the positive x axis.
_UNIT_VECTOR_THRESHOLD = cos(3 / 8 * pi)  # == sin(1/8 * pi) == 0.38268343236508984
|
||||
|
||||
|
||||
def _rounding_offset(direction):
    """Map a direction vector to a (dx, dy) nudge of -1.0, 0.0 or +1.0 per axis.

    The unit circle is divided into 8 equal slices oriented towards the
    cardinal (N, E, S, W) and intermediate (NE, SE, SW, NW) directions; each
    slice maps to one of -1, 0, +1 for the X and Y coordinates.  E.g. a vector
    oriented towards SE yields (+1.0, -1.0); one pointing West yields
    (-1.0, 0.0).  A zero-length direction yields (0, 0).
    """
    unit = _unit_vector(direction)
    if not unit:
        return (0, 0)

    # Components near 0 mean the direction is almost orthogonal to that axis,
    # so the coordinate stays put; otherwise nudge by +/- 1.0 per its sign.
    return tuple(
        (
            0
            if -_UNIT_VECTOR_THRESHOLD <= component < _UNIT_VECTOR_THRESHOLD
            else copysign(1.0, component)
        )
        for component in unit
    )
|
||||
|
||||
|
||||
class Circle:
    """A mutable 2D circle used for stable rounding of gradient geometry."""

    def __init__(self, centre, radius):
        self.centre = centre
        self.radius = radius

    def __repr__(self):
        return f"Circle(centre={self.centre}, radius={self.radius})"

    def round(self):
        """Return a copy with centre coordinates and radius rounded via otRound."""
        return Circle(_round_point(self.centre), otRound(self.radius))

    def inside(self, outer_circle, tolerance=_CIRCLE_INSIDE_TOLERANCE):
        """Return True if this circle is fully contained within `outer_circle`.

        Containment holds when the outer radius exceeds (or is relatively
        within `tolerance` of) this circle's radius plus the centre distance.
        """
        dist = self.radius + hypot(*_vector_between(self.centre, outer_circle.centre))
        # Bug fix: the `tolerance` parameter was previously ignored; the module
        # constant _CIRCLE_INSIDE_TOLERANCE was always used in the isclose call.
        return (
            isclose(outer_circle.radius, dist, rel_tol=tolerance)
            or outer_circle.radius > dist
        )

    def concentric(self, other):
        """Return True if `other` shares this circle's centre."""
        return self.centre == other.centre

    def move(self, dx, dy):
        """Translate the circle's centre in place by (dx, dy)."""
        self.centre = (self.centre[0] + dx, self.centre[1] + dy)
|
||||
|
||||
|
||||
def round_start_circle_stable_containment(c0, r0, c1, r1):
    """Round start circle so that it stays inside/outside end circle after rounding.

    The rounding of circle coordinates to integers may cause an abrupt change
    if the start circle c0 is so close to the end circle c1's perimeter that
    it ends up falling outside (or inside) as a result of the rounding.
    To keep the gradient unchanged, we nudge it in the right direction.

    Args:
        c0: start circle centre as an (x, y) pair.
        r0: start circle radius.
        c1: end circle centre as an (x, y) pair.
        r1: end circle radius.

    Returns:
        A Circle: the rounded start circle, nudged if needed so its
        containment relation with the rounded end circle matches the
        pre-rounding relation.

    See:
    https://github.com/googlefonts/colr-gradients-spec/issues/204
    https://github.com/googlefonts/picosvg/issues/158
    """
    start, end = Circle(c0, r0), Circle(c1, r1)

    inside_before_round = start.inside(end)

    round_start = start.round()
    round_end = end.round()
    inside_after_round = round_start.inside(round_end)

    if inside_before_round == inside_after_round:
        # rounding did not change the containment relation; nothing to fix
        return round_start
    elif inside_after_round:
        # start was outside before rounding: we need to push start away from end
        direction = _vector_between(round_end.centre, round_start.centre)
        radius_delta = +1.0
    else:
        # start was inside before rounding: we need to push start towards end
        direction = _vector_between(round_start.centre, round_end.centre)
        radius_delta = -1.0
    dx, dy = _rounding_offset(direction)

    # At most 2 iterations ought to be enough to converge. Before the loop, we
    # know the start circle didn't keep containment after normal rounding; thus
    # we continue adjusting by -/+ 1.0 until containment is restored.
    # Normal rounding can at most move each coordinates -/+0.5; in the worst case
    # both the start and end circle's centres and radii will be rounded in opposite
    # directions, e.g. when they move along a 45 degree diagonal:
    #   c0 = (1.5, 1.5) ===> (2.0, 2.0)
    #   r0 = 0.5 ===> 1.0
    #   c1 = (0.499, 0.499) ===> (0.0, 0.0)
    #   r1 = 2.499 ===> 2.0
    # In this example, the relative distance between the circles, calculated
    # as r1 - (r0 + distance(c0, c1)) is initially 0.57437 (c0 is inside c1), and
    # -1.82842 after rounding (c0 is now outside c1). Nudging c0 by -1.0 on both
    # x and y axes moves it towards c1 by hypot(-1.0, -1.0) = 1.41421. Two of these
    # moves cover twice that distance, which is enough to restore containment.
    max_attempts = 2
    for _ in range(max_attempts):
        if round_start.concentric(round_end):
            # can't move c0 towards c1 (they are the same), so we change the radius
            round_start.radius += radius_delta
            assert round_start.radius >= 0
        else:
            round_start.move(dx, dy)
        if inside_before_round == round_start.inside(round_end):
            break
    else:  # likely a bug
        raise AssertionError(
            f"Rounding circle {start} "
            f"{'inside' if inside_before_round else 'outside'} "
            f"{end} failed after {max_attempts} attempts!"
        )

    return round_start
|
||||
@ -0,0 +1,223 @@
|
||||
"""
|
||||
colorLib.table_builder: Generic helper for filling in BaseTable derivatives from tuples and maps and such.
|
||||
|
||||
"""
|
||||
|
||||
import collections
|
||||
import enum
|
||||
from fontTools.ttLib.tables.otBase import (
|
||||
BaseTable,
|
||||
FormatSwitchingBaseTable,
|
||||
UInt8FormatSwitchingBaseTable,
|
||||
)
|
||||
from fontTools.ttLib.tables.otConverters import (
|
||||
ComputedInt,
|
||||
SimpleValue,
|
||||
Struct,
|
||||
Short,
|
||||
UInt8,
|
||||
UShort,
|
||||
IntValue,
|
||||
FloatValue,
|
||||
OptionalValue,
|
||||
)
|
||||
from fontTools.misc.roundTools import otRound
|
||||
|
||||
|
||||
class BuildCallback(enum.Enum):
    """Lifecycle hooks used by TableBuilder's callback table.

    BEFORE_BUILD: keyed on (BEFORE_BUILD, class[, Format if available]);
        receives (dest, source) and returns (dest, source), which can be
        new objects.

    AFTER_BUILD: keyed on (AFTER_BUILD, class[, Format if available]);
        receives dest and returns dest, which can be a new object.

    CREATE_DEFAULT: keyed on (CREATE_DEFAULT, class[, Format if available]);
        receives no arguments and returns a new instance of class.
    """

    BEFORE_BUILD = enum.auto()
    AFTER_BUILD = enum.auto()
    CREATE_DEFAULT = enum.auto()
|
||||
|
||||
|
||||
def _assignable(convertersByName):
|
||||
return {k: v for k, v in convertersByName.items() if not isinstance(v, ComputedInt)}
|
||||
|
||||
|
||||
def _isNonStrSequence(value):
|
||||
return isinstance(value, collections.abc.Sequence) and not isinstance(value, str)
|
||||
|
||||
|
||||
def _split_format(cls, source):
    """Split `source` into (Format, remainder).

    `source` may be a non-empty sequence whose first item is the format, or a
    mapping containing a "Format" key (which is popped from a copy); anything
    else is rejected.  The format must be hashable and known to `cls`.
    """
    if _isNonStrSequence(source):
        assert len(source) > 0, f"{cls} needs at least format from {source}"
        fmt = source[0]
        remainder = source[1:]
    elif isinstance(source, collections.abc.Mapping):
        assert "Format" in source, f"{cls} needs at least Format from {source}"
        remainder = source.copy()
        fmt = remainder.pop("Format")
    else:
        raise ValueError(f"Not sure how to populate {cls} from {source}")

    assert isinstance(
        fmt, collections.abc.Hashable
    ), f"{cls} Format is not hashable: {fmt!r}"
    assert fmt in cls.convertersByName, f"{cls} invalid Format: {fmt!r}"

    return fmt, remainder
|
||||
|
||||
|
||||
class TableBuilder:
    """
    Helps to populate things derived from BaseTable from maps, tuples, etc.

    A table of lifecycle callbacks may be provided to add logic beyond what is possible
    based on otData info for the target class. See BuildCallbacks.
    """

    def __init__(self, callbackTable=None):
        if callbackTable is None:
            callbackTable = {}
        self._callbackTable = callbackTable

    def _convert(self, dest, field, converter, value):
        """Coerce `value` per `converter`'s kind and assign it to dest.field."""
        enumClass = getattr(converter, "enumClass", None)

        if enumClass:
            # Accept an enum member, its (case-insensitive) name, or its value.
            if isinstance(value, enumClass):
                pass
            elif isinstance(value, str):
                try:
                    value = getattr(enumClass, value.upper())
                except AttributeError:
                    raise ValueError(f"{value} is not a valid {enumClass}")
            else:
                value = enumClass(value)

        elif isinstance(converter, IntValue):
            value = otRound(value)
        elif isinstance(converter, FloatValue):
            value = float(value)

        elif isinstance(converter, Struct):
            # Subtable field: recurse; repeated fields also set their count.
            if converter.repeat:
                if _isNonStrSequence(value):
                    value = [self.build(converter.tableClass, v) for v in value]
                else:
                    # a single value is wrapped into a one-element list
                    value = [self.build(converter.tableClass, value)]
                setattr(dest, converter.repeat, len(value))
            else:
                value = self.build(converter.tableClass, value)
        elif callable(converter):
            value = converter(value)

        setattr(dest, field, value)

    def build(self, cls, source):
        """Build an instance of `cls` (a BaseTable subclass) from `source`.

        `source` may already be a `cls` instance (returned as-is), a mapping of
        field names to values, a sequence matching field declaration order, or
        a single value (retried as a 1-tuple).
        """
        assert issubclass(cls, BaseTable)

        if isinstance(source, cls):
            return source

        callbackKey = (cls,)
        fmt = None
        if issubclass(cls, FormatSwitchingBaseTable):
            fmt, source = _split_format(cls, source)
            callbackKey = (cls, fmt)

        # CREATE_DEFAULT callback (if any) supplies the initial instance.
        dest = self._callbackTable.get(
            (BuildCallback.CREATE_DEFAULT,) + callbackKey, lambda: cls()
        )()
        assert isinstance(dest, cls)

        convByName = _assignable(cls.convertersByName)
        skippedFields = set()

        # For format switchers we need to resolve converters based on format
        if issubclass(cls, FormatSwitchingBaseTable):
            dest.Format = fmt
            convByName = _assignable(convByName[dest.Format])
            skippedFields.add("Format")

        # Convert sequence => mapping so before thunk only has to handle one format
        if _isNonStrSequence(source):
            # Sequence (typically list or tuple) assumed to match fields in declaration order
            assert len(source) <= len(
                convByName
            ), f"Sequence of {len(source)} too long for {cls}; expected <= {len(convByName)} values"
            source = dict(zip(convByName.keys(), source))

        dest, source = self._callbackTable.get(
            (BuildCallback.BEFORE_BUILD,) + callbackKey, lambda d, s: (d, s)
        )(dest, source)

        if isinstance(source, collections.abc.Mapping):
            for field, value in source.items():
                if field in skippedFields:
                    continue
                converter = convByName.get(field, None)
                if not converter:
                    raise ValueError(
                        f"Unrecognized field {field} for {cls}; expected one of {sorted(convByName.keys())}"
                    )
                self._convert(dest, field, converter, value)
        else:
            # let's try as a 1-tuple
            dest = self.build(cls, (source,))

        # Fill in defaults for optional fields the source did not provide.
        for field, conv in convByName.items():
            if not hasattr(dest, field) and isinstance(conv, OptionalValue):
                setattr(dest, field, conv.DEFAULT)

        dest = self._callbackTable.get(
            (BuildCallback.AFTER_BUILD,) + callbackKey, lambda d: d
        )(dest)

        return dest
|
||||
|
||||
|
||||
class TableUnbuilder:
    """Convert BaseTable instances back into plain dicts (inverse of TableBuilder).

    An optional callback table keyed on (class[, Format]) may post-process
    each unbuilt source dict.
    """

    def __init__(self, callbackTable=None):
        if callbackTable is None:
            callbackTable = {}
        self._callbackTable = callbackTable

    def unbuild(self, table):
        """Return a dict representation of `table`, recursing into subtables.

        Raises:
            NotImplementedError: for converter kinds with no un-build logic.
        """
        assert isinstance(table, BaseTable)

        source = {}

        callbackKey = (type(table),)
        if isinstance(table, FormatSwitchingBaseTable):
            source["Format"] = int(table.Format)
            callbackKey += (table.Format,)

        for converter in table.getConverters():
            if isinstance(converter, ComputedInt):
                # computed values (e.g. counts) are derived, not stored
                continue
            value = getattr(table, converter.name)

            enumClass = getattr(converter, "enumClass", None)
            if enumClass:
                source[converter.name] = value.name.lower()
            elif isinstance(converter, Struct):
                if converter.repeat:
                    source[converter.name] = [self.unbuild(v) for v in value]
                else:
                    source[converter.name] = self.unbuild(value)
            elif isinstance(converter, SimpleValue):
                # "simple" values (e.g. int, float, str) need no further un-building
                source[converter.name] = value
            else:
                # Bug fix: this message was missing the f-string prefix, so the
                # {value!r}/{converter!r} placeholders were never interpolated.
                raise NotImplementedError(
                    f"Don't know how unbuild {value!r} with {converter!r}"
                )

        source = self._callbackTable.get(callbackKey, lambda s: s)(source)

        return source
|
||||
@ -0,0 +1,81 @@
|
||||
from fontTools.ttLib.tables import otTables as ot
|
||||
from .table_builder import TableUnbuilder
|
||||
|
||||
|
||||
def unbuildColrV1(layerList, baseGlyphList):
    """Unbuild COLRv1 (LayerList, BaseGlyphList) into a dict keyed by base
    glyph name, each value being a plain-Python paint description."""
    paintLayers = layerList.Paint if layerList else []
    unbuilder = LayerListUnbuilder(paintLayers)
    result = {}
    for record in baseGlyphList.BaseGlyphPaintRecord:
        result[record.BaseGlyph] = unbuilder.unbuildPaint(record.Paint)
    return result
|
||||
|
||||
|
||||
def _flatten_layers(lst):
    """Recursively yield leaf paint dicts, expanding nested PaintColrLayers."""
    for paintDict in lst:
        if paintDict["Format"] != ot.PaintFormat.PaintColrLayers:
            yield paintDict
        else:
            yield from _flatten_layers(paintDict["Layers"])
|
||||
|
||||
|
||||
class LayerListUnbuilder:
    """Unbuilds ot.Paint graphs, resolving PaintColrLayers indirections
    against a shared layer list."""

    def __init__(self, layers):
        # the shared LayerList paints that PaintColrLayers records index into
        self.layers = layers

        # intercept PaintColrLayers so its layer slice can be inlined
        callbacks = {
            (
                ot.Paint,
                ot.PaintFormat.PaintColrLayers,
            ): self._unbuildPaintColrLayers,
        }
        self.tableUnbuilder = TableUnbuilder(callbacks)

    def unbuildPaint(self, paint):
        """Return a plain-Python (dict) representation of `paint`."""
        assert isinstance(paint, ot.Paint)
        return self.tableUnbuilder.unbuild(paint)

    def _unbuildPaintColrLayers(self, source):
        """Replace a PaintColrLayers record with its flattened child layers.

        Nested PaintColrLayers (from n-ary tree-ing at build time) are
        flattened; a single remaining layer is returned directly.
        """
        assert source["Format"] == ot.PaintFormat.PaintColrLayers

        layers = list(
            _flatten_layers(
                [
                    self.unbuildPaint(childPaint)
                    for childPaint in self.layers[
                        source["FirstLayerIndex"] : source["FirstLayerIndex"]
                        + source["NumLayers"]
                    ]
                ]
            )
        )

        if len(layers) == 1:
            return layers[0]

        return {"Format": source["Format"], "Layers": layers}
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # CLI entry point: print the unbuilt COLRv1 glyph descriptions of a font.
    from pprint import pprint
    import sys
    from fontTools.ttLib import TTFont

    try:
        fontfile = sys.argv[1]
    except IndexError:
        sys.exit("usage: fonttools colorLib.unbuilder FONTFILE")

    font = TTFont(fontfile)
    colr = font["COLR"]
    if colr.version < 1:
        # only COLRv1 has LayerList/BaseGlyphList to unbuild
        sys.exit(f"error: No COLR table version=1 found in {fontfile}")

    colorGlyphs = unbuildColrV1(
        colr.table.LayerList,
        colr.table.BaseGlyphList,
    )

    pprint(colorGlyphs)
|
||||
@ -0,0 +1,75 @@
|
||||
"""
|
||||
Define all configuration options that can affect the working of fontTools
|
||||
modules. E.g. optimization levels of varLib IUP, otlLib GPOS compression level,
|
||||
etc. If this file gets too big, split it into smaller files per-module.
|
||||
|
||||
An instance of the Config class can be attached to a TTFont object, so that
|
||||
the various modules can access their configuration options from it.
|
||||
"""
|
||||
|
||||
from textwrap import dedent
|
||||
|
||||
from fontTools.misc.configTools import *
|
||||
|
||||
|
||||
class Config(AbstractConfig):
    """Shared registry of library-wide fontTools configuration options.

    Individual options are added below via Config.register_option().
    """

    options = Options()


# Module-level alias so other modules can refer to the shared registry.
OPTIONS = Config.options
|
||||
|
||||
|
||||
# GPOS PairPos (class-kerning) subtable compaction level used by
# fontTools.otlLib.optimize.gpos.
Config.register_option(
    name="fontTools.otlLib.optimize.gpos:COMPRESSION_LEVEL",
    help=dedent(
        """\
        GPOS Lookup type 2 (PairPos) compression level:
        0 = do not attempt to compact PairPos lookups;
        1 to 8 = create at most 1 to 8 new subtables for each existing
        subtable, provided that it would yield a 50%% file size saving;
        9 = create as many new subtables as needed to yield a file size saving.
        Default: 0.

        This compaction aims to save file size, by splitting large class
        kerning subtables (Format 2) that contain many zero values into
        smaller and denser subtables. It's a trade-off between the overhead
        of several subtables versus the sparseness of one big subtable.

        See the pull request: https://github.com/fonttools/fonttools/pull/2326
        """
    ),
    default=0,
    parse=int,
    validate=lambda v: v in range(10),  # accepts levels 0..9
)

# Tri-state (None/True/False): whether to serialize GPOS/GSUB with the
# uharfbuzz repacker; None means "use it if importable".
Config.register_option(
    name="fontTools.ttLib.tables.otBase:USE_HARFBUZZ_REPACKER",
    help=dedent(
        """\
        FontTools tries to use the HarfBuzz Repacker to serialize GPOS/GSUB tables
        if the uharfbuzz python bindings are importable, otherwise falls back to its
        slower, less efficient serializer. Set to False to always use the latter.
        Set to True to explicitly request the HarfBuzz Repacker (will raise an
        error if uharfbuzz cannot be imported).
        """
    ),
    default=None,
    parse=Option.parse_optional_bool,
    validate=Option.validate_optional_bool,
)

# Opt-in switch for the GPOS LookupType 7 size optimisation (off by default
# because older macOS versions could not handle LookupType 7).
Config.register_option(
    name="fontTools.otlLib.builder:WRITE_GPOS7",
    help=dedent(
        """\
        macOS before 13.2 didn’t support GPOS LookupType 7 (non-chaining
        ContextPos lookups), so FontTools.otlLib.builder disables a file size
        optimisation that would use LookupType 7 instead of 8 when there is no
        chaining (no prefix or suffix). Set to True to enable the optimization.
        """
    ),
    default=False,
    parse=Option.parse_optional_bool,
    validate=Option.validate_optional_bool,
)
|
||||
@ -0,0 +1,15 @@
|
||||
# Copyright 2016 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from .cu2qu import *
|
||||
@ -0,0 +1,6 @@
|
||||
import sys
from .cli import _main as main


# Entry point for ``python -m fontTools.cu2qu``.
if __name__ == "__main__":
    sys.exit(main())
|
||||
@ -0,0 +1,54 @@
|
||||
"""Benchmark the cu2qu algorithm performance."""
|
||||
|
||||
from .cu2qu import *
|
||||
import random
|
||||
import timeit
|
||||
|
||||
# Maximum permitted approximation error used by all benchmark runs.
MAX_ERR = 0.05


def generate_curve():
    """Return a random cubic curve as four (x, y) float tuples in [0, 2048]."""
    points = []
    for _ in range(4):
        x = float(random.randint(0, 2048))
        y = float(random.randint(0, 2048))
        points.append((x, y))
    return points


def setup_curve_to_quadratic():
    """Build the argument tuple for one curve_to_quadratic() call."""
    return (generate_curve(), MAX_ERR)


def setup_curves_to_quadratic():
    """Build the argument tuple for one curves_to_quadratic() call."""
    num_curves = 3
    curves = [generate_curve() for _ in range(num_curves)]
    errors = [MAX_ERR] * num_curves
    return (curves, errors)
|
||||
|
||||
|
||||
def run_benchmark(module, function, setup_suffix="", repeat=5, number=1000):
    """Time `function` with timeit and print the fastest per-call time in µs.

    Args:
        module: Unused by this implementation; presumably kept for symmetry
            with similar benchmark drivers — TODO confirm.
        function: Name of the function under test; "setup_" + function must
            also exist in globals() and return the argument tuple.
        setup_suffix: Optional suffix selecting an alternate setup function.
        repeat: Number of timeit repetitions; the minimum is reported.
        number: Calls per repetition.
    """
    setup_func = "setup_" + function
    if setup_suffix:
        print("%s with %s:" % (function, setup_suffix), end="")
        setup_func += "_" + setup_suffix
    else:
        print("%s:" % function, end="")

    def wrapper(function, setup_func):
        # Resolve both names once; the setup function is re-run on every
        # timed call so each call gets freshly generated arguments.
        function = globals()[function]
        setup_func = globals()[setup_func]

        def wrapped():
            return function(*setup_func())

        return wrapped

    results = timeit.repeat(wrapper(function, setup_func), repeat=repeat, number=number)
    # Report the best (least noisy) repetition, scaled to microseconds/call.
    print("\t%5.1fus" % (min(results) * 1000000.0 / number))
|
||||
|
||||
|
||||
def main():
    """Benchmark both public cu2qu conversion entry points."""
    run_benchmark("cu2qu", "curve_to_quadratic")
    run_benchmark("cu2qu", "curves_to_quadratic")


if __name__ == "__main__":
    # Fixed seed so successive runs time identical random inputs.
    random.seed(1)
    main()
|
||||
198
venv/lib/python3.12/site-packages/fontTools/cu2qu/cli.py
Normal file
198
venv/lib/python3.12/site-packages/fontTools/cu2qu/cli.py
Normal file
@ -0,0 +1,198 @@
|
||||
import os
|
||||
import argparse
|
||||
import logging
|
||||
import shutil
|
||||
import multiprocessing as mp
|
||||
from contextlib import closing
|
||||
from functools import partial
|
||||
|
||||
import fontTools
|
||||
from .ufo import font_to_quadratic, fonts_to_quadratic
|
||||
|
||||
ufo_module = None
|
||||
try:
|
||||
import ufoLib2 as ufo_module
|
||||
except ImportError:
|
||||
try:
|
||||
import defcon as ufo_module
|
||||
except ImportError as e:
|
||||
pass
|
||||
|
||||
|
||||
logger = logging.getLogger("fontTools.cu2qu")
|
||||
|
||||
|
||||
def _cpu_count():
|
||||
try:
|
||||
return mp.cpu_count()
|
||||
except NotImplementedError: # pragma: no cover
|
||||
return 1
|
||||
|
||||
|
||||
def open_ufo(path):
    """Open a UFO at *path* with whichever UFO library was importable.

    ufoLib2's Font exposes a classmethod ``open``; defcon's Font takes the
    path in its constructor.
    """
    if hasattr(ufo_module.Font, "open"):  # ufoLib2
        return ufo_module.Font.open(path)
    return ufo_module.Font(path)  # defcon
|
||||
|
||||
|
||||
def _font_to_quadratic(input_path, output_path=None, **kwargs):
    """Convert one UFO, saving in place unless *output_path* is given.

    When no curves needed conversion and an output path was requested, the
    input is copied over unchanged.
    """
    ufo = open_ufo(input_path)
    logger.info("Converting curves for %s", input_path)
    modified = font_to_quadratic(ufo, **kwargs)
    if modified:
        logger.info("Saving %s", output_path)
        if output_path:
            ufo.save(output_path)
        else:
            # No explicit destination: overwrite the source UFO.
            ufo.save()
    elif output_path:
        _copytree(input_path, output_path)
|
||||
|
||||
|
||||
def _samepath(path1, path2):
|
||||
# TODO on python3+, there's os.path.samefile
|
||||
path1 = os.path.normcase(os.path.abspath(os.path.realpath(path1)))
|
||||
path2 = os.path.normcase(os.path.abspath(os.path.realpath(path2)))
|
||||
return path1 == path2
|
||||
|
||||
|
||||
def _copytree(input_path, output_path):
    """Replace *output_path* with a fresh copy of *input_path*.

    No-op when both paths point at the same location, so we never delete the
    source before copying it.
    """
    if _samepath(input_path, output_path):
        logger.debug("input and output paths are the same file; skipped copy")
        return
    # shutil.copytree requires the destination not to exist.
    if os.path.exists(output_path):
        shutil.rmtree(output_path)
    shutil.copytree(input_path, output_path)
|
||||
|
||||
|
||||
def _main(args=None):
    """Convert one or more UFO fonts from cubic to quadratic curves.

    Args:
        args: Optional list of command-line arguments; defaults to
            ``sys.argv[1:]`` via argparse.

    Fixes over the previous revision: corrected user-facing typos in the
    ``-e`` help ("maxiumum" -> "maximum"), the ``-m`` help ("to used" ->
    "to use") and the multi-input error message ("multile" -> "multiple").
    """
    parser = argparse.ArgumentParser(prog="cu2qu")
    parser.add_argument("--version", action="version", version=fontTools.__version__)
    parser.add_argument(
        "infiles",
        nargs="+",
        metavar="INPUT",
        help="one or more input UFO source file(s).",
    )
    parser.add_argument("-v", "--verbose", action="count", default=0)
    parser.add_argument(
        "-e",
        "--conversion-error",
        type=float,
        metavar="ERROR",
        default=None,
        help="maximum approximation error measured in EM (default: 0.001)",
    )
    parser.add_argument(
        "-m",
        "--mixed",
        default=False,
        action="store_true",
        help="whether to use mixed quadratic and cubic curves",
    )
    parser.add_argument(
        "--keep-direction",
        dest="reverse_direction",
        action="store_false",
        help="do not reverse the contour direction",
    )

    mode_parser = parser.add_mutually_exclusive_group()
    mode_parser.add_argument(
        "-i",
        "--interpolatable",
        action="store_true",
        help="whether curve conversion should keep interpolation compatibility",
    )
    mode_parser.add_argument(
        "-j",
        "--jobs",
        type=int,
        nargs="?",
        default=1,
        const=_cpu_count(),
        metavar="N",
        help="Convert using N multiple processes (default: %(default)s)",
    )

    output_parser = parser.add_mutually_exclusive_group()
    output_parser.add_argument(
        "-o",
        "--output-file",
        default=None,
        metavar="OUTPUT",
        help=(
            "output filename for the converted UFO. By default fonts are "
            "modified in place. This only works with a single input."
        ),
    )
    output_parser.add_argument(
        "-d",
        "--output-dir",
        default=None,
        metavar="DIRECTORY",
        help="output directory where to save converted UFOs",
    )

    options = parser.parse_args(args)

    if ufo_module is None:
        parser.error("Either ufoLib2 or defcon are required to run this script.")

    # Map -v occurrences to logging levels: 0 -> WARNING, 1 -> INFO, 2+ -> DEBUG.
    if not options.verbose:
        level = "WARNING"
    elif options.verbose == 1:
        level = "INFO"
    else:
        level = "DEBUG"
    logging.basicConfig(level=level)

    if len(options.infiles) > 1 and options.output_file:
        parser.error("-o/--output-file can't be used with multiple inputs")

    # Work out one output path per input (None == save in place).
    if options.output_dir:
        output_dir = options.output_dir
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)
        elif not os.path.isdir(output_dir):
            parser.error("'%s' is not a directory" % output_dir)
        output_paths = [
            os.path.join(output_dir, os.path.basename(p)) for p in options.infiles
        ]
    elif options.output_file:
        output_paths = [options.output_file]
    else:
        # save in-place
        output_paths = [None] * len(options.infiles)

    kwargs = dict(
        dump_stats=options.verbose > 0,
        max_err_em=options.conversion_error,
        reverse_direction=options.reverse_direction,
        all_quadratic=False if options.mixed else True,
    )

    if options.interpolatable:
        # All UFOs must be converted together so the resulting splines stay
        # interpolation-compatible across masters.
        logger.info("Converting curves compatibly")
        ufos = [open_ufo(infile) for infile in options.infiles]
        if fonts_to_quadratic(ufos, **kwargs):
            for ufo, output_path in zip(ufos, output_paths):
                logger.info("Saving %s", output_path)
                if output_path:
                    ufo.save(output_path)
                else:
                    ufo.save()
        else:
            # Nothing changed; mirror inputs to any requested outputs.
            for input_path, output_path in zip(options.infiles, output_paths):
                if output_path:
                    _copytree(input_path, output_path)
    else:
        jobs = min(len(options.infiles), options.jobs) if options.jobs > 1 else 1
        if jobs > 1:
            func = partial(_font_to_quadratic, **kwargs)
            logger.info("Running %d parallel processes", jobs)
            with closing(mp.Pool(jobs)) as pool:
                pool.starmap(func, zip(options.infiles, output_paths))
        else:
            for input_path, output_path in zip(options.infiles, output_paths):
                _font_to_quadratic(input_path, output_path, **kwargs)
|
||||
14929
venv/lib/python3.12/site-packages/fontTools/cu2qu/cu2qu.c
Normal file
14929
venv/lib/python3.12/site-packages/fontTools/cu2qu/cu2qu.c
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
534
venv/lib/python3.12/site-packages/fontTools/cu2qu/cu2qu.py
Normal file
534
venv/lib/python3.12/site-packages/fontTools/cu2qu/cu2qu.py
Normal file
@ -0,0 +1,534 @@
|
||||
# cython: language_level=3
|
||||
# distutils: define_macros=CYTHON_TRACE_NOGIL=1
|
||||
|
||||
# Copyright 2015 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
try:
    import cython

    COMPILED = cython.compiled
except (AttributeError, ImportError):
    # if cython not installed, use mock module with no-op decorators and types
    from fontTools.misc import cython

    COMPILED = False

import math

from .errors import Error as Cu2QuError, ApproxNotFoundError


__all__ = ["curve_to_quadratic", "curves_to_quadratic"]

# Upper bound on the number of quadratic segments tried before giving up.
MAX_N = 100

# Sentinel used by calc_intersect() to signal "no intersection found".
NAN = float("NaN")
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.returns(cython.double)
@cython.locals(v1=cython.complex, v2=cython.complex)
def dot(v1, v2):
    """Return the dot product of two vectors.

    Args:
        v1 (complex): First vector.
        v2 (complex): Second vector.

    Returns:
        double: Dot product.
    """
    # Treating complexes as 2D vectors: Re((a+bi) * conj(c+di)) = ac + bd.
    return (v1 * v2.conjugate()).real
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
    _1=cython.complex, _2=cython.complex, _3=cython.complex, _4=cython.complex
)
def calc_cubic_points(a, b, c, d):
    """Convert power-basis coefficients (a*t^3 + b*t^2 + c*t + d) back into
    the four Bezier control points; inverse of calc_cubic_parameters()."""
    _1 = d
    _2 = (c / 3.0) + d
    _3 = (b + c) / 3.0 + _2
    _4 = a + d + c + b
    return _1, _2, _3, _4
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
def calc_cubic_parameters(p0, p1, p2, p3):
    """Convert cubic Bezier control points into power-basis coefficients
    (a, b, c, d) such that B(t) = a*t^3 + b*t^2 + c*t + d."""
    c = (p1 - p0) * 3.0
    b = (p2 - p1) * 3.0 - c
    d = p0
    a = p3 - d - c - b
    return a, b, c, d
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
def split_cubic_into_n_iter(p0, p1, p2, p3, n):
    """Split a cubic Bezier into n equal parts.

    Splits the curve into `n` equal parts by curve time.
    (t=0..1/n, t=1/n..2/n, ...)

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        An iterator yielding the control points (four complex values) of the
        subcurves.
    """
    # Hand-coded special-cases
    if n == 2:
        return iter(split_cubic_into_two(p0, p1, p2, p3))
    if n == 3:
        return iter(split_cubic_into_three(p0, p1, p2, p3))
    if n == 4:
        # 4 parts = each half split in two again.
        a, b = split_cubic_into_two(p0, p1, p2, p3)
        return iter(
            split_cubic_into_two(a[0], a[1], a[2], a[3])
            + split_cubic_into_two(b[0], b[1], b[2], b[3])
        )
    if n == 6:
        # 6 parts = each half split in three.
        a, b = split_cubic_into_two(p0, p1, p2, p3)
        return iter(
            split_cubic_into_three(a[0], a[1], a[2], a[3])
            + split_cubic_into_three(b[0], b[1], b[2], b[3])
        )

    # General case: power-basis re-parameterization in the generator below.
    return _split_cubic_into_n_gen(p0, p1, p2, p3, n)
|
||||
|
||||
|
||||
@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
    n=cython.int,
)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(
    dt=cython.double, delta_2=cython.double, delta_3=cython.double, i=cython.int
)
@cython.locals(
    a1=cython.complex, b1=cython.complex, c1=cython.complex, d1=cython.complex
)
def _split_cubic_into_n_gen(p0, p1, p2, p3, n):
    """Yield control points of the n equal-time slices of a cubic.

    Converts to power-basis coefficients once, then for each slice
    substitutes t -> t1 + dt*t into the polynomial and converts the
    re-parameterized coefficients back into control points.
    """
    a, b, c, d = calc_cubic_parameters(p0, p1, p2, p3)
    dt = 1 / n
    delta_2 = dt * dt
    delta_3 = dt * delta_2
    for i in range(n):
        t1 = i * dt
        t1_2 = t1 * t1
        # calc new a, b, c and d
        a1 = a * delta_3
        b1 = (3 * a * t1 + b) * delta_2
        c1 = (2 * b * t1 + c + 3 * a * t1_2) * dt
        d1 = a * t1 * t1_2 + b * t1_2 + c * t1 + d
        yield calc_cubic_points(a1, b1, c1, d1)
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex, p1=cython.complex, p2=cython.complex, p3=cython.complex
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def split_cubic_into_two(p0, p1, p2, p3):
    """Split a cubic Bezier into two equal parts.

    Splits the curve into two equal parts at t = 0.5

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        tuple: Two cubic Beziers (each expressed as a tuple of four complex
        values).
    """
    # Closed-form de Casteljau subdivision at t = 0.5:
    # mid = B(0.5) = (p0 + 3*p1 + 3*p2 + p3) / 8.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    return (
        (p0, (p0 + p1) * 0.5, mid - deriv3, mid),
        (mid, mid + deriv3, (p2 + p3) * 0.5, p3),
    )
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.locals(
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(
    mid1=cython.complex,
    deriv1=cython.complex,
    mid2=cython.complex,
    deriv2=cython.complex,
)
def split_cubic_into_three(p0, p1, p2, p3):
    """Split a cubic Bezier into three equal parts.

    Splits the curve into three equal parts at t = 1/3 and t = 2/3

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        tuple: Three cubic Beziers (each expressed as a tuple of four complex
        values).
    """
    # Closed-form subdivision: mid1/mid2 are the on-curve points at
    # t = 1/3 and t = 2/3; deriv1/deriv2 give the adjacent handles.
    mid1 = (8 * p0 + 12 * p1 + 6 * p2 + p3) * (1 / 27)
    deriv1 = (p3 + 3 * p2 - 4 * p0) * (1 / 27)
    mid2 = (p0 + 6 * p1 + 12 * p2 + 8 * p3) * (1 / 27)
    deriv2 = (4 * p3 - 3 * p1 - p0) * (1 / 27)
    return (
        (p0, (2 * p0 + p1) / 3.0, mid1 - deriv1, mid1),
        (mid1, mid1 + deriv1, mid2 - deriv2, mid2),
        (mid2, mid2 + deriv2, (p2 + 2 * p3) / 3.0, p3),
    )
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(
    t=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(_p1=cython.complex, _p2=cython.complex)
def cubic_approx_control(t, p0, p1, p2, p3):
    """Approximate a cubic Bezier using a quadratic one.

    Args:
        t (double): Position of control point.
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.

    Returns:
        complex: Location of candidate control point on quadratic curve.
    """
    # _p1/_p2 are the quadratic handles that preserve each end tangent
    # (cubic handles scaled by 3/2); the result interpolates between them.
    _p1 = p0 + (p1 - p0) * 1.5
    _p2 = p3 + (p2 - p3) * 1.5
    return _p1 + (_p2 - _p1) * t
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.returns(cython.complex)
@cython.locals(a=cython.complex, b=cython.complex, c=cython.complex, d=cython.complex)
@cython.locals(ab=cython.complex, cd=cython.complex, p=cython.complex, h=cython.double)
def calc_intersect(a, b, c, d):
    """Calculate the intersection of two lines.

    Args:
        a (complex): Start point of first line.
        b (complex): End point of first line.
        c (complex): Start point of second line.
        d (complex): End point of second line.

    Returns:
        complex: Location of intersection if one present, ``complex(NaN,NaN)``
        if no intersection was found.
    """
    ab = b - a
    cd = d - c
    # p is ab rotated 90 degrees; projecting onto it isolates the component
    # perpendicular to the first line.
    p = ab * 1j
    try:
        h = dot(p, a - c) / dot(p, cd)
    except ZeroDivisionError:
        # The lines are parallel (cd has no component across ab).
        return complex(NAN, NAN)
    return c + cd * h
|
||||
|
||||
|
||||
@cython.cfunc
@cython.returns(cython.int)
@cython.locals(
    tolerance=cython.double,
    p0=cython.complex,
    p1=cython.complex,
    p2=cython.complex,
    p3=cython.complex,
)
@cython.locals(mid=cython.complex, deriv3=cython.complex)
def cubic_farthest_fit_inside(p0, p1, p2, p3, tolerance):
    """Check if a cubic Bezier lies within a given distance of the origin.

    "Origin" means *the* origin (0,0), not the start of the curve. Note that no
    checks are made on the start and end positions of the curve; this function
    only checks the inside of the curve.

    Args:
        p0 (complex): Start point of curve.
        p1 (complex): First handle of curve.
        p2 (complex): Second handle of curve.
        p3 (complex): End point of curve.
        tolerance (double): Distance from origin.

    Returns:
        bool: True if the cubic Bezier ``p`` entirely lies within a distance
        ``tolerance`` of the origin, False otherwise.
    """
    # First check p2 then p1, as p2 has higher error early on.
    if abs(p2) <= tolerance and abs(p1) <= tolerance:
        return True

    # Split.
    mid = (p0 + 3 * (p1 + p2) + p3) * 0.125
    if abs(mid) > tolerance:
        return False
    deriv3 = (p3 + p2 - p1 - p0) * 0.125
    # Recurse into both halves; subdivision pulls control points toward the
    # curve, so the handle check above eventually succeeds or fails.
    return cubic_farthest_fit_inside(
        p0, (p0 + p1) * 0.5, mid - deriv3, mid, tolerance
    ) and cubic_farthest_fit_inside(mid, mid + deriv3, (p2 + p3) * 0.5, p3, tolerance)
|
||||
|
||||
|
||||
@cython.cfunc
@cython.inline
@cython.locals(tolerance=cython.double)
@cython.locals(
    q1=cython.complex,
    c0=cython.complex,
    c1=cython.complex,
    c2=cython.complex,
    c3=cython.complex,
)
def cubic_approx_quadratic(cubic, tolerance):
    """Approximate a cubic Bezier with a single quadratic within a given tolerance.

    Args:
        cubic (sequence): Four complex numbers representing control points of
            the cubic Bezier curve.
        tolerance (double): Permitted deviation from the original curve.

    Returns:
        Three complex numbers representing control points of the quadratic
        curve if it fits within the given tolerance, or ``None`` if no suitable
        curve could be calculated.
    """

    # Candidate quadratic handle: intersection of the cubic's end tangents.
    q1 = calc_intersect(cubic[0], cubic[1], cubic[2], cubic[3])
    if math.isnan(q1.imag):
        return None
    c0 = cubic[0]
    c3 = cubic[3]
    # Degree-elevate the candidate quadratic back to a cubic and compare its
    # handles with the original's: the difference curve must stay within
    # `tolerance` of the origin for the approximation to be acceptable.
    c1 = c0 + (q1 - c0) * (2 / 3)
    c2 = c3 + (q1 - c3) * (2 / 3)
    if not cubic_farthest_fit_inside(0, c1 - cubic[1], c2 - cubic[2], 0, tolerance):
        return None
    return c0, q1, c3
|
||||
|
||||
|
||||
@cython.cfunc
@cython.locals(n=cython.int, tolerance=cython.double)
@cython.locals(i=cython.int)
@cython.locals(all_quadratic=cython.int)
@cython.locals(
    c0=cython.complex, c1=cython.complex, c2=cython.complex, c3=cython.complex
)
@cython.locals(
    q0=cython.complex,
    q1=cython.complex,
    next_q1=cython.complex,
    q2=cython.complex,
    d1=cython.complex,
)
def cubic_approx_spline(cubic, n, tolerance, all_quadratic):
    """Approximate a cubic Bezier curve with a spline of n quadratics.

    Args:
        cubic (sequence): Four complex numbers representing control points of
            the cubic Bezier curve.
        n (int): Number of quadratic Bezier curves in the spline.
        tolerance (double): Permitted deviation from the original curve.
        all_quadratic (bool): If False and n == 2, the input cubic itself is
            an acceptable answer and is returned unchanged.

    Returns:
        A list of ``n+2`` complex numbers, representing control points of the
        quadratic spline if it fits within the given tolerance, or ``None`` if
        no suitable spline could be calculated.
    """

    if n == 1:
        return cubic_approx_quadratic(cubic, tolerance)
    # Fixed: idiomatic truth test instead of `all_quadratic == False` (E712).
    if n == 2 and not all_quadratic:
        return cubic

    cubics = split_cubic_into_n_iter(cubic[0], cubic[1], cubic[2], cubic[3], n)

    # calculate the spline of quadratics and check errors at the same time.
    next_cubic = next(cubics)
    next_q1 = cubic_approx_control(
        0, next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
    )
    q2 = cubic[0]
    d1 = 0j
    spline = [cubic[0], next_q1]
    for i in range(1, n + 1):
        # Current cubic to convert
        c0, c1, c2, c3 = next_cubic

        # Current quadratic approximation of current cubic
        q0 = q2
        q1 = next_q1
        if i < n:
            next_cubic = next(cubics)
            next_q1 = cubic_approx_control(
                i / (n - 1), next_cubic[0], next_cubic[1], next_cubic[2], next_cubic[3]
            )
            spline.append(next_q1)
            # Implied on-curve point midway between consecutive handles.
            q2 = (q1 + next_q1) * 0.5
        else:
            q2 = c3

        # End-point deltas
        d0 = d1
        d1 = q2 - c3

        # Degree-elevate the quadratic segment and compare it against the
        # corresponding sub-cubic; the difference curve must stay within
        # tolerance of the origin.
        if abs(d1) > tolerance or not cubic_farthest_fit_inside(
            d0,
            q0 + (q1 - q0) * (2 / 3) - c1,
            q2 + (q1 - q2) * (2 / 3) - c2,
            d1,
            tolerance,
        ):
            return None
    spline.append(cubic[3])

    return spline
|
||||
|
||||
|
||||
@cython.locals(max_err=cython.double)
@cython.locals(n=cython.int)
@cython.locals(all_quadratic=cython.int)
def curve_to_quadratic(curve, max_err, all_quadratic=True):
    """Approximate a cubic Bezier curve with a spline of n quadratics.

    Args:
        cubic (sequence): Four 2D tuples representing control points of
            the cubic Bezier curve.
        max_err (double): Permitted deviation from the original curve.
        all_quadratic (bool): If True (default) returned value is a
            quadratic spline. If False, it's either a single quadratic
            curve or a single cubic curve.

    Returns:
        If all_quadratic is True: A list of 2D tuples, representing
        control points of the quadratic spline if it fits within the
        given tolerance, or ``None`` if no suitable spline could be
        calculated.

        If all_quadratic is False: Either a quadratic curve (if length
        of output is 3), or a cubic curve (if length of output is 4).
    """

    # Work in the complex plane: (x, y) -> x + y*1j.
    curve = [complex(*p) for p in curve]

    # Try splines of 1, 2, ... MAX_N segments until one fits within max_err.
    for n in range(1, MAX_N + 1):
        spline = cubic_approx_spline(curve, n, max_err, all_quadratic)
        if spline is not None:
            # done. go home
            return [(s.real, s.imag) for s in spline]

    raise ApproxNotFoundError(curve)
|
||||
|
||||
|
||||
@cython.locals(l=cython.int, last_i=cython.int, i=cython.int)
@cython.locals(all_quadratic=cython.int)
def curves_to_quadratic(curves, max_errors, all_quadratic=True):
    """Return quadratic Bezier splines approximating the input cubic Beziers.

    Args:
        curves: A sequence of *n* curves, each curve being a sequence of four
            2D tuples.
        max_errors: A sequence of *n* floats representing the maximum permissible
            deviation from each of the cubic Bezier curves.
        all_quadratic (bool): If True (default) returned values are a
            quadratic spline. If False, they are either a single quadratic
            curve or a single cubic curve.

    Example::

        >>> curves_to_quadratic( [
        ... [ (50,50), (100,100), (150,100), (200,50) ],
        ... [ (75,50), (120,100), (150,75), (200,60) ]
        ... ], [1,1] )
        [[(50.0, 50.0), (75.0, 75.0), (125.0, 91.66666666666666), (175.0, 75.0), (200.0, 50.0)], [(75.0, 50.0), (97.5, 75.0), (135.41666666666666, 82.08333333333333), (175.0, 67.5), (200.0, 60.0)]]

    The returned splines have "implied oncurve points" suitable for use in
    TrueType ``glyf`` outlines - i.e. in the first spline returned above,
    the first quadratic segment runs from (50,50) to
    ( (75 + 125)/2 , (120 + 91.666..)/2 ) = (100, 83.333...).

    Returns:
        If all_quadratic is True, a list of splines, each spline being a list
        of 2D tuples.

        If all_quadratic is False, a list of curves, each curve being a quadratic
        (length 3), or cubic (length 4).

    Raises:
        fontTools.cu2qu.Errors.ApproxNotFoundError: if no suitable approximation
        can be found for all curves with the given parameters.
    """

    # Work in the complex plane: (x, y) -> x + y*1j.
    curves = [[complex(*p) for p in curve] for curve in curves]
    assert len(max_errors) == len(curves)

    l = len(curves)
    splines = [None] * l
    last_i = i = 0
    n = 1
    # Round-robin over all curves with a shared segment count n: whenever one
    # curve fails, bump n and restart the lap from that curve, so all splines
    # end up with the same number of segments (interpolation compatibility).
    while True:
        spline = cubic_approx_spline(curves[i], n, max_errors[i], all_quadratic)
        if spline is None:
            if n == MAX_N:
                break
            n += 1
            last_i = i
            continue
        splines[i] = spline
        i = (i + 1) % l
        # A full lap without failures means every curve fits at this n.
        if i == last_i:
            # done. go home
            return [[(s.real, s.imag) for s in spline] for spline in splines]

    raise ApproxNotFoundError(curves)
|
||||
77
venv/lib/python3.12/site-packages/fontTools/cu2qu/errors.py
Normal file
77
venv/lib/python3.12/site-packages/fontTools/cu2qu/errors.py
Normal file
@ -0,0 +1,77 @@
|
||||
# Copyright 2016 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
class Error(Exception):
    """Base Cu2Qu exception class for all other errors."""


class ApproxNotFoundError(Error):
    """Raised when no quadratic approximation within tolerance exists."""

    def __init__(self, curve):
        super().__init__("no approximation found: %s" % curve)
        # Keep the offending curve around for programmatic inspection.
        self.curve = curve


class UnequalZipLengthsError(Error):
    pass


class IncompatibleGlyphsError(Error):
    """Base for errors where glyphs converted together are not compatible."""

    def __init__(self, glyphs):
        assert len(glyphs) > 1
        self.glyphs = glyphs
        names = {repr(g.name) for g in glyphs}
        if len(names) > 1:
            self.combined_name = "{%s}" % ", ".join(sorted(names))
        else:
            self.combined_name = names.pop()

    def __repr__(self):
        return "<%s %s>" % (type(self).__name__, self.combined_name)


class IncompatibleSegmentNumberError(IncompatibleGlyphsError):
    def __str__(self):
        return "Glyphs named %s have different number of segments" % (
            self.combined_name
        )


class IncompatibleSegmentTypesError(IncompatibleGlyphsError):
    def __init__(self, glyphs, segments):
        IncompatibleGlyphsError.__init__(self, glyphs)
        # Mapping of segment index -> per-glyph segment type tags.
        self.segments = segments

    def __str__(self):
        ndigits = len(str(max(self.segments)))
        lines = [
            "%s: (%s)" % (str(i).rjust(ndigits), ", ".join(repr(t) for t in tags))
            for i, tags in sorted(self.segments.items())
        ]
        return "Glyphs named %s have incompatible segment types:\n %s" % (
            self.combined_name,
            "\n ".join(lines),
        )


class IncompatibleFontsError(Error):
    def __init__(self, glyph_errors):
        # Mapping of glyph name -> the incompatibility found for that glyph.
        self.glyph_errors = glyph_errors

    def __str__(self):
        offenders = sorted(self.glyph_errors.keys())
        return "fonts contains incompatible glyphs: %s" % (
            ", ".join(repr(g) for g in offenders)
        )
|
||||
349
venv/lib/python3.12/site-packages/fontTools/cu2qu/ufo.py
Normal file
349
venv/lib/python3.12/site-packages/fontTools/cu2qu/ufo.py
Normal file
@ -0,0 +1,349 @@
|
||||
# Copyright 2015 Google Inc. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
"""Converts cubic bezier curves to quadratic splines.
|
||||
|
||||
Conversion is performed such that the quadratic splines keep the same end-curve
|
||||
tangents as the original cubics. The approach is iterative, increasing the
|
||||
number of segments for a spline until the error gets below a bound.
|
||||
|
||||
Respective curves from multiple fonts will be converted at once to ensure that
|
||||
the resulting splines are interpolation-compatible.
|
||||
"""
|
||||
|
||||
import logging
|
||||
from fontTools.pens.basePen import AbstractPen
|
||||
from fontTools.pens.pointPen import PointToSegmentPen
|
||||
from fontTools.pens.reverseContourPen import ReverseContourPen
|
||||
|
||||
from . import curves_to_quadratic
|
||||
from .errors import (
|
||||
UnequalZipLengthsError,
|
||||
IncompatibleSegmentNumberError,
|
||||
IncompatibleSegmentTypesError,
|
||||
IncompatibleGlyphsError,
|
||||
IncompatibleFontsError,
|
||||
)
|
||||
|
||||
|
||||
__all__ = ["fonts_to_quadratic", "font_to_quadratic"]
|
||||
|
||||
# The default approximation error below is a relative value (1/1000 of the EM square).
# Later on, we convert it to absolute font units by multiplying it by a font's UPEM
# (see fonts_to_quadratic).
DEFAULT_MAX_ERR = 0.001

# Private font-lib key under which the converted curve type is remembered.
CURVE_TYPE_LIB_KEY = "com.github.googlei18n.cu2qu.curve_type"

logger = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Keep a reference to the builtin zip before shadowing it below.
_zip = zip


def zip(*args):
    """Ensure each argument to zip has the same length. Also make sure a list is
    returned for python 2/3 compatibility.

    Raises UnequalZipLengthsError if the arguments differ in length.
    """
    if len(set(len(a) for a in args)) != 1:
        raise UnequalZipLengthsError(*args)
    return list(_zip(*args))
|
||||
|
||||
|
||||
class GetSegmentsPen(AbstractPen):
    """Pen to collect segments into lists of points for conversion.

    Curves always include their initial on-curve point, so some points are
    duplicated between segments.
    """

    def __init__(self):
        self._last_pt = None
        self.segments = []

    def _add_segment(self, tag, *args):
        # Remember the last on-curve point so subsequent curves can be
        # stored with their starting point included.
        if tag in ["move", "line", "qcurve", "curve"]:
            self._last_pt = args[-1]
        self.segments.append((tag, args))

    def moveTo(self, pt):
        self._add_segment("move", pt)

    def lineTo(self, pt):
        self._add_segment("line", pt)

    def qCurveTo(self, *points):
        self._add_segment("qcurve", self._last_pt, *points)

    def curveTo(self, *points):
        self._add_segment("curve", self._last_pt, *points)

    def closePath(self):
        self._add_segment("close")

    def endPath(self):
        self._add_segment("end")

    def addComponent(self, glyphName, transformation):
        # Components are ignored; only outline segments are collected.
        pass
|
||||
|
||||
|
||||
def _get_segments(glyph):
    """Get a glyph's segments as extracted by GetSegmentsPen."""

    pen = GetSegmentsPen()
    # glyph.draw(pen)
    # We can't simply draw the glyph with the pen, but we must initialize the
    # PointToSegmentPen explicitly with outputImpliedClosingLine=True.
    # By default PointToSegmentPen does not outputImpliedClosingLine -- unless
    # last and first point on closed contour are duplicated. Because we are
    # converting multiple glyphs at the same time, we want to make sure
    # this function returns the same number of segments, whether or not
    # the last and first point overlap.
    # https://github.com/googlefonts/fontmake/issues/572
    # https://github.com/fonttools/fonttools/pull/1720
    pointPen = PointToSegmentPen(pen, outputImpliedClosingLine=True)
    glyph.drawPoints(pointPen)
    return pen.segments
|
||||
|
||||
|
||||
def _set_segments(glyph, segments, reverse_direction):
|
||||
"""Draw segments as extracted by GetSegmentsPen back to a glyph."""
|
||||
|
||||
glyph.clearContours()
|
||||
pen = glyph.getPen()
|
||||
if reverse_direction:
|
||||
pen = ReverseContourPen(pen)
|
||||
for tag, args in segments:
|
||||
if tag == "move":
|
||||
pen.moveTo(*args)
|
||||
elif tag == "line":
|
||||
pen.lineTo(*args)
|
||||
elif tag == "curve":
|
||||
pen.curveTo(*args[1:])
|
||||
elif tag == "qcurve":
|
||||
pen.qCurveTo(*args[1:])
|
||||
elif tag == "close":
|
||||
pen.closePath()
|
||||
elif tag == "end":
|
||||
pen.endPath()
|
||||
else:
|
||||
raise AssertionError('Unhandled segment type "%s"' % tag)
|
||||
|
||||
|
||||
def _segments_to_quadratic(segments, max_err, stats, all_quadratic=True):
    """Return quadratic approximations of cubic segments.

    All inputs must be "curve" segments; the converted segments share the
    same point count across glyphs ("compatible" conversion).
    """

    assert all(s[0] == "curve" for s in segments), "Non-cubic given to convert"

    new_points = curves_to_quadratic([s[1] for s in segments], max_err, all_quadratic)
    n = len(new_points[0])
    assert all(len(s) == n for s in new_points[1:]), "Converted incompatibly"

    # Record the resulting spline length for the conversion statistics.
    spline_length = str(n - 2)
    stats[spline_length] = stats.get(spline_length, 0) + 1

    if all_quadratic or n == 3:
        return [("qcurve", p) for p in new_points]
    else:
        # A 4-point result is still a cubic when mixed output is allowed.
        return [("curve", p) for p in new_points]
|
||||
|
||||
|
||||
def _glyphs_to_quadratic(glyphs, max_err, reverse_direction, stats, all_quadratic=True):
    """Do the actual conversion of a set of compatible glyphs, after arguments
    have been set up.

    Return True if the glyphs were modified, else return False.

    Raises IncompatibleSegmentNumberError / IncompatibleSegmentTypesError when
    the glyphs' outlines cannot be converted in an interpolation-compatible way.
    """

    try:
        # The module-level length-checking `zip` raises when glyphs disagree.
        segments_by_location = zip(*[_get_segments(g) for g in glyphs])
    except UnequalZipLengthsError:
        raise IncompatibleSegmentNumberError(glyphs)
    if not any(segments_by_location):
        return False

    # always modify input glyphs if reverse_direction is True
    glyphs_modified = reverse_direction

    new_segments_by_location = []
    incompatible = {}
    for i, segments in enumerate(segments_by_location):
        tag = segments[0][0]
        if not all(s[0] == tag for s in segments[1:]):
            # Collect all mismatches so the error reports every bad index.
            incompatible[i] = [s[0] for s in segments]
        elif tag == "curve":
            new_segments = _segments_to_quadratic(
                segments, max_err, stats, all_quadratic
            )
            if all_quadratic or new_segments != segments:
                glyphs_modified = True
            segments = new_segments
        new_segments_by_location.append(segments)

    if glyphs_modified:
        new_segments_by_glyph = zip(*new_segments_by_location)
        for glyph, new_segments in zip(glyphs, new_segments_by_glyph):
            _set_segments(glyph, new_segments, reverse_direction)

    if incompatible:
        raise IncompatibleSegmentTypesError(glyphs, segments=incompatible)
    return glyphs_modified
|
||||
|
||||
|
||||
def glyphs_to_quadratic(
    glyphs, max_err=None, reverse_direction=False, stats=None, all_quadratic=True
):
    """Convert the curves of a set of compatible of glyphs to quadratic.

    All curves will be converted to quadratic at once, ensuring interpolation
    compatibility. If this is not required, calling glyphs_to_quadratic with one
    glyph at a time may yield slightly more optimized results.

    Return True if glyphs were modified, else return False.

    Raises IncompatibleGlyphsError if glyphs have non-interpolatable outlines.
    """
    if stats is None:
        stats = {}

    if not max_err:
        # assume 1000 is the default UPEM
        max_err = DEFAULT_MAX_ERR * 1000

    # Accept either one error bound per glyph or a single shared bound.
    if isinstance(max_err, (list, tuple)):
        max_errors = max_err
    else:
        max_errors = [max_err] * len(glyphs)
    assert len(max_errors) == len(glyphs)

    return _glyphs_to_quadratic(
        glyphs, max_errors, reverse_direction, stats, all_quadratic
    )
|
||||
|
||||
|
||||
def fonts_to_quadratic(
    fonts,
    max_err_em=None,
    max_err=None,
    reverse_direction=False,
    stats=None,
    dump_stats=False,
    remember_curve_type=True,
    all_quadratic=True,
):
    """Convert the curves of a collection of fonts to quadratic.

    All curves will be converted to quadratic at once, ensuring interpolation
    compatibility. If this is not required, calling fonts_to_quadratic with one
    font at a time may yield slightly more optimized results.

    Return the set of modified glyph names if any, else return an empty set.

    By default, cu2qu stores the curve type in the fonts' lib, under a private
    key "com.github.googlei18n.cu2qu.curve_type", and will not try to convert
    them again if the curve type is already set to "quadratic".
    Setting 'remember_curve_type' to False disables this optimization.

    Raises IncompatibleFontsError if same-named glyphs from different fonts
    have non-interpolatable outlines.
    """

    if remember_curve_type:
        curve_types = {f.lib.get(CURVE_TYPE_LIB_KEY, "cubic") for f in fonts}
        if len(curve_types) == 1:
            curve_type = next(iter(curve_types))
            if curve_type in ("quadratic", "mixed"):
                logger.info("Curves already converted to quadratic")
                # Return an empty set (not False) to match the documented
                # return type; both are falsy, so boolean callers still work.
                return set()
            elif curve_type == "cubic":
                pass  # keep converting
            else:
                raise NotImplementedError(curve_type)
        elif len(curve_types) > 1:
            # going to crash later if they do differ
            logger.warning("fonts may contain different curve types")

    if stats is None:
        stats = {}

    if max_err_em and max_err:
        raise TypeError("Only one of max_err and max_err_em can be specified.")
    if not (max_err_em or max_err):
        max_err_em = DEFAULT_MAX_ERR

    # Normalize to one absolute error bound per font.
    if isinstance(max_err, (list, tuple)):
        assert len(max_err) == len(fonts)
        max_errors = max_err
    elif max_err:
        max_errors = [max_err] * len(fonts)

    if isinstance(max_err_em, (list, tuple)):
        assert len(fonts) == len(max_err_em)
        max_errors = [f.info.unitsPerEm * e for f, e in zip(fonts, max_err_em)]
    elif max_err_em:
        max_errors = [f.info.unitsPerEm * max_err_em for f in fonts]

    modified = set()
    glyph_errors = {}
    # Process the union of glyph names; a glyph may be missing in some fonts.
    for name in set().union(*(f.keys() for f in fonts)):
        glyphs = []
        cur_max_errors = []
        for font, error in zip(fonts, max_errors):
            if name in font:
                glyphs.append(font[name])
                cur_max_errors.append(error)
        try:
            if _glyphs_to_quadratic(
                glyphs, cur_max_errors, reverse_direction, stats, all_quadratic
            ):
                modified.add(name)
        except IncompatibleGlyphsError as exc:
            # Collect all incompatibilities before failing, for a full report.
            logger.error(exc)
            glyph_errors[name] = exc

    if glyph_errors:
        raise IncompatibleFontsError(glyph_errors)

    if modified and dump_stats:
        spline_lengths = sorted(stats.keys())
        logger.info(
            "New spline lengths: %s"
            % (", ".join("%s: %d" % (l, stats[l]) for l in spline_lengths))
        )

    if remember_curve_type:
        for font in fonts:
            curve_type = font.lib.get(CURVE_TYPE_LIB_KEY, "cubic")
            new_curve_type = "quadratic" if all_quadratic else "mixed"
            if curve_type != new_curve_type:
                font.lib[CURVE_TYPE_LIB_KEY] = new_curve_type
    return modified
|
||||
|
||||
|
||||
def glyph_to_quadratic(glyph, **kwargs):
    """Convenience wrapper around glyphs_to_quadratic, for just one glyph.
    Return True if the glyph was modified, else return False.
    """

    return glyphs_to_quadratic([glyph], **kwargs)
|
||||
|
||||
|
||||
def font_to_quadratic(font, **kwargs):
    """Convenience wrapper around fonts_to_quadratic, for just one font.
    Return the set of modified glyph names if any, else return empty set.
    """

    return fonts_to_quadratic([font], **kwargs)
|
||||
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,6 @@
|
||||
import sys

from fontTools.designspaceLib import main


# Allow `python -m fontTools.designspaceLib` to run the CLI entry point.
if __name__ == "__main__":
    sys.exit(main())
|
||||
@ -0,0 +1,475 @@
|
||||
"""Allows building all the variable fonts of a DesignSpace version 5 by
|
||||
splitting the document into interpolable sub-space, then into each VF.
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import itertools
|
||||
import logging
|
||||
import math
|
||||
from typing import Any, Callable, Dict, Iterator, List, Tuple, cast
|
||||
|
||||
from fontTools.designspaceLib import (
|
||||
AxisDescriptor,
|
||||
AxisMappingDescriptor,
|
||||
DesignSpaceDocument,
|
||||
DiscreteAxisDescriptor,
|
||||
InstanceDescriptor,
|
||||
RuleDescriptor,
|
||||
SimpleLocationDict,
|
||||
SourceDescriptor,
|
||||
VariableFontDescriptor,
|
||||
)
|
||||
from fontTools.designspaceLib.statNames import StatNames, getStatNames
|
||||
from fontTools.designspaceLib.types import (
|
||||
ConditionSet,
|
||||
Range,
|
||||
Region,
|
||||
getVFUserRegion,
|
||||
locationInRegion,
|
||||
regionInRegion,
|
||||
userRegionToDesignRegion,
|
||||
)
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
MakeInstanceFilenameCallable = Callable[
|
||||
[DesignSpaceDocument, InstanceDescriptor, StatNames], str
|
||||
]
|
||||
|
||||
|
||||
def defaultMakeInstanceFilename(
    doc: DesignSpaceDocument, instance: InstanceDescriptor, statNames: StatNames
) -> str:
    """Default callable to synthesize an instance filename
    when makeNames=True, for instances that don't specify an instance name
    in the designspace. This part of the name generation can be overridden
    because it's not specified by the STAT table.
    """
    # Explicit names on the instance win; fall back to English STAT names.
    familyName = instance.familyName or statNames.familyNames.get("en")
    styleName = instance.styleName or statNames.styleNames.get("en")
    return f"{familyName}-{styleName}.ttf"
|
||||
|
||||
|
||||
def splitInterpolable(
    doc: DesignSpaceDocument,
    makeNames: bool = True,
    expandLocations: bool = True,
    makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename,
) -> Iterator[Tuple[SimpleLocationDict, DesignSpaceDocument]]:
    """Split the given DS5 into several interpolable sub-designspaces.
    There are as many interpolable sub-spaces as there are combinations of
    discrete axis values.

    E.g. with axes:
        - italic (discrete) Upright or Italic
        - style (discrete) Sans or Serif
        - weight (continuous) 100 to 900

    There are 4 sub-spaces in which the Weight axis should interpolate:
    (Upright, Sans), (Upright, Serif), (Italic, Sans) and (Italic, Serif).

    The sub-designspaces still include the full axis definitions and STAT data,
    but the rules, sources, variable fonts, instances are trimmed down to only
    keep what falls within the interpolable sub-space.

    Args:
      - ``makeNames``: Whether to compute the instance family and style
        names using the STAT data.
      - ``expandLocations``: Whether to turn all locations into "full"
        locations, including implicit default axis values where missing.
      - ``makeInstanceFilename``: Callable to synthesize an instance filename
        when makeNames=True, for instances that don't specify an instance name
        in the designspace. This part of the name generation can be overridden
        because it's not specified by the STAT table.

    .. versionadded:: 5.0
    """
    discreteAxes = []
    interpolableUserRegion: Region = {}
    for axis in doc.axes:
        if hasattr(axis, "values"):
            # Mypy doesn't support narrowing union types via hasattr()
            # TODO(Python 3.10): use TypeGuard
            # https://mypy.readthedocs.io/en/stable/type_narrowing.html
            axis = cast(DiscreteAxisDescriptor, axis)
            discreteAxes.append(axis)
        else:
            axis = cast(AxisDescriptor, axis)
            interpolableUserRegion[axis.name] = Range(
                axis.minimum,
                axis.maximum,
                axis.default,
            )
    # One sub-document per combination of discrete axis values.
    valueCombinations = itertools.product(*[axis.values for axis in discreteAxes])
    for values in valueCombinations:
        discreteUserLocation = {
            discreteAxis.name: value
            for discreteAxis, value in zip(discreteAxes, values)
        }
        subDoc = _extractSubSpace(
            doc,
            {**interpolableUserRegion, **discreteUserLocation},
            keepVFs=True,
            makeNames=makeNames,
            expandLocations=expandLocations,
            makeInstanceFilename=makeInstanceFilename,
        )
        yield discreteUserLocation, subDoc
|
||||
|
||||
|
||||
def splitVariableFonts(
    doc: DesignSpaceDocument,
    makeNames: bool = False,
    expandLocations: bool = False,
    makeInstanceFilename: MakeInstanceFilenameCallable = defaultMakeInstanceFilename,
) -> Iterator[Tuple[str, DesignSpaceDocument]]:
    """Convert each variable font listed in this document into a standalone
    designspace. This can be used to compile all the variable fonts from a
    format 5 designspace using tools that can only deal with 1 VF at a time.

    Args:
      - ``makeNames``: Whether to compute the instance family and style
        names using the STAT data.
      - ``expandLocations``: Whether to turn all locations into "full"
        locations, including implicit default axis values where missing.
      - ``makeInstanceFilename``: Callable to synthesize an instance filename
        when makeNames=True, for instances that don't specify an instance name
        in the designspace. This part of the name generation can be overridden
        because it's not specified by the STAT table.

    .. versionadded:: 5.0
    """
    # Make one DesignspaceDoc v5 for each variable font
    for vf in doc.getVariableFonts():
        vfUserRegion = getVFUserRegion(doc, vf)
        vfDoc = _extractSubSpace(
            doc,
            vfUserRegion,
            keepVFs=False,
            makeNames=makeNames,
            expandLocations=expandLocations,
            makeInstanceFilename=makeInstanceFilename,
        )
        # The VF's own lib entries override inherited document lib entries.
        vfDoc.lib = {**vfDoc.lib, **vf.lib}
        yield vf.name, vfDoc
|
||||
|
||||
|
||||
def convert5to4(
    doc: DesignSpaceDocument,
) -> Dict[str, DesignSpaceDocument]:
    """Convert each variable font listed in this document into a standalone
    format 4 designspace. This can be used to compile all the variable fonts
    from a format 5 designspace using tools that only know about format 4.

    .. versionadded:: 5.0
    """
    vfs = {}
    # First split by discrete axes, then into one document per variable font.
    for _location, subDoc in splitInterpolable(doc):
        for vfName, vfDoc in splitVariableFonts(subDoc):
            vfDoc.formatVersion = "4.1"
            vfs[vfName] = vfDoc
    return vfs
|
||||
|
||||
|
||||
def _extractSubSpace(
    doc: DesignSpaceDocument,
    userRegion: Region,
    *,
    keepVFs: bool,
    makeNames: bool,
    expandLocations: bool,
    makeInstanceFilename: MakeInstanceFilenameCallable,
) -> DesignSpaceDocument:
    """Build a sub-designspace of ``doc`` restricted to ``userRegion``.

    Axes are clipped to the region, and rules, sources, variable fonts and
    instances that fall outside the region are dropped. STAT data is not
    copied into the sub-document.
    """
    subDoc = DesignSpaceDocument()
    # Don't include STAT info
    # FIXME: (Jany) let's think about it. Not include = OK because the point of
    # the splitting is to build VFs and we'll use the STAT data of the full
    # document to generate the STAT of the VFs, so "no need" to have STAT data
    # in sub-docs. Counterpoint: what if someone wants to split this DS for
    # other purposes? Maybe for that it would be useful to also subset the STAT
    # data?
    # subDoc.elidedFallbackName = doc.elidedFallbackName

    def maybeExpandDesignLocation(object):
        # Helper: return the full design location (with implicit defaults)
        # when expandLocations is requested, else the raw design location.
        if expandLocations:
            return object.getFullDesignLocation(doc)
        else:
            return object.designLocation

    for axis in doc.axes:
        range = userRegion[axis.name]
        if isinstance(range, Range) and hasattr(axis, "minimum"):
            # Mypy doesn't support narrowing union types via hasattr()
            # TODO(Python 3.10): use TypeGuard
            # https://mypy.readthedocs.io/en/stable/type_narrowing.html
            axis = cast(AxisDescriptor, axis)
            subDoc.addAxis(
                AxisDescriptor(
                    # Same info
                    tag=axis.tag,
                    name=axis.name,
                    labelNames=axis.labelNames,
                    hidden=axis.hidden,
                    # Subset range
                    minimum=max(range.minimum, axis.minimum),
                    default=range.default or axis.default,
                    maximum=min(range.maximum, axis.maximum),
                    map=[
                        (user, design)
                        for user, design in axis.map
                        if range.minimum <= user <= range.maximum
                    ],
                    # Don't include STAT info
                    axisOrdering=None,
                    axisLabels=None,
                )
            )

    # Keep only axis mappings whose axes survive in the sub-document.
    subDoc.axisMappings = mappings = []
    subDocAxes = {axis.name for axis in subDoc.axes}
    for mapping in doc.axisMappings:
        if not all(axis in subDocAxes for axis in mapping.inputLocation.keys()):
            continue
        if not all(axis in subDocAxes for axis in mapping.outputLocation.keys()):
            LOGGER.error(
                "In axis mapping from input %s, some output axes are not in the variable-font: %s",
                mapping.inputLocation,
                mapping.outputLocation,
            )
            continue

        mappingAxes = set()
        mappingAxes.update(mapping.inputLocation.keys())
        mappingAxes.update(mapping.outputLocation.keys())
        for axis in doc.axes:
            if axis.name not in mappingAxes:
                continue
            range = userRegion[axis.name]
            if (
                range.minimum != axis.minimum
                or (range.default is not None and range.default != axis.default)
                or range.maximum != axis.maximum
            ):
                LOGGER.error(
                    "Limiting axis ranges used in <mapping> elements not supported: %s",
                    axis.name,
                )
                continue

        mappings.append(
            AxisMappingDescriptor(
                inputLocation=mapping.inputLocation,
                outputLocation=mapping.outputLocation,
            )
        )

    # Don't include STAT info
    # subDoc.locationLabels = doc.locationLabels

    # Rules: subset them based on conditions
    designRegion = userRegionToDesignRegion(doc, userRegion)
    subDoc.rules = _subsetRulesBasedOnConditions(doc.rules, designRegion)
    subDoc.rulesProcessingLast = doc.rulesProcessingLast

    # Sources: keep only the ones that fall within the kept axis ranges
    for source in doc.sources:
        if not locationInRegion(doc.map_backward(source.designLocation), userRegion):
            continue

        subDoc.addSource(
            SourceDescriptor(
                filename=source.filename,
                path=source.path,
                font=source.font,
                name=source.name,
                designLocation=_filterLocation(
                    userRegion, maybeExpandDesignLocation(source)
                ),
                layerName=source.layerName,
                familyName=source.familyName,
                styleName=source.styleName,
                muteKerning=source.muteKerning,
                muteInfo=source.muteInfo,
                mutedGlyphNames=source.mutedGlyphNames,
            )
        )

    # Copy family name translations from the old default source to the new default
    vfDefault = subDoc.findDefault()
    oldDefault = doc.findDefault()
    if vfDefault is not None and oldDefault is not None:
        vfDefault.localisedFamilyName = oldDefault.localisedFamilyName

    # Variable fonts: keep only the ones that fall within the kept axis ranges
    if keepVFs:
        # Note: call getVariableFont() to make the implicit VFs explicit
        for vf in doc.getVariableFonts():
            vfUserRegion = getVFUserRegion(doc, vf)
            if regionInRegion(vfUserRegion, userRegion):
                subDoc.addVariableFont(
                    VariableFontDescriptor(
                        name=vf.name,
                        filename=vf.filename,
                        axisSubsets=[
                            axisSubset
                            for axisSubset in vf.axisSubsets
                            if isinstance(userRegion[axisSubset.name], Range)
                        ],
                        lib=vf.lib,
                    )
                )

    # Instances: same as Sources + compute missing names
    for instance in doc.instances:
        if not locationInRegion(instance.getFullUserLocation(doc), userRegion):
            continue

        if makeNames:
            statNames = getStatNames(doc, instance.getFullUserLocation(doc))
            familyName = instance.familyName or statNames.familyNames.get("en")
            styleName = instance.styleName or statNames.styleNames.get("en")
            subDoc.addInstance(
                InstanceDescriptor(
                    filename=instance.filename
                    or makeInstanceFilename(doc, instance, statNames),
                    path=instance.path,
                    font=instance.font,
                    name=instance.name or f"{familyName} {styleName}",
                    userLocation={} if expandLocations else instance.userLocation,
                    designLocation=_filterLocation(
                        userRegion, maybeExpandDesignLocation(instance)
                    ),
                    familyName=familyName,
                    styleName=styleName,
                    postScriptFontName=instance.postScriptFontName
                    or statNames.postScriptFontName,
                    styleMapFamilyName=instance.styleMapFamilyName
                    or statNames.styleMapFamilyNames.get("en"),
                    styleMapStyleName=instance.styleMapStyleName
                    or statNames.styleMapStyleName,
                    localisedFamilyName=instance.localisedFamilyName
                    or statNames.familyNames,
                    localisedStyleName=instance.localisedStyleName
                    or statNames.styleNames,
                    localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName
                    or statNames.styleMapFamilyNames,
                    localisedStyleMapStyleName=instance.localisedStyleMapStyleName
                    or {},
                    lib=instance.lib,
                )
            )
        else:
            subDoc.addInstance(
                InstanceDescriptor(
                    filename=instance.filename,
                    path=instance.path,
                    font=instance.font,
                    name=instance.name,
                    userLocation={} if expandLocations else instance.userLocation,
                    designLocation=_filterLocation(
                        userRegion, maybeExpandDesignLocation(instance)
                    ),
                    familyName=instance.familyName,
                    styleName=instance.styleName,
                    postScriptFontName=instance.postScriptFontName,
                    styleMapFamilyName=instance.styleMapFamilyName,
                    styleMapStyleName=instance.styleMapStyleName,
                    localisedFamilyName=instance.localisedFamilyName,
                    localisedStyleName=instance.localisedStyleName,
                    localisedStyleMapFamilyName=instance.localisedStyleMapFamilyName,
                    localisedStyleMapStyleName=instance.localisedStyleMapStyleName,
                    lib=instance.lib,
                )
            )

    subDoc.lib = doc.lib

    return subDoc
|
||||
|
||||
|
||||
def _conditionSetFrom(conditionSet: List[Dict[str, Any]]) -> ConditionSet:
    """Convert a rule's raw condition list into {axis name: Range}.

    Missing minimum/maximum bounds become -inf/+inf so containment checks
    work uniformly.
    """
    c: Dict[str, Range] = {}
    for condition in conditionSet:
        minimum, maximum = condition.get("minimum"), condition.get("maximum")
        c[condition["name"]] = Range(
            minimum if minimum is not None else -math.inf,
            maximum if maximum is not None else math.inf,
        )
    return c
|
||||
|
||||
|
||||
def _subsetRulesBasedOnConditions(
    rules: List[RuleDescriptor], designRegion: Region
) -> List[RuleDescriptor]:
    """Trim rules to the given design-space region.

    What rules to keep:
    - Keep the rule if any conditionset is relevant.
    - A conditionset is relevant if all conditions are relevant or it is empty.
    - A condition is relevant if
      - axis is point (C-AP),
        - and point in condition's range (C-AP-in)
          (in this case remove the condition because it's always true)
        - else (C-AP-out) whole conditionset can be discarded (condition false
          => conditionset false)
      - axis is range (C-AR),
        - (C-AR-all) and axis range fully contained in condition range: we can
          scrap the condition because it's always true
        - (C-AR-inter) and intersection(axis range, condition range) not empty:
          keep the condition with the smaller range (= intersection)
        - (C-AR-none) else, whole conditionset can be discarded
    """
    newRules: List[RuleDescriptor] = []
    for rule in rules:
        newRule: RuleDescriptor = RuleDescriptor(
            name=rule.name, conditionSets=[], subs=rule.subs
        )
        for conditionset in rule.conditionSets:
            cs = _conditionSetFrom(conditionset)
            newConditionset: List[Dict[str, Any]] = []
            discardConditionset = False
            for selectionName, selectionValue in designRegion.items():
                # TODO: Ensure that all(key in conditionset for key in region.keys())?
                if selectionName not in cs:
                    # raise Exception("Selection has different axes than the rules")
                    continue
                if isinstance(selectionValue, (float, int)):  # is point
                    # Case C-AP-in
                    if selectionValue in cs[selectionName]:
                        pass  # always matches, conditionset can stay empty for this one.
                    # Case C-AP-out
                    else:
                        discardConditionset = True
                else:  # is range
                    # Case C-AR-all
                    if selectionValue in cs[selectionName]:
                        pass  # always matches, conditionset can stay empty for this one.
                    else:
                        intersection = cs[selectionName].intersection(selectionValue)
                        # Case C-AR-inter
                        if intersection is not None:
                            newConditionset.append(
                                {
                                    "name": selectionName,
                                    "minimum": intersection.minimum,
                                    "maximum": intersection.maximum,
                                }
                            )
                        # Case C-AR-none
                        else:
                            discardConditionset = True
            if not discardConditionset:
                newRule.conditionSets.append(newConditionset)
        if newRule.conditionSets:
            newRules.append(newRule)

    return newRules
|
||||
|
||||
|
||||
def _filterLocation(
    userRegion: Region,
    location: Dict[str, float],
) -> Dict[str, float]:
    """Drop coordinates of ``location`` that are not continuous (Range) axes
    of ``userRegion`` — i.e. remove discrete-axis and unknown-axis entries.
    """
    return {
        name: value
        for name, value in location.items()
        if name in userRegion and isinstance(userRegion[name], Range)
    }
|
||||
@ -0,0 +1,253 @@
|
||||
"""Compute name information for a given location in user-space coordinates
|
||||
using STAT data. This can be used to fill-in automatically the names of an
|
||||
instance:
|
||||
|
||||
.. code:: python
|
||||
|
||||
instance = doc.instances[0]
|
||||
names = getStatNames(doc, instance.getFullUserLocation(doc))
|
||||
print(names.styleNames)
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, Optional, Tuple, Union
|
||||
import logging
|
||||
|
||||
from fontTools.designspaceLib import (
|
||||
AxisDescriptor,
|
||||
AxisLabelDescriptor,
|
||||
DesignSpaceDocument,
|
||||
DesignSpaceDocumentError,
|
||||
DiscreteAxisDescriptor,
|
||||
SimpleLocationDict,
|
||||
SourceDescriptor,
|
||||
)
|
||||
|
||||
LOGGER = logging.getLogger(__name__)
|
||||
|
||||
# TODO(Python 3.8): use Literal
# RibbiStyleName = Union[Literal["regular"], Literal["bold"], Literal["italic"], Literal["bold italic"]]
# Plain-string alias for one of the four RIBBI style names below.
RibbiStyle = str
# Maps the (bold, italic) flag pair computed by _getRibbiStyle to the
# corresponding styleMap style name.
BOLD_ITALIC_TO_RIBBI_STYLE = {
    (False, False): "regular",
    (False, True): "italic",
    (True, False): "bold",
    (True, True): "bold italic",
}
|
||||
|
||||
|
||||
@dataclass
class StatNames:
    """Name data generated from the STAT table information."""

    # Localized family names; keyed by language tag ("en" is always set when
    # the data could be computed — see getStatNames).
    familyNames: Dict[str, str]
    # Localized style names, same keying as familyNames.
    styleNames: Dict[str, str]
    # "Family-Style" with spaces stripped, or None when there was not enough
    # STAT data to compute it.
    postScriptFontName: Optional[str]
    # Localized styleMap family names; empty when they could not be computed.
    styleMapFamilyNames: Dict[str, str]
    # One of "regular"/"bold"/"italic"/"bold italic", or None.
    styleMapStyleName: Optional[RibbiStyle]
|
||||
|
||||
|
||||
def getStatNames(
    doc: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> StatNames:
    """Compute the family, style, PostScript names of the given ``userLocation``
    using the document's STAT information.

    Also computes localizations.

    If not enough STAT data is available for a given name, either its dict of
    localized names will be empty (family and style names), or the name will be
    None (PostScript name).

    .. versionadded:: 5.0
    """
    familyNames: Dict[str, str] = {}
    # The family name comes from the default source, not from STAT data.
    defaultSource: Optional[SourceDescriptor] = doc.findDefault()
    if defaultSource is None:
        LOGGER.warning("Cannot determine default source to look up family name.")
    elif defaultSource.familyName is None:
        LOGGER.warning(
            "Cannot look up family name, assign the 'familyname' attribute to the default source."
        )
    else:
        # "en" is the untranslated name; explicit localisations override it.
        familyNames = {
            "en": defaultSource.familyName,
            **defaultSource.localisedFamilyName,
        }

    styleNames: Dict[str, str] = {}
    # If a free-standing label matches the location, use it for name generation.
    label = doc.labelForUserLocation(userLocation)
    if label is not None:
        styleNames = {"en": label.name, **label.labelNames}
    # Otherwise, scour the axis labels for matches.
    else:
        # Gather all languages in which at least one translation is provided
        # Then build names for all these languages, but fallback to English
        # whenever a translation is missing.
        labels = _getAxisLabelsForUserLocation(doc.axes, userLocation)
        if labels:
            languages = set(
                language for label in labels for language in label.labelNames
            )
            languages.add("en")
            for language in languages:
                # Elidable labels (e.g. "Regular") are skipped when composing
                # the style name.
                styleName = " ".join(
                    label.labelNames.get(language, label.defaultName)
                    for label in labels
                    if not label.elidable
                )
                # All labels elided: fall back to the document-level name.
                if not styleName and doc.elidedFallbackName is not None:
                    styleName = doc.elidedFallbackName
                styleNames[language] = styleName

    if "en" not in familyNames or "en" not in styleNames:
        # Not enough information to compute PS names of styleMap names
        return StatNames(
            familyNames=familyNames,
            styleNames=styleNames,
            postScriptFontName=None,
            styleMapFamilyNames={},
            styleMapStyleName=None,
        )

    # PostScript convention: "Family-Style" with all spaces removed.
    postScriptFontName = f"{familyNames['en']}-{styleNames['en']}".replace(" ", "")

    styleMapStyleName, regularUserLocation = _getRibbiStyle(doc, userLocation)

    styleNamesForStyleMap = styleNames
    # When this instance is a Bold/Italic of some Regular, the styleMap
    # family name must use the Regular's style names — recurse to get them.
    # (regularUserLocation differs from userLocation only on linked axes,
    # so the recursion terminates.)
    if regularUserLocation != userLocation:
        regularStatNames = getStatNames(doc, regularUserLocation)
        styleNamesForStyleMap = regularStatNames.styleNames

    styleMapFamilyNames = {}
    # Build a styleMap family name for every language that has either a
    # family or a style translation, falling back to English for the other.
    for language in set(familyNames).union(styleNames.keys()):
        familyName = familyNames.get(language, familyNames["en"])
        styleName = styleNamesForStyleMap.get(language, styleNamesForStyleMap["en"])
        styleMapFamilyNames[language] = (familyName + " " + styleName).strip()

    return StatNames(
        familyNames=familyNames,
        styleNames=styleNames,
        postScriptFontName=postScriptFontName,
        styleMapFamilyNames=styleMapFamilyNames,
        styleMapStyleName=styleMapStyleName,
    )
|
||||
|
||||
|
||||
def _getSortedAxisLabels(
|
||||
axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
|
||||
) -> Dict[str, list[AxisLabelDescriptor]]:
|
||||
"""Returns axis labels sorted by their ordering, with unordered ones appended as
|
||||
they are listed."""
|
||||
|
||||
# First, get the axis labels with explicit ordering...
|
||||
sortedAxes = sorted(
|
||||
(axis for axis in axes if axis.axisOrdering is not None),
|
||||
key=lambda a: a.axisOrdering,
|
||||
)
|
||||
sortedLabels: Dict[str, list[AxisLabelDescriptor]] = {
|
||||
axis.name: axis.axisLabels for axis in sortedAxes
|
||||
}
|
||||
|
||||
# ... then append the others in the order they appear.
|
||||
# NOTE: This relies on Python 3.7+ dict's preserved insertion order.
|
||||
for axis in axes:
|
||||
if axis.axisOrdering is None:
|
||||
sortedLabels[axis.name] = axis.axisLabels
|
||||
|
||||
return sortedLabels
|
||||
|
||||
|
||||
def _getAxisLabelsForUserLocation(
    axes: list[Union[AxisDescriptor, DiscreteAxisDescriptor]],
    userLocation: SimpleLocationDict,
) -> list[AxisLabelDescriptor]:
    """Return, per axis (in label-sorted axis order), the first label that
    matches the user location on that axis.

    A label matches when its ``userValue`` equals the location's value, or
    when the value falls inside its ``[userMinimum, userMaximum]`` range.
    Axes for which no label matches are skipped (with a debug message).
    """
    labels: list[AxisLabelDescriptor] = []

    allAxisLabels = _getSortedAxisLabels(axes)
    if allAxisLabels.keys() != userLocation.keys():
        LOGGER.warning(
            f"Mismatch between user location '{userLocation.keys()}' and available "
            f"labels for '{allAxisLabels.keys()}'."
        )

    for axisName, axisLabels in allAxisLabels.items():
        userValue = userLocation[axisName]
        # Idiom fix: the generator variable was named "l", which PEP 8 (E741)
        # flags as ambiguous; renamed to "candidate". Behavior is unchanged.
        label: Optional[AxisLabelDescriptor] = next(
            (
                candidate
                for candidate in axisLabels
                if candidate.userValue == userValue
                or (
                    candidate.userMinimum is not None
                    and candidate.userMaximum is not None
                    and candidate.userMinimum <= userValue <= candidate.userMaximum
                )
            ),
            None,
        )
        if label is None:
            LOGGER.debug(
                f"Document needs a label for axis '{axisName}', user value '{userValue}'."
            )
        else:
            labels.append(label)

    return labels
|
||||
|
||||
|
||||
def _getRibbiStyle(
    self: DesignSpaceDocument, userLocation: SimpleLocationDict
) -> Tuple[RibbiStyle, SimpleLocationDict]:
    """Compute the RIBBI style name of the given user location,
    return the location of the matching Regular in the RIBBI group.

    .. versionadded:: 5.0
    """
    # Axes whose value must be moved back to the linked Regular/Upright value.
    regularUserLocation = {}
    axes_by_tag = {axis.tag: axis for axis in self.axes}

    bold: bool = False
    italic: bool = False

    # Boldness is carried by the 'wght' axis, if present.
    # NOTE(review): userLocation[axis.name] raises KeyError if the location
    # does not mention the axis — callers appear to pass full locations.
    axis = axes_by_tag.get("wght")
    if axis is not None:
        for regular_label in axis.axisLabels:
            if (
                regular_label.linkedUserValue == userLocation[axis.name]
                # In the "recursive" case where both the Regular has
                # linkedUserValue pointing the Bold, and the Bold has
                # linkedUserValue pointing to the Regular, only consider the
                # first case: Regular (e.g. 400) has linkedUserValue pointing to
                # Bold (e.g. 700, higher than Regular)
                and regular_label.userValue < regular_label.linkedUserValue
            ):
                regularUserLocation[axis.name] = regular_label.userValue
                bold = True
                break

    # Italic-ness is carried by 'ital' or, failing that, 'slnt'.
    axis = axes_by_tag.get("ital") or axes_by_tag.get("slnt")
    if axis is not None:
        for upright_label in axis.axisLabels:
            if (
                upright_label.linkedUserValue == userLocation[axis.name]
                # In the "recursive" case where both the Upright has
                # linkedUserValue pointing the Italic, and the Italic has
                # linkedUserValue pointing to the Upright, only consider the
                # first case: Upright (e.g. ital=0, slant=0) has
                # linkedUserValue pointing to Italic (e.g ital=1, slant=-12 or
                # slant=12 for backwards italics, in any case higher than
                # Upright in absolute value, hence the abs() below.
                and abs(upright_label.userValue) < abs(upright_label.linkedUserValue)
            ):
                regularUserLocation[axis.name] = upright_label.userValue
                italic = True
                break

    # Second element: the input location with the bold/italic axes snapped
    # back to their linked Regular values.
    return BOLD_ITALIC_TO_RIBBI_STYLE[bold, italic], {
        **userLocation,
        **regularUserLocation,
    }
|
||||
@ -0,0 +1,147 @@
|
||||
from __future__ import annotations
|
||||
|
||||
from dataclasses import dataclass
|
||||
from typing import Dict, List, Optional, Union, cast
|
||||
|
||||
from fontTools.designspaceLib import (
|
||||
AxisDescriptor,
|
||||
DesignSpaceDocument,
|
||||
DesignSpaceDocumentError,
|
||||
RangeAxisSubsetDescriptor,
|
||||
SimpleLocationDict,
|
||||
ValueAxisSubsetDescriptor,
|
||||
VariableFontDescriptor,
|
||||
)
|
||||
|
||||
|
||||
def clamp(value, minimum, maximum):
    """Return *value* limited to the inclusive interval [minimum, maximum]."""
    if value < minimum:
        return minimum
    if value > maximum:
        return maximum
    return value
|
||||
|
||||
|
||||
@dataclass
class Range:
    """An inclusive interval on one axis, with a default value clamped to it."""

    minimum: float
    """Inclusive minimum of the range."""
    maximum: float
    """Inclusive maximum of the range."""
    default: float = 0
    """Default value"""

    def __post_init__(self):
        # Callers may pass the bounds in either order; normalise them.
        if self.maximum < self.minimum:
            self.minimum, self.maximum = self.maximum, self.minimum
        # Pull the default inside the (normalised) bounds.
        self.default = min(max(self.default, self.minimum), self.maximum)

    def __contains__(self, value: Union[float, Range]) -> bool:
        # A single value is contained when it lies within the bounds; a Range
        # is contained when it is entirely inside this one.
        if not isinstance(value, Range):
            return self.minimum <= value <= self.maximum
        return value.minimum >= self.minimum and value.maximum <= self.maximum

    def intersection(self, other: Range) -> Optional[Range]:
        """Return the overlap of the two ranges, or None when disjoint."""
        lo = max(self.minimum, other.minimum)
        hi = min(self.maximum, other.maximum)
        if lo > hi:
            return None
        # We don't care about the default in this use-case; keep ours.
        return Range(lo, hi, self.default)
|
||||
|
||||
|
||||
# A region selection is either a range or a single value, as a Designspace v5
# axis-subset element only allows a single discrete value or a range for a
# variable-font element.
Region = Dict[str, Union[Range, float]]

# A conditionset is a set of named ranges.
ConditionSet = Dict[str, Range]

# A rule is a list of conditionsets where any has to be relevant for the whole rule to be relevant.
Rule = List[ConditionSet]
# Rules keyed by name (presumably the rule's name attribute — verify at call sites).
Rules = Dict[str, Rule]
|
||||
|
||||
|
||||
def locationInRegion(location: SimpleLocationDict, region: Region) -> bool:
    """Return True when every axis of *location* falls within *region*.

    A numeric region entry must match exactly; any other entry (a Range)
    is tested with the ``in`` operator. An axis missing from *region*
    makes the location fall outside it.
    """
    for axisName, axisValue in location.items():
        if axisName not in region:
            return False
        selection = region[axisName]
        if isinstance(selection, (float, int)):
            if axisValue != selection:
                return False
        elif axisValue not in selection:
            return False
    return True
|
||||
|
||||
|
||||
def regionInRegion(region: Region, superRegion: Region) -> bool:
    """Return True when *region* is entirely contained in *superRegion*.

    Every axis of *region* must exist in *superRegion*; a pinned (numeric)
    super value only contains the identical value, while a Range super value
    contains anything accepted by its ``in`` operator (single values and
    sub-ranges alike).
    """
    for name, value in region.items():
        # Idiom fix: "name not in superRegion" instead of "not name in ...".
        if name not in superRegion:
            return False
        superValue = superRegion[name]
        if isinstance(superValue, (float, int)):
            if value != superValue:
                return False
        else:
            if value not in superValue:
                return False
    return True
|
||||
|
||||
|
||||
def userRegionToDesignRegion(doc: DesignSpaceDocument, userRegion: Region) -> Region:
    """Map a user-space region to design space through each axis' avar-like map.

    Raises DesignSpaceDocumentError when an axis named in the region is
    missing from the document.
    """
    designRegion = {}
    for axisName, selection in userRegion.items():
        axis = doc.getAxis(axisName)
        if axis is None:
            raise DesignSpaceDocumentError(
                f"Cannot find axis named '{axisName}' for region."
            )
        if isinstance(selection, (float, int)):
            # Pinned axis: map the single value forward.
            designRegion[axisName] = axis.map_forward(selection)
            continue
        # Range: map each bound and the default forward.
        designRegion[axisName] = Range(
            axis.map_forward(selection.minimum),
            axis.map_forward(selection.maximum),
            axis.map_forward(selection.default),
        )
    return designRegion
|
||||
|
||||
|
||||
def getVFUserRegion(doc: DesignSpaceDocument, vf: VariableFontDescriptor) -> Region:
    """Build the user-space region covered by the given variable font.

    Raises DesignSpaceDocumentError for unknown axes or for a range subset
    over a discrete axis.
    """
    vfUserRegion: Region = {}
    # For each axis, 2 cases:
    # - it has a range = it's an axis in the VF DS
    # - it's a single location = use it to know which rules should apply in the VF
    for axisSubset in vf.axisSubsets:
        axis = doc.getAxis(axisSubset.name)
        if axis is None:
            raise DesignSpaceDocumentError(
                f"Cannot find axis named '{axisSubset.name}' for variable font '{vf.name}'."
            )
        if hasattr(axisSubset, "userMinimum"):
            # Mypy doesn't support narrowing union types via hasattr()
            # TODO(Python 3.10): use TypeGuard
            # https://mypy.readthedocs.io/en/stable/type_narrowing.html
            axisSubset = cast(RangeAxisSubsetDescriptor, axisSubset)
            # Discrete axes have no 'minimum'; a range subset over one is invalid.
            if not hasattr(axis, "minimum"):
                raise DesignSpaceDocumentError(
                    f"Cannot select a range over '{axis.name}' for variable font '{vf.name}' "
                    "because it's a discrete axis, use only 'userValue' instead."
                )
            axis = cast(AxisDescriptor, axis)
            # Clip the requested range to the axis' own extent.
            # NOTE(review): 'or' makes a falsy userDefault (0) fall back to
            # axis.default — confirm that 0 can never be a deliberate default here.
            vfUserRegion[axis.name] = Range(
                max(axisSubset.userMinimum, axis.minimum),
                min(axisSubset.userMaximum, axis.maximum),
                axisSubset.userDefault or axis.default,
            )
        else:
            axisSubset = cast(ValueAxisSubsetDescriptor, axisSubset)
            vfUserRegion[axis.name] = axisSubset.userValue
    # Any axis not mentioned explicitly has a single location = default value
    for axis in doc.axes:
        if axis.name not in vfUserRegion:
            assert isinstance(
                axis.default, (int, float)
            ), f"Axis '{axis.name}' has no valid default value."
            vfUserRegion[axis.name] = axis.default
    return vfUserRegion
|
||||
@ -0,0 +1,258 @@
|
||||
# The 256 glyph names of the Mac OS Roman encoding, indexed by byte value.
# Written as a whitespace-separated string (8 names per row of 16-name pairs)
# and split into the same list of 256 strings as the literal form.
MacRoman = """
    NUL Eth eth Lslash lslash Scaron scaron Yacute
    yacute HT LF Thorn thorn CR Zcaron zcaron
    DLE DC1 DC2 DC3 DC4 onehalf onequarter onesuperior
    threequarters threesuperior twosuperior brokenbar minus multiply RS US
    space exclam quotedbl numbersign dollar percent ampersand quotesingle
    parenleft parenright asterisk plus comma hyphen period slash
    zero one two three four five six seven
    eight nine colon semicolon less equal greater question
    at A B C D E F G
    H I J K L M N O
    P Q R S T U V W
    X Y Z bracketleft backslash bracketright asciicircum underscore
    grave a b c d e f g
    h i j k l m n o
    p q r s t u v w
    x y z braceleft bar braceright asciitilde DEL
    Adieresis Aring Ccedilla Eacute Ntilde Odieresis Udieresis aacute
    agrave acircumflex adieresis atilde aring ccedilla eacute egrave
    ecircumflex edieresis iacute igrave icircumflex idieresis ntilde oacute
    ograve ocircumflex odieresis otilde uacute ugrave ucircumflex udieresis
    dagger degree cent sterling section bullet paragraph germandbls
    registered copyright trademark acute dieresis notequal AE Oslash
    infinity plusminus lessequal greaterequal yen mu partialdiff summation
    product pi integral ordfeminine ordmasculine Omega ae oslash
    questiondown exclamdown logicalnot radical florin approxequal Delta guillemotleft
    guillemotright ellipsis nbspace Agrave Atilde Otilde OE oe
    endash emdash quotedblleft quotedblright quoteleft quoteright divide lozenge
    ydieresis Ydieresis fraction currency guilsinglleft guilsinglright fi fl
    daggerdbl periodcentered quotesinglbase quotedblbase perthousand Acircumflex Ecircumflex Aacute
    Edieresis Egrave Iacute Icircumflex Idieresis Igrave Oacute Ocircumflex
    apple Ograve Uacute Ucircumflex Ugrave dotlessi circumflex tilde
    macron breve dotaccent ring cedilla hungarumlaut ogonek caron
""".split()
|
||||
@ -0,0 +1,258 @@
|
||||
# The 256 glyph names of the Adobe Standard Encoding, indexed by code.
# Long runs of ".notdef" are expressed as list repetition; the result is the
# same list of 256 strings as the literal form.
StandardEncoding = (
    [".notdef"] * 32  # 0-31: control range, unencoded
    + """
    space exclam quotedbl numbersign dollar percent ampersand quoteright
    parenleft parenright asterisk plus comma hyphen period slash
    zero one two three four five six seven
    eight nine colon semicolon less equal greater question
    at A B C D E F G
    H I J K L M N O
    P Q R S T U V W
    X Y Z bracketleft backslash bracketright asciicircum underscore
    quoteleft a b c d e f g
    h i j k l m n o
    p q r s t u v w
    x y z braceleft bar braceright asciitilde
    """.split()  # 32-126
    + [".notdef"] * 34  # 127-160
    + """
    exclamdown cent sterling fraction yen florin section currency
    quotesingle quotedblleft guillemotleft guilsinglleft guilsinglright fi fl
    """.split()  # 161-175
    + [".notdef"]  # 176
    + "endash dagger daggerdbl periodcentered".split()  # 177-180
    + [".notdef"]  # 181
    + """
    paragraph bullet quotesinglbase quotedblbase quotedblright guillemotright
    ellipsis perthousand
    """.split()  # 182-189
    + [".notdef", "questiondown", ".notdef"]  # 190-192
    + "grave acute circumflex tilde macron breve dotaccent dieresis".split()  # 193-200
    + [".notdef", "ring", "cedilla", ".notdef"]  # 201-204
    + "hungarumlaut ogonek caron emdash".split()  # 205-208
    + [".notdef"] * 16  # 209-224
    + ["AE", ".notdef", "ordfeminine"]  # 225-227
    + [".notdef"] * 4  # 228-231
    + "Lslash Oslash OE ordmasculine".split()  # 232-235
    + [".notdef"] * 5  # 236-240
    + ["ae"]  # 241
    + [".notdef"] * 3  # 242-244
    + ["dotlessi"]  # 245
    + [".notdef"] * 2  # 246-247
    + "lslash oslash oe germandbls".split()  # 248-251
    + [".notdef"] * 4  # 252-255
)
|
||||
@ -0,0 +1 @@
|
||||
"""Empty __init__.py file to signal Python this directory is a package."""
|
||||
135
venv/lib/python3.12/site-packages/fontTools/encodings/codecs.py
Normal file
135
venv/lib/python3.12/site-packages/fontTools/encodings/codecs.py
Normal file
@ -0,0 +1,135 @@
|
||||
"""Extend the Python codecs module with a few encodings that are used in OpenType (name table)
|
||||
but missing from Python. See https://github.com/fonttools/fonttools/issues/236 for details."""
|
||||
|
||||
import codecs
|
||||
import encodings
|
||||
|
||||
|
||||
class ExtendCodec(codecs.Codec):
    """A codec that extends *base_encoding* with extra byte<->char mappings.

    Encoding/decoding is delegated to the base codec; bytes/characters the
    base codec cannot handle are resolved through *mapping* via a registered
    error handler named after the codec.
    """

    def __init__(self, name, base_encoding, mapping):
        # mapping: bytes -> str additions on top of the base encoding.
        self.name = name
        self.base_encoding = base_encoding
        self.mapping = mapping
        # Reverse mapping (str -> bytes) for encoding.
        self.reverse = {v: k for k, v in mapping.items()}
        # Longest mapped string; bounds the lookahead in error().
        self.max_len = max(len(v) for v in mapping.values())
        self.info = codecs.CodecInfo(
            name=self.name, encode=self.encode, decode=self.decode
        )
        # Register our error handler under the codec's own name, so the base
        # codec can call back into it for unmappable input.
        codecs.register_error(name, self.error)

    def _map(self, mapper, output_type, exc_type, input, errors):
        """Shared encode/decode loop: convert with the base codec, resolving
        failures first through our mapping, then through the caller-requested
        *errors* handler."""
        base_error_handler = codecs.lookup_error(errors)
        length = len(input)
        out = output_type()
        while input:
            # first try to use self.error as the error handler
            try:
                part = mapper(input, self.base_encoding, errors=self.name)
                out += part
                break  # All converted
            except exc_type as e:
                # else convert the correct part, handle error as requested and continue
                out += mapper(input[: e.start], self.base_encoding, self.name)
                replacement, pos = base_error_handler(e)
                out += replacement
                input = input[pos:]
        return out, length

    def encode(self, input, errors="strict"):
        return self._map(codecs.encode, bytes, UnicodeEncodeError, input, errors)

    def decode(self, input, errors="strict"):
        return self._map(codecs.decode, str, UnicodeDecodeError, input, errors)

    def error(self, e):
        """Error handler: translate the failing span through our mapping, or
        re-raise (tagged with this codec's name) if it is not mapped."""
        if isinstance(e, UnicodeDecodeError):
            # Try progressively longer byte prefixes of the failing span.
            for end in range(e.start + 1, e.end + 1):
                s = e.object[e.start : end]
                if s in self.mapping:
                    return self.mapping[s], end
        elif isinstance(e, UnicodeEncodeError):
            # Try progressively longer character prefixes, up to the longest
            # mapped string.
            for end in range(e.start + 1, e.start + self.max_len + 1):
                s = e.object[e.start : end]
                if s in self.reverse:
                    return self.reverse[s], end
        e.encoding = self.name
        raise e
|
||||
|
||||
|
||||
# Encodings provided by this module, keyed by the "_ttx"-suffixed name used in
# TTX dumps. Each value is (base encoding, extra byte -> character mapping)
# consumed by ExtendCodec.
_extended_encodings = {
    "x_mac_japanese_ttx": (
        "shift_jis",
        {
            b"\xFC": chr(0x007C),
            b"\x7E": chr(0x007E),
            b"\x80": chr(0x005C),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_trad_chinese_ttx": (
        "big5",
        {
            b"\x80": chr(0x005C),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_korean_ttx": (
        "euc_kr",
        {
            b"\x80": chr(0x00A0),
            b"\x81": chr(0x20A9),
            b"\x82": chr(0x2014),
            b"\x83": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
    "x_mac_simp_chinese_ttx": (
        "gb2312",
        {
            b"\x80": chr(0x00FC),
            b"\xA0": chr(0x00A0),
            b"\xFD": chr(0x00A9),
            b"\xFE": chr(0x2122),
            b"\xFF": chr(0x2026),
        },
    ),
}
|
||||
|
||||
_cache = {}
|
||||
|
||||
|
||||
def search_function(name):
    """Codec search function registered with the codecs module.

    Returns a CodecInfo for the "_ttx" encodings defined above (building and
    caching the ExtendCodec on first use), or None for any other name.
    """
    name = encodings.normalize_encoding(name)  # Rather undocumented...
    if name in _extended_encodings:
        if name not in _cache:
            base_encoding, mapping = _extended_encodings[name]
            assert name[-4:] == "_ttx"
            # Python 2 didn't have any of the encodings that we are implementing
            # in this file. Python 3 added aliases for the East Asian ones, mapping
            # them "temporarily" to the same base encoding as us, with a comment
            # suggesting that full implementation will appear some time later.
            # As such, try the Python version of the x_mac_... first, if that is found,
            # use *that* as our base encoding. This would make our encoding upgrade
            # to the full encoding when and if Python finally implements that.
            # http://bugs.python.org/issue24041
            base_encodings = [name[:-4], base_encoding]
            # NOTE(review): if neither candidate base encoding is available,
            # the loop completes without caching and the lookup below raises
            # KeyError — presumably at least the fallback always exists.
            for base_encoding in base_encodings:
                try:
                    codecs.lookup(base_encoding)
                except LookupError:
                    continue
                _cache[name] = ExtendCodec(name, base_encoding, mapping)
                break
        return _cache[name].info

    return None
|
||||
|
||||
|
||||
codecs.register(search_function)
|
||||
@ -0,0 +1,4 @@
|
||||
"""fontTools.feaLib -- a package for dealing with OpenType feature files."""
|
||||
|
||||
# The structure of OpenType feature files is defined here:
|
||||
# http://www.adobe.com/devnet/opentype/afdko/topic_feature_file_syntax.html
|
||||
@ -0,0 +1,78 @@
|
||||
from fontTools.ttLib import TTFont
|
||||
from fontTools.feaLib.builder import addOpenTypeFeatures, Builder
|
||||
from fontTools.feaLib.error import FeatureLibError
|
||||
from fontTools import configLogger
|
||||
from fontTools.misc.cliTools import makeOutputFileName
|
||||
import sys
|
||||
import argparse
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.feaLib")
|
||||
|
||||
|
||||
def main(args=None):
    """Add features from a feature file (.fea) into an OTF font"""
    parser = argparse.ArgumentParser(
        description="Use fontTools to compile OpenType feature files (*.fea)."
    )
    parser.add_argument(
        "input_fea", metavar="FEATURES", help="Path to the feature file"
    )
    parser.add_argument(
        "input_font", metavar="INPUT_FONT", help="Path to the input font"
    )
    parser.add_argument(
        "-o",
        "--output",
        dest="output_font",
        metavar="OUTPUT_FONT",
        help="Path to the output font.",
    )
    parser.add_argument(
        "-t",
        "--tables",
        metavar="TABLE_TAG",
        choices=Builder.supportedTables,
        nargs="+",
        help="Specify the table(s) to be built.",
    )
    parser.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="Add source-level debugging information to font.",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="Increase the logger verbosity. Multiple -v " "options are allowed.",
        action="count",
        default=0,
    )
    parser.add_argument(
        "--traceback", help="show traceback for exceptions.", action="store_true"
    )
    options = parser.parse_args(args)

    # -v raises WARNING -> INFO -> DEBUG; extra -v flags are capped at DEBUG.
    levels = ["WARNING", "INFO", "DEBUG"]
    configLogger(level=levels[min(len(levels) - 1, options.verbose)])

    # Without -o, derive an output name from the input font path.
    output_font = options.output_font or makeOutputFileName(options.input_font)
    log.info("Compiling features to '%s'" % (output_font))

    font = TTFont(options.input_font)
    try:
        addOpenTypeFeatures(
            font, options.input_fea, tables=options.tables, debug=options.debug
        )
    except FeatureLibError as e:
        # Compilation errors are reported as a message (with location) unless
        # --traceback asks for the full stack.
        if options.traceback:
            raise
        log.error(e)
        sys.exit(1)
    font.save(output_font)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
2134
venv/lib/python3.12/site-packages/fontTools/feaLib/ast.py
Normal file
2134
venv/lib/python3.12/site-packages/fontTools/feaLib/ast.py
Normal file
File diff suppressed because it is too large
Load Diff
1729
venv/lib/python3.12/site-packages/fontTools/feaLib/builder.py
Normal file
1729
venv/lib/python3.12/site-packages/fontTools/feaLib/builder.py
Normal file
File diff suppressed because it is too large
Load Diff
22
venv/lib/python3.12/site-packages/fontTools/feaLib/error.py
Normal file
22
venv/lib/python3.12/site-packages/fontTools/feaLib/error.py
Normal file
@ -0,0 +1,22 @@
|
||||
class FeatureLibError(Exception):
    """Error raised while handling a feature file; carries a source location.

    *location* is prepended to the message when present (it may be None).
    """

    def __init__(self, message, location):
        super().__init__(message)
        self.location = location

    def __str__(self):
        message = Exception.__str__(self)
        if not self.location:
            return message
        return f"{self.location}: {message}"
|
||||
|
||||
|
||||
class IncludedFeaNotFound(FeatureLibError):
    """Raised when an include statement points at a missing feature file."""

    def __str__(self):
        # A location is mandatory for this error: the include statement
        # that referenced the missing file.
        assert self.location is not None

        missing = Exception.__str__(self)
        return (
            f"{self.location}: The following feature file should be included "
            f"but cannot be found: {missing}"
        )
|
||||
17986
venv/lib/python3.12/site-packages/fontTools/feaLib/lexer.c
Normal file
17986
venv/lib/python3.12/site-packages/fontTools/feaLib/lexer.c
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
287
venv/lib/python3.12/site-packages/fontTools/feaLib/lexer.py
Normal file
287
venv/lib/python3.12/site-packages/fontTools/feaLib/lexer.py
Normal file
@ -0,0 +1,287 @@
|
||||
from fontTools.feaLib.error import FeatureLibError, IncludedFeaNotFound
|
||||
from fontTools.feaLib.location import FeatureLibLocation
|
||||
import re
|
||||
import os
|
||||
|
||||
try:
|
||||
import cython
|
||||
except ImportError:
|
||||
# if cython not installed, use mock module with no-op decorators and types
|
||||
from fontTools.misc import cython
|
||||
|
||||
|
||||
class Lexer(object):
    """Tokenizer for OpenType feature files.

    Iterating over a Lexer yields ``(token_type, token_value, location)``
    tuples; NEWLINE tokens are consumed internally and never emitted.
    """

    # Token type tags.
    NUMBER = "NUMBER"
    HEXADECIMAL = "HEXADECIMAL"
    OCTAL = "OCTAL"
    NUMBERS = (NUMBER, HEXADECIMAL, OCTAL)
    FLOAT = "FLOAT"
    STRING = "STRING"
    NAME = "NAME"
    FILENAME = "FILENAME"
    GLYPHCLASS = "GLYPHCLASS"
    CID = "CID"
    SYMBOL = "SYMBOL"
    COMMENT = "COMMENT"
    NEWLINE = "NEWLINE"
    ANONYMOUS_BLOCK = "ANONYMOUS_BLOCK"

    # Character classes used by the scanner.
    CHAR_WHITESPACE_ = " \t"
    CHAR_NEWLINE_ = "\r\n"
    CHAR_SYMBOL_ = ",;:-+'{}[]<>()="
    CHAR_DIGIT_ = "0123456789"
    CHAR_HEXDIGIT_ = "0123456789ABCDEFabcdef"
    CHAR_LETTER_ = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
    CHAR_NAME_START_ = CHAR_LETTER_ + "_+*:.^~!\\"
    CHAR_NAME_CONTINUATION_ = CHAR_LETTER_ + CHAR_DIGIT_ + "_.+*:^~!/-"

    # Glyph class names are more restrictive than general NAME tokens.
    RE_GLYPHCLASS = re.compile(r"^[A-Za-z_0-9.\-]+$")

    # After an "include" keyword the scanner switches to FILENAME mode so
    # the parenthesized path is read verbatim.
    MODE_NORMAL_ = "NORMAL"
    MODE_FILENAME_ = "FILENAME"

    def __init__(self, text, filename):
        # filename is only used for error locations; may be None.
        self.filename_ = filename
        self.line_ = 1
        self.pos_ = 0
        self.line_start_ = 0
        self.text_ = text
        self.text_length_ = len(text)
        self.mode_ = Lexer.MODE_NORMAL_

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        # Skip NEWLINE tokens; callers only see substantive tokens.
        while True:
            token_type, token, location = self.next_()
            if token_type != Lexer.NEWLINE:
                return (token_type, token, location)

    def location_(self):
        """Return a FeatureLibLocation for the current scan position."""
        column = self.pos_ - self.line_start_ + 1
        return FeatureLibLocation(self.filename_ or "<features>", self.line_, column)

    def next_(self):
        """Scan and return the next raw token (including NEWLINE tokens)."""
        self.scan_over_(Lexer.CHAR_WHITESPACE_)
        location = self.location_()
        start = self.pos_
        text = self.text_
        limit = len(text)
        if start >= limit:
            raise StopIteration()
        cur_char = text[start]
        next_char = text[start + 1] if start + 1 < limit else None

        if cur_char == "\n":
            self.pos_ += 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "\r":
            # Treat "\r\n" as a single newline.
            self.pos_ += 2 if next_char == "\n" else 1
            self.line_ += 1
            self.line_start_ = self.pos_
            return (Lexer.NEWLINE, None, location)
        if cur_char == "#":
            # Comments run to the end of the line.
            self.scan_until_(Lexer.CHAR_NEWLINE_)
            return (Lexer.COMMENT, text[start : self.pos_], location)

        if self.mode_ is Lexer.MODE_FILENAME_:
            # Inside include(...): everything between the parentheses is
            # the file name, read verbatim.
            if cur_char != "(":
                raise FeatureLibError("Expected '(' before file name", location)
            self.scan_until_(")")
            cur_char = text[self.pos_] if self.pos_ < limit else None
            if cur_char != ")":
                raise FeatureLibError("Expected ')' after file name", location)
            self.pos_ += 1
            self.mode_ = Lexer.MODE_NORMAL_
            return (Lexer.FILENAME, text[start + 1 : self.pos_ - 1], location)

        if cur_char == "\\" and next_char in Lexer.CHAR_DIGIT_:
            # "\123" is a CID reference.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.CID, int(text[start + 1 : self.pos_], 10), location)
        if cur_char == "@":
            # "@Name" is a glyph class reference.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            glyphclass = text[start + 1 : self.pos_]
            if len(glyphclass) < 1:
                raise FeatureLibError("Expected glyph class name", location)
            if not Lexer.RE_GLYPHCLASS.match(glyphclass):
                raise FeatureLibError(
                    "Glyph class names must consist of letters, digits, "
                    "underscore, period or hyphen",
                    location,
                )
            return (Lexer.GLYPHCLASS, glyphclass, location)
        if cur_char in Lexer.CHAR_NAME_START_:
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_NAME_CONTINUATION_)
            token = text[start : self.pos_]
            if token == "include":
                # Switch modes so the next token is read as a file name.
                self.mode_ = Lexer.MODE_FILENAME_
            return (Lexer.NAME, token, location)
        if cur_char == "0" and next_char in "xX":
            self.pos_ += 2
            self.scan_over_(Lexer.CHAR_HEXDIGIT_)
            return (Lexer.HEXADECIMAL, int(text[start : self.pos_], 16), location)
        if cur_char == "0" and next_char in Lexer.CHAR_DIGIT_:
            # A leading zero followed by more digits is octal.
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.OCTAL, int(text[start : self.pos_], 8), location)
        if cur_char in Lexer.CHAR_DIGIT_:
            self.scan_over_(Lexer.CHAR_DIGIT_)
            if self.pos_ >= limit or text[self.pos_] != ".":
                return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
            self.scan_over_(".")
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.FLOAT, float(text[start : self.pos_]), location)
        if cur_char == "-" and next_char in Lexer.CHAR_DIGIT_:
            # Negative number; same integer/float split as above.
            self.pos_ += 1
            self.scan_over_(Lexer.CHAR_DIGIT_)
            if self.pos_ >= limit or text[self.pos_] != ".":
                return (Lexer.NUMBER, int(text[start : self.pos_], 10), location)
            self.scan_over_(".")
            self.scan_over_(Lexer.CHAR_DIGIT_)
            return (Lexer.FLOAT, float(text[start : self.pos_]), location)
        if cur_char in Lexer.CHAR_SYMBOL_:
            self.pos_ += 1
            return (Lexer.SYMBOL, cur_char, location)
        if cur_char == '"':
            self.pos_ += 1
            self.scan_until_('"')
            if self.pos_ < self.text_length_ and self.text_[self.pos_] == '"':
                self.pos_ += 1
                # strip newlines embedded within a string
                string = re.sub("[\r\n]", "", text[start + 1 : self.pos_ - 1])
                return (Lexer.STRING, string, location)
            else:
                raise FeatureLibError("Expected '\"' to terminate string", location)
        raise FeatureLibError("Unexpected character: %r" % cur_char, location)

    def scan_over_(self, valid):
        """Advance pos_ while the current character is in *valid*."""
        p = self.pos_
        while p < self.text_length_ and self.text_[p] in valid:
            p += 1
        self.pos_ = p

    def scan_until_(self, stop_at):
        """Advance pos_ until the current character is in *stop_at* (or EOF)."""
        p = self.pos_
        while p < self.text_length_ and self.text_[p] not in stop_at:
            p += 1
        self.pos_ = p

    def scan_anonymous_block(self, tag):
        """Return the raw text of an anonymous block terminated by '} tag;'."""
        location = self.location_()
        tag = tag.strip()
        self.scan_until_(Lexer.CHAR_NEWLINE_)
        self.scan_over_(Lexer.CHAR_NEWLINE_)
        regexp = r"}\s*" + tag + r"\s*;"
        split = re.split(regexp, self.text_[self.pos_ :], maxsplit=1)
        if len(split) != 2:
            raise FeatureLibError(
                "Expected '} %s;' to terminate anonymous block" % tag, location
            )
        self.pos_ += len(split[0])
        return (Lexer.ANONYMOUS_BLOCK, split[0], location)
|
||||
|
||||
|
||||
class IncludingLexer(object):
    """A Lexer that follows include statements.

    The OpenType feature file specification states that due to
    historical reasons, relative imports should be resolved in this
    order:

    1. If the source font is UFO format, then relative to the UFO's
    font directory
    2. relative to the top-level include file
    3. relative to the parent include file

    We only support 1 (via includeDir) and 2.
    """

    def __init__(self, featurefile, *, includeDir=None):
        """Initializes an IncludingLexer.

        Behavior:
            If includeDir is passed, it will be used to determine the top-level
            include directory to use for all encountered include statements. If it is
            not passed, ``os.path.dirname(featurefile)`` will be considered the
            include directory.
        """

        # Stack of active lexers: includes push a new lexer, exhausted
        # files pop back to the including file.
        self.lexers_ = [self.make_lexer_(featurefile)]
        self.featurefilepath = self.lexers_[0].filename_
        self.includeDir = includeDir

    def __iter__(self):
        return self

    def next(self):  # Python 2
        return self.__next__()

    def __next__(self):  # Python 3
        while self.lexers_:
            lexer = self.lexers_[-1]
            try:
                token_type, token, location = next(lexer)
            except StopIteration:
                # Current file exhausted; resume the including file.
                self.lexers_.pop()
                continue
            if token_type is Lexer.NAME and token == "include":
                fname_type, fname_token, fname_location = lexer.next()
                if fname_type is not Lexer.FILENAME:
                    raise FeatureLibError("Expected file name", fname_location)
                # semi_type, semi_token, semi_location = lexer.next()
                # if semi_type is not Lexer.SYMBOL or semi_token != ";":
                #     raise FeatureLibError("Expected ';'", semi_location)
                if os.path.isabs(fname_token):
                    path = fname_token
                else:
                    if self.includeDir is not None:
                        curpath = self.includeDir
                    elif self.featurefilepath is not None:
                        curpath = os.path.dirname(self.featurefilepath)
                    else:
                        # if the IncludingLexer was initialized from an in-memory
                        # file-like stream, it doesn't have a 'name' pointing to
                        # its filesystem path, therefore we fall back to using the
                        # current working directory to resolve relative includes
                        curpath = os.getcwd()
                    path = os.path.join(curpath, fname_token)
                # Cap nesting depth to guard against include cycles.
                if len(self.lexers_) >= 5:
                    raise FeatureLibError("Too many recursive includes", fname_location)
                try:
                    self.lexers_.append(self.make_lexer_(path))
                except FileNotFoundError as err:
                    raise IncludedFeaNotFound(fname_token, fname_location) from err
            else:
                return (token_type, token, location)
        raise StopIteration()

    @staticmethod
    def make_lexer_(file_or_path):
        """Build a Lexer from an open file object or a filesystem path."""
        if hasattr(file_or_path, "read"):
            fileobj, closing = file_or_path, False
        else:
            filename, closing = file_or_path, True
            fileobj = open(filename, "r", encoding="utf-8-sig")
        data = fileobj.read()
        filename = getattr(fileobj, "name", None)
        if closing:
            fileobj.close()
        return Lexer(data, filename)

    def scan_anonymous_block(self, tag):
        # Delegate to the innermost (currently active) lexer.
        return self.lexers_[-1].scan_anonymous_block(tag)
|
||||
|
||||
|
||||
class NonIncludingLexer(IncludingLexer):
    """Lexer that does not follow `include` statements, emits them as-is."""

    def __next__(self):  # Python 3
        # Always read from the top-level lexer; include statements are
        # returned verbatim instead of being expanded.
        top = self.lexers_[0]
        return next(top)
|
||||
@ -0,0 +1,12 @@
|
||||
from typing import NamedTuple
|
||||
|
||||
|
||||
class FeatureLibLocation(NamedTuple):
    """A location in a feature file: source file, line, and column."""

    file: str
    line: int
    column: int

    def __str__(self):
        # Rendered as "file:line:column", e.g. "features.fea:12:4".
        return "%s:%d:%d" % (self.file, self.line, self.column)
|
||||
@ -0,0 +1,12 @@
|
||||
from typing import NamedTuple
|
||||
|
||||
# Key under which feaLib stores lookup debug info in the font
# (presumably inside a font debugging table — confirm at the writer).
LOOKUP_DEBUG_INFO_KEY = "com.github.fonttools.feaLib"
# Environment variable that switches lookup debug info generation on.
LOOKUP_DEBUG_ENV_VAR = "FONTTOOLS_LOOKUP_DEBUGGING"
|
||||
|
||||
|
||||
class LookupDebugInfo(NamedTuple):
    """Information about where a lookup came from, to be embedded in a font"""

    # Feature-file location string of the lookup's definition.
    location: str
    # Name of the lookup (presumably the feature-file lookup block name —
    # confirm against the builder).
    name: str
    # Feature references that use this lookup; list element shape is
    # defined by the builder, not visible here.
    feature: list
|
||||
2359
venv/lib/python3.12/site-packages/fontTools/feaLib/parser.py
Normal file
2359
venv/lib/python3.12/site-packages/fontTools/feaLib/parser.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,113 @@
|
||||
from fontTools.varLib.models import VariationModel, normalizeValue, piecewiseLinearMap
|
||||
|
||||
|
||||
def Location(loc):
    """Return a canonical, hashable form of *loc* (a mapping of axis tag
    to value): a tuple of (tag, value) pairs sorted by tag."""
    items = sorted(loc.items())
    return tuple(items)
|
||||
|
||||
|
||||
class VariableScalar:
    """A scalar with different values at different points in the designspace."""

    def __init__(self, location_value=None):
        # FIX: the original signature used a mutable default argument
        # (``location_value={}``); ``None`` avoids that anti-pattern while
        # remaining call-compatible.
        self.values = {}
        self.axes = {}
        if location_value:
            for location, value in location_value.items():
                self.add_value(location, value)

    def __repr__(self):
        # e.g. "(wght=400:10 wght=700:20)"
        items = []
        for location, value in self.values.items():
            loc = ",".join(["%s=%i" % (ax, loc) for ax, loc in location])
            items.append("%s:%i" % (loc, value))
        return "(" + (" ".join(items)) + ")"

    @property
    def does_vary(self):
        # True when at least two master values differ.
        values = list(self.values.values())
        return any(v != values[0] for v in values[1:])

    @property
    def axes_dict(self):
        if not self.axes:
            raise ValueError(
                ".axes must be defined on variable scalar before interpolating"
            )
        return {ax.axisTag: ax for ax in self.axes}

    def _normalized_location(self, location):
        # Map a user-space location to the normalized design space.
        location = self.fix_location(location)
        normalized_location = {}
        for axtag in location.keys():
            if axtag not in self.axes_dict:
                raise ValueError("Unknown axis %s in %s" % (axtag, location))
            axis = self.axes_dict[axtag]
            normalized_location[axtag] = normalizeValue(
                location[axtag], (axis.minValue, axis.defaultValue, axis.maxValue)
            )

        return Location(normalized_location)

    def fix_location(self, location):
        """Return *location* with missing axes filled in from axis defaults."""
        location = dict(location)
        for tag, axis in self.axes_dict.items():
            if tag not in location:
                location[tag] = axis.defaultValue
        return location

    def add_value(self, location, value):
        """Record *value* at *location* (a mapping of axis tag -> user value)."""
        if self.axes:
            location = self.fix_location(location)

        self.values[Location(location)] = value

    def fix_all_locations(self):
        # Re-key every stored value with a fully-specified location.
        self.values = {
            Location(self.fix_location(l)): v for l, v in self.values.items()
        }

    @property
    def default(self):
        """The value at the default location; raises ValueError if absent."""
        self.fix_all_locations()
        key = Location({ax.axisTag: ax.defaultValue for ax in self.axes})
        if key not in self.values:
            raise ValueError("Default value could not be found")
        # I *guess* we could interpolate one, but I don't know how.
        return self.values[key]

    def value_at_location(self, location, model_cache=None, avar=None):
        """Return the value at *location*, interpolating between masters
        when the location is not an exact master."""
        loc = Location(location)
        if loc in self.values.keys():
            return self.values[loc]
        values = list(self.values.values())
        loc = dict(self._normalized_location(loc))
        return self.model(model_cache, avar).interpolateFromMasters(loc, values)

    def model(self, model_cache=None, avar=None):
        """Build (or fetch from *model_cache*) a VariationModel over the
        stored master locations, applying the avar mapping when given."""
        if model_cache is not None:
            key = tuple(self.values.keys())
            if key in model_cache:
                return model_cache[key]
        locations = [dict(self._normalized_location(k)) for k in self.values.keys()]
        if avar is not None:
            # Remap each axis value through the avar segment map, if any.
            mapping = avar.segments
            locations = [
                {
                    k: piecewiseLinearMap(v, mapping[k]) if k in mapping else v
                    for k, v in location.items()
                }
                for location in locations
            ]
        m = VariationModel(locations)
        if model_cache is not None:
            model_cache[key] = m
        return m

    def get_deltas_and_supports(self, model_cache=None, avar=None):
        """Return the (deltas, supports) pair for the stored master values."""
        values = list(self.values.values())
        return self.model(model_cache, avar).getDeltasAndSupports(values)

    def add_to_variation_store(self, store_builder, model_cache=None, avar=None):
        """Store this scalar's deltas; return (default value, variation index)."""
        deltas, supports = self.get_deltas_and_supports(model_cache, avar)
        store_builder.setSupports(supports)
        index = store_builder.storeDeltas(deltas)
        return int(self.default), index
|
||||
1008
venv/lib/python3.12/site-packages/fontTools/fontBuilder.py
Normal file
1008
venv/lib/python3.12/site-packages/fontTools/fontBuilder.py
Normal file
File diff suppressed because it is too large
Load Diff
36
venv/lib/python3.12/site-packages/fontTools/help.py
Normal file
36
venv/lib/python3.12/site-packages/fontTools/help.py
Normal file
@ -0,0 +1,36 @@
|
||||
import pkgutil
|
||||
import sys
|
||||
import fontTools
|
||||
import importlib
|
||||
import os
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
def main():
    """Show this help"""
    # FIX: dropped the unused local ``path = fontTools.__path__`` and the
    # unused exception bindings; use importlib (already imported at module
    # level) instead of raw __import__.
    descriptions = {}
    # Walk every submodule under fontTools and collect the first line of
    # its main() docstring as a one-line command description.
    for pkg in sorted(
        mod.name
        for mod in pkgutil.walk_packages([fontTools.__path__[0]], prefix="fontTools.")
    ):
        try:
            module = importlib.import_module(pkg)
        except ImportError:
            # Optional dependencies may be missing; skip those modules.
            continue
        description = getattr(getattr(module, "main", None), "__doc__", None)
        # Cython modules seem to return "main()" as the docstring
        if description and description != "main()":
            name = pkg.replace("fontTools.", "").replace(".__main__", "")
            # show the docstring's first line only
            descriptions[name] = description.splitlines()[0]
    for pkg, description in descriptions.items():
        print("fonttools %-25s %s" % (pkg, description), file=sys.stderr)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Print a version banner, then the list of available sub-commands.
    print("fonttools v%s\n" % fontTools.__version__, file=sys.stderr)
    main()
|
||||
248
venv/lib/python3.12/site-packages/fontTools/merge/__init__.py
Normal file
248
venv/lib/python3.12/site-packages/fontTools/merge/__init__.py
Normal file
@ -0,0 +1,248 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
||||
|
||||
from fontTools import ttLib
|
||||
import fontTools.merge.base
|
||||
from fontTools.merge.cmap import (
|
||||
computeMegaGlyphOrder,
|
||||
computeMegaCmap,
|
||||
renameCFFCharStrings,
|
||||
)
|
||||
from fontTools.merge.layout import layoutPreMerge, layoutPostMerge
|
||||
from fontTools.merge.options import Options
|
||||
import fontTools.merge.tables
|
||||
from fontTools.misc.loggingTools import Timer
|
||||
from functools import reduce
|
||||
import sys
|
||||
import logging
|
||||
|
||||
|
||||
# Shared logger for the fontTools.merge subpackage.
log = logging.getLogger("fontTools.merge")
# Timer used to report per-table merge timings at INFO level.
timer = Timer(logger=logging.getLogger(__name__ + ".timer"), level=logging.INFO)
|
||||
|
||||
|
||||
class Merger(object):
    """Font merger.

    This class merges multiple files into a single OpenType font, taking into
    account complexities such as OpenType layout (``GSUB``/``GPOS``) tables and
    cross-font metrics (for example ``hhea.ascent`` is set to the maximum value
    across all the fonts).

    If multiple glyphs map to the same Unicode value, and the glyphs are considered
    sufficiently different (that is, they differ in any of paths, widths, or
    height), then subsequent glyphs are renamed and a lookup in the ``locl``
    feature will be created to disambiguate them. For example, if the arguments
    are an Arabic font and a Latin font and both contain a set of parentheses,
    the Latin glyphs will be renamed to ``parenleft.1`` and ``parenright.1``,
    and a lookup will be inserted into the to ``locl`` feature (creating it if
    necessary) under the ``latn`` script to substitute ``parenleft`` with
    ``parenleft.1`` etc.

    Restrictions:

    - All fonts must have the same units per em.
    - If duplicate glyph disambiguation takes place as described above then the
      fonts must have a ``GSUB`` table.

    Attributes:
      options: Currently unused.
    """

    def __init__(self, options=None):
        if not options:
            options = Options()

        self.options = options

    def _openFonts(self, fontfiles):
        # Open each file and stash its path and name-table entry on the
        # font object for later diagnostics.
        fonts = [ttLib.TTFont(fontfile) for fontfile in fontfiles]
        for font, fontfile in zip(fonts, fontfiles):
            font._merger__fontfile = fontfile
            font._merger__name = font["name"].getDebugName(4)
        return fonts

    def merge(self, fontfiles):
        """Merges fonts together.

        Args:
            fontfiles: A list of file names to be merged

        Returns:
            A :class:`fontTools.ttLib.TTFont` object. Call the ``save`` method on
            this to write it out to an OTF file.
        """
        #
        # Settle on a mega glyph order.
        #
        fonts = self._openFonts(fontfiles)
        glyphOrders = [list(font.getGlyphOrder()) for font in fonts]
        computeMegaGlyphOrder(self, glyphOrders)

        # Take first input file sfntVersion
        sfntVersion = fonts[0].sfntVersion

        # Reload fonts and set new glyph names on them.
        fonts = self._openFonts(fontfiles)
        for font, glyphOrder in zip(fonts, glyphOrders):
            font.setGlyphOrder(glyphOrder)
            if "CFF " in font:
                renameCFFCharStrings(self, glyphOrder, font["CFF "])

        cmaps = [font["cmap"] for font in fonts]
        self.duplicateGlyphsPerFont = [{} for _ in fonts]
        computeMegaCmap(self, cmaps)

        mega = ttLib.TTFont(sfntVersion=sfntVersion)
        mega.setGlyphOrder(self.glyphOrder)

        for font in fonts:
            self._preMerge(font)

        self.fonts = fonts

        # Union of all table tags across the inputs; GlyphOrder is a
        # pseudo-table and handled above.
        allTags = reduce(set.union, (list(font.keys()) for font in fonts), set())
        allTags.remove("GlyphOrder")

        for tag in sorted(allTags):
            if tag in self.options.drop_tables:
                continue

            with timer("merge '%s'" % tag):
                tables = [font.get(tag, NotImplemented) for font in fonts]

                log.info("Merging '%s'.", tag)
                clazz = ttLib.getTableClass(tag)
                table = clazz(tag).merge(self, tables)
                # XXX Clean this up and use:  table = mergeObjects(tables)

                if table is not NotImplemented and table is not False:
                    mega[tag] = table
                    log.info("Merged '%s'.", tag)
                else:
                    log.info("Dropped '%s'.", tag)

        del self.duplicateGlyphsPerFont
        del self.fonts

        self._postMerge(mega)

        return mega

    def mergeObjects(self, returnTable, logic, tables):
        # Right now we don't use self at all.  Will use in the future
        # for options and logging.

        # Merge attribute-by-attribute; *logic* maps attribute names to
        # merge rules, with "*" as the fallback rule.
        allKeys = set.union(
            set(),
            *(vars(table).keys() for table in tables if table is not NotImplemented),
        )
        for key in allKeys:
            log.info(" %s", key)
            try:
                mergeLogic = logic[key]
            except KeyError:
                try:
                    mergeLogic = logic["*"]
                except KeyError:
                    raise Exception(
                        "Don't know how to merge key %s of class %s"
                        % (key, returnTable.__class__.__name__)
                    )
            if mergeLogic is NotImplemented:
                continue
            value = mergeLogic(getattr(table, key, NotImplemented) for table in tables)
            if value is not NotImplemented:
                setattr(returnTable, key, value)

        return returnTable

    def _preMerge(self, font):
        # Per-font layout preparation before tables are merged.
        layoutPreMerge(font)

    def _postMerge(self, font):
        # Layout fix-ups on the merged font.
        layoutPostMerge(font)

        if "OS/2" in font:
            # https://github.com/fonttools/fonttools/issues/2538
            # TODO: Add an option to disable this?
            font["OS/2"].recalcAvgCharWidth(font)
|
||||
|
||||
|
||||
# Public API of the fontTools.merge package.
__all__ = ["Options", "Merger", "main"]
|
||||
|
||||
|
||||
@timer("make one with everything (TOTAL TIME)")
def main(args=None):
    """Merge multiple fonts into one"""
    from fontTools import configLogger

    if args is None:
        args = sys.argv[1:]

    options = Options()
    args = options.parse_opts(args)
    fontfiles = []
    # Inputs may come from a list file (one path per line, '#' comments
    # allowed) and/or positional arguments.
    if options.input_file:
        with open(options.input_file) as inputfile:
            fontfiles = [
                line.strip()
                for line in inputfile.readlines()
                if not line.lstrip().startswith("#")
            ]
    for g in args:
        fontfiles.append(g)

    if len(fontfiles) < 1:
        # No inputs: print usage to stderr and exit with an error status.
        print(
            "usage: pyftmerge [font1 ... fontN] [--input-file=filelist.txt] [--output-file=merged.ttf] [--import-file=tables.ttx]",
            file=sys.stderr,
        )
        print(
            " [--drop-tables=tags] [--verbose] [--timing]",
            file=sys.stderr,
        )
        print("", file=sys.stderr)
        print(" font1 ... fontN Files to merge.", file=sys.stderr)
        print(
            " --input-file=<filename> Read files to merge from a text file, each path new line. # Comment lines allowed.",
            file=sys.stderr,
        )
        print(
            " --output-file=<filename> Specify output file name (default: merged.ttf).",
            file=sys.stderr,
        )
        print(
            " --import-file=<filename> TTX file to import after merging. This can be used to set metadata.",
            file=sys.stderr,
        )
        print(
            " --drop-tables=<table tags> Comma separated list of table tags to skip, case sensitive.",
            file=sys.stderr,
        )
        print(
            " --verbose Output progress information.",
            file=sys.stderr,
        )
        print(" --timing Output progress timing.", file=sys.stderr)
        return 1

    configLogger(level=logging.INFO if options.verbose else logging.WARNING)
    if options.timing:
        timer.logger.setLevel(logging.DEBUG)
    else:
        timer.logger.disabled = True

    merger = Merger(options=options)
    font = merger.merge(fontfiles)

    if options.import_file:
        # Optionally overlay metadata from a TTX dump onto the merged font.
        font.importXML(options.import_file)

    with timer("compile and save font"):
        font.save(options.output_file)
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Script entry point: exit with main()'s return code.
    sys.exit(main())
|
||||
@ -0,0 +1,6 @@
|
||||
import sys
|
||||
from fontTools.merge import main
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Delegate to fontTools.merge.main when run as `python -m fontTools.merge`.
    sys.exit(main())
|
||||
81
venv/lib/python3.12/site-packages/fontTools/merge/base.py
Normal file
81
venv/lib/python3.12/site-packages/fontTools/merge/base.py
Normal file
@ -0,0 +1,81 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
||||
|
||||
from fontTools.ttLib.tables.DefaultTable import DefaultTable
|
||||
import logging
|
||||
|
||||
|
||||
# Shared logger for the fontTools.merge subpackage.
log = logging.getLogger("fontTools.merge")
|
||||
|
||||
|
||||
def add_method(*clazzes, **kwargs):
    """Returns a decorator function that adds a new method to one or
    more classes."""
    allowDefault = kwargs.get("allowDefaultTable", False)

    def wrapper(method):
        # dict.fromkeys de-duplicates while preserving order, since the
        # same class may be registered under several names.
        for clazz in dict.fromkeys(clazzes):
            assert allowDefault or clazz != DefaultTable, "Oops, table class not found."
            assert (
                method.__name__ not in clazz.__dict__
            ), "Oops, class '%s' has method '%s'." % (clazz.__name__, method.__name__)
            setattr(clazz, method.__name__, method)
        # The decorated name is deliberately unbound afterwards.
        return None

    return wrapper
|
||||
|
||||
|
||||
def mergeObjects(lst):
    """Merge a list of homogeneous table objects attribute-by-attribute,
    driven by the class's ``mergeMap``.

    Returns NotImplemented when every input is NotImplemented, and None
    when the remaining inputs are all None.
    """
    lst = [item for item in lst if item is not NotImplemented]
    if not lst:
        return NotImplemented
    lst = [item for item in lst if item is not None]
    if not lst:
        return None

    clazz = lst[0].__class__
    assert all(type(item) == clazz for item in lst), lst

    logic = clazz.mergeMap
    returnTable = clazz()
    returnDict = {}

    allKeys = set.union(set(), *(vars(table).keys() for table in lst))
    for key in allKeys:
        try:
            mergeLogic = logic[key]
        except KeyError:
            try:
                # "*" is the fallback merge rule for unlisted attributes.
                mergeLogic = logic["*"]
            except KeyError:
                raise Exception(
                    "Don't know how to merge key %s of class %s" % (key, clazz.__name__)
                )
        if mergeLogic is NotImplemented:
            continue
        value = mergeLogic(getattr(table, key, NotImplemented) for table in lst)
        if value is not NotImplemented:
            returnDict[key] = value

    returnTable.__dict__ = returnDict

    return returnTable
|
||||
|
||||
|
||||
@add_method(DefaultTable, allowDefaultTable=True)
def merge(self, m, tables):
    """Merge *tables* into this fresh table object.

    Dispatches on the class's ``mergeMap``: a dict maps attribute names
    to merge rules, while a callable handles the whole table list itself.
    Returns NotImplemented for tables with no merge strategy.
    """
    if not hasattr(self, "mergeMap"):
        log.info("Don't know how to merge '%s'.", self.tableTag)
        return NotImplemented

    logic = self.mergeMap

    if isinstance(logic, dict):
        return m.mergeObjects(self, self.mergeMap, tables)
    else:
        return logic(tables)
|
||||
141
venv/lib/python3.12/site-packages/fontTools/merge/cmap.py
Normal file
141
venv/lib/python3.12/site-packages/fontTools/merge/cmap.py
Normal file
@ -0,0 +1,141 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
||||
|
||||
from fontTools.merge.unicode import is_Default_Ignorable
|
||||
from fontTools.pens.recordingPen import DecomposingRecordingPen
|
||||
import logging
|
||||
|
||||
|
||||
# Shared logger for the fontTools.merge subpackage.
log = logging.getLogger("fontTools.merge")
|
||||
|
||||
|
||||
def computeMegaGlyphOrder(merger, glyphOrders):
    """Rename colliding glyph names in-place across *glyphOrders* and
    store the combined order on ``merger.glyphOrder``.

    A duplicate name gets a numeric suffix (".1", ".2", ...) chosen so it
    does not clash with any name already assigned.
    """
    seen = {}  # glyph name -> next suffix candidate to try
    for order in glyphOrders:
        for idx, name in enumerate(order):
            if name in seen:
                suffix = seen[name]
                while name + "." + repr(suffix) in seen:
                    suffix += 1
                seen[name] = suffix
                name = name + "." + repr(suffix)
                order[idx] = name
            seen[name] = 1
    merger.glyphOrder = list(seen.keys())
|
||||
|
||||
|
||||
def _glyphsAreSame(
    glyphSet1,
    glyphSet2,
    glyph1,
    glyph2,
    advanceTolerance=0.05,
    advanceToleranceEmpty=0.20,
):
    """Return True when the two glyphs decompose to identical outlines and
    their advances (width and, when present, height) agree within tolerance."""
    recording1 = DecomposingRecordingPen(glyphSet1)
    recording2 = DecomposingRecordingPen(glyphSet2)
    first = glyphSet1[glyph1]
    second = glyphSet2[glyph2]
    first.draw(recording1)
    second.draw(recording2)
    if recording1.value != recording2.value:
        return False
    # Allow more width tolerance for glyphs with no ink
    tolerance = advanceTolerance if recording1.value else advanceToleranceEmpty
    # TODO Warn if advances not the same but within tolerance.
    if abs(first.width - second.width) > first.width * tolerance:
        return False
    has_height = hasattr(first, "height") and first.height is not None
    if has_height and abs(first.height - second.height) > first.height * tolerance:
        return False
    return True
|
||||
|
||||
|
||||
# Valid (format, platformID, platEncID) triplets for cmap subtables containing
# Unicode BMP-only and Unicode Full Repertoire semantics.
# Cf. OpenType spec for "Platform specific encodings":
# https://docs.microsoft.com/en-us/typography/opentype/spec/name
class _CmapUnicodePlatEncodings:
    # Format-4 subtables carrying Unicode BMP semantics.
    BMP = {(4, 3, 1), (4, 0, 3), (4, 0, 4), (4, 0, 6)}
    # Format-12 subtables carrying full Unicode repertoire semantics.
    FullRepertoire = {(12, 3, 10), (12, 0, 4), (12, 0, 6)}
|
||||
|
||||
|
||||
def computeMegaCmap(merger, cmapTables):
    """Build the merged Unicode mapping: sets ``merger.cmap`` and records
    conflicting mappings in ``merger.duplicateGlyphsPerFont``."""

    # TODO Handle format=14.
    # Only merge format 4 and 12 Unicode subtables, ignores all other subtables
    # If there is a format 12 table for a font, ignore the format 4 table of it
    chosenCmapTables = []
    for fontIdx, table in enumerate(cmapTables):
        format4 = None
        format12 = None
        for subtable in table.tables:
            properties = (subtable.format, subtable.platformID, subtable.platEncID)
            if properties in _CmapUnicodePlatEncodings.BMP:
                format4 = subtable
            elif properties in _CmapUnicodePlatEncodings.FullRepertoire:
                format12 = subtable
            else:
                log.warning(
                    "Dropped cmap subtable from font '%s':\t"
                    "format %2s, platformID %2s, platEncID %2s",
                    fontIdx,
                    subtable.format,
                    subtable.platformID,
                    subtable.platEncID,
                )
        if format12 is not None:
            chosenCmapTables.append((format12, fontIdx))
        elif format4 is not None:
            chosenCmapTables.append((format4, fontIdx))

    # Build the unicode mapping
    merger.cmap = cmap = {}
    fontIndexForGlyph = {}
    # Glyph sets are loaded lazily and only when merger.fonts is available.
    glyphSets = [None for f in merger.fonts] if hasattr(merger, "fonts") else None

    for table, fontIdx in chosenCmapTables:
        # handle duplicates
        for uni, gid in table.cmap.items():
            oldgid = cmap.get(uni, None)
            if oldgid is None:
                # First font to map this codepoint wins.
                cmap[uni] = gid
                fontIndexForGlyph[gid] = fontIdx
            elif is_Default_Ignorable(uni) or uni in (0x25CC,):  # U+25CC DOTTED CIRCLE
                continue
            elif oldgid != gid:
                # Char previously mapped to oldgid, now to gid.
                # Record, to fix up in GSUB 'locl' later.
                if merger.duplicateGlyphsPerFont[fontIdx].get(oldgid) is None:
                    if glyphSets is not None:
                        oldFontIdx = fontIndexForGlyph[oldgid]
                        for idx in (fontIdx, oldFontIdx):
                            if glyphSets[idx] is None:
                                glyphSets[idx] = merger.fonts[idx].getGlyphSet()
                        # if _glyphsAreSame(glyphSets[oldFontIdx], glyphSets[fontIdx], oldgid, gid):
                        #     continue
                    merger.duplicateGlyphsPerFont[fontIdx][oldgid] = gid
                elif merger.duplicateGlyphsPerFont[fontIdx][oldgid] != gid:
                    # Char previously mapped to oldgid but oldgid is already remapped to a different
                    # gid, because of another Unicode character.
                    # TODO: Try harder to do something about these.
                    log.warning(
                        "Dropped mapping from codepoint %#06X to glyphId '%s'", uni, gid
                    )
|
||||
|
||||
|
||||
def renameCFFCharStrings(merger, glyphOrder, cffTable):
    """Rename topDictIndex charStrings based on glyphOrder."""
    topDict = cffTable.cff.topDictIndex[0]

    # Pair the i-th charstring (in current insertion order) with the
    # i-th name in glyphOrder and rebuild the mapping under the new names.
    renamed = {}
    for index, charString in enumerate(topDict.CharStrings.charStrings.values()):
        renamed[glyphOrder[index]] = charString
    topDict.CharStrings.charStrings = renamed

    topDict.charset = list(glyphOrder)
|
||||
526
venv/lib/python3.12/site-packages/fontTools/merge/layout.py
Normal file
526
venv/lib/python3.12/site-packages/fontTools/merge/layout.py
Normal file
@ -0,0 +1,526 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
||||
|
||||
from fontTools import ttLib
|
||||
from fontTools.ttLib.tables.DefaultTable import DefaultTable
|
||||
from fontTools.ttLib.tables import otTables
|
||||
from fontTools.merge.base import add_method, mergeObjects
|
||||
from fontTools.merge.util import *
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.merge")
|
||||
|
||||
|
||||
def mergeLookupLists(lst):
    """Merge per-font lists of lookup references into a single list.

    Currently just concatenates the input lists via sumLists.
    """
    # TODO Do smarter merge.
    return sumLists(lst)
|
||||
|
||||
|
||||
def mergeFeatures(lst):
    """Merge several Feature tables into one new Feature.

    The merged feature carries no FeatureParams; its lookup list is the
    merge of all non-empty input lookup lists.
    """
    assert lst
    merged = otTables.Feature()
    merged.FeatureParams = None
    lookupLists = [
        feature.LookupListIndex for feature in lst if feature.LookupListIndex
    ]
    merged.LookupListIndex = mergeLookupLists(lookupLists)
    merged.LookupCount = len(merged.LookupListIndex)
    return merged
|
||||
|
||||
|
||||
def mergeFeatureLists(lst):
    """Merge lists of FeatureRecords, grouping by FeatureTag.

    Features sharing a tag are merged via mergeFeatures; the result is a
    new list of FeatureRecords sorted by tag.
    """
    byTag = {}
    for records in lst:
        for record in records:
            byTag.setdefault(record.FeatureTag, []).append(record.Feature)
    merged = []
    for tag in sorted(byTag):
        record = otTables.FeatureRecord()
        record.FeatureTag = tag
        record.Feature = mergeFeatures(byTag[tag])
        merged.append(record)
    return merged
|
||||
|
||||
|
||||
def mergeLangSyses(lst):
    """Merge several LangSys tables into one.

    All inputs must have ReqFeatureIndex unset (0xFFFF); merging required
    features is not supported.
    """
    assert lst

    # TODO Support merging ReqFeatureIndex
    assert all(langSys.ReqFeatureIndex == 0xFFFF for langSys in lst)

    merged = otTables.LangSys()
    merged.LookupOrder = None
    merged.ReqFeatureIndex = 0xFFFF
    featureLists = [langSys.FeatureIndex for langSys in lst if langSys.FeatureIndex]
    merged.FeatureIndex = mergeFeatureLists(featureLists)
    merged.FeatureCount = len(merged.FeatureIndex)
    return merged
|
||||
|
||||
|
||||
def mergeScripts(lst):
    """Merge several Script tables into one.

    A single input is returned unchanged. Otherwise LangSys tables are
    grouped by tag and merged, and DefaultLangSys tables (where present)
    are merged as well.
    """
    assert lst

    if len(lst) == 1:
        return lst[0]
    byTag = {}
    for script in lst:
        for record in script.LangSysRecord:
            byTag.setdefault(record.LangSysTag, []).append(record.LangSys)
    mergedRecords = []
    for tag, langSysList in sorted(byTag.items()):
        record = otTables.LangSysRecord()
        record.LangSys = mergeLangSyses(langSysList)
        record.LangSysTag = tag
        mergedRecords.append(record)

    merged = otTables.Script()
    merged.LangSysRecord = mergedRecords
    merged.LangSysCount = len(mergedRecords)
    defaults = [script.DefaultLangSys for script in lst if script.DefaultLangSys]
    if defaults:
        merged.DefaultLangSys = mergeLangSyses(defaults)
    else:
        merged.DefaultLangSys = None
    return merged
|
||||
|
||||
|
||||
def mergeScriptRecords(lst):
    """Merge lists of ScriptRecords, grouping by ScriptTag.

    Scripts sharing a tag are merged via mergeScripts; the result is a
    new list of ScriptRecords sorted by tag.
    """
    byTag = {}
    for records in lst:
        for record in records:
            byTag.setdefault(record.ScriptTag, []).append(record.Script)
    merged = []
    for tag in sorted(byTag):
        record = otTables.ScriptRecord()
        record.ScriptTag = tag
        record.Script = mergeScripts(byTag[tag])
        merged.append(record)
    return merged
|
||||
|
||||
|
||||
# Merge policies: each mergeMap maps an attribute name to the function
# that combines that attribute's values across the input fonts.  "*" is
# the fallback for attributes not listed explicitly.

otTables.ScriptList.mergeMap = {
    "ScriptCount": lambda lst: None,  # TODO
    "ScriptRecord": mergeScriptRecords,
}
otTables.BaseScriptList.mergeMap = {
    "BaseScriptCount": lambda lst: None,  # TODO
    # TODO: Merge duplicate entries
    "BaseScriptRecord": lambda lst: sorted(
        sumLists(lst), key=lambda s: s.BaseScriptTag
    ),
}

otTables.FeatureList.mergeMap = {
    "FeatureCount": sum,
    "FeatureRecord": lambda lst: sorted(sumLists(lst), key=lambda s: s.FeatureTag),
}

otTables.LookupList.mergeMap = {
    "LookupCount": sum,
    "Lookup": sumLists,
}

otTables.Coverage.mergeMap = {
    "Format": min,
    "glyphs": sumLists,
}

otTables.ClassDef.mergeMap = {
    "Format": min,
    "classDefs": sumDicts,
}

otTables.LigCaretList.mergeMap = {
    "Coverage": mergeObjects,
    "LigGlyphCount": sum,
    "LigGlyph": sumLists,
}

otTables.AttachList.mergeMap = {
    "Coverage": mergeObjects,
    "GlyphCount": sum,
    "AttachPoint": sumLists,
}

# XXX Renumber MarkFilterSets of lookups
otTables.MarkGlyphSetsDef.mergeMap = {
    "MarkSetTableFormat": equal,
    "MarkSetCount": sum,
    "Coverage": sumLists,
}

otTables.Axis.mergeMap = {
    "*": mergeObjects,
}

# XXX Fix BASE table merging
otTables.BaseTagList.mergeMap = {
    "BaseTagCount": sum,
    "BaselineTag": sumLists,
}

# Top-level layout tables: merge all attributes recursively, keeping the
# highest version number seen.
otTables.GDEF.mergeMap = otTables.GSUB.mergeMap = otTables.GPOS.mergeMap = (
    otTables.BASE.mergeMap
) = otTables.JSTF.mergeMap = otTables.MATH.mergeMap = {
    "*": mergeObjects,
    "Version": max,
}

# ttLib table wrappers delegate to the otTables merge maps above via
# their "table" attribute.
ttLib.getTableClass("GDEF").mergeMap = ttLib.getTableClass("GSUB").mergeMap = (
    ttLib.getTableClass("GPOS").mergeMap
) = ttLib.getTableClass("BASE").mergeMap = ttLib.getTableClass(
    "JSTF"
).mergeMap = ttLib.getTableClass(
    "MATH"
).mergeMap = {
    "tableTag": onlyExisting(equal),  # XXX clean me up
    "table": mergeObjects,
}
|
||||
|
||||
|
||||
@add_method(ttLib.getTableClass("GSUB"))
def merge(self, m, tables):
    """Merge GSUB tables, resolving duplicate glyph mappings.

    For each input font with duplicate glyphs recorded in
    m.duplicateGlyphsPerFont, a synthesized 'locl' feature with a
    single-substitution lookup is inserted into that font's GSUB before
    the generic table merge runs.
    """
    assert len(tables) == len(m.duplicateGlyphsPerFont)
    for i, (table, dups) in enumerate(zip(tables, m.duplicateGlyphsPerFont)):
        if not dups:
            continue
        # A font needing duplicate resolution but having no GSUB cannot
        # be fixed up; warn and move on.
        if table is None or table is NotImplemented:
            log.warning(
                "Have non-identical duplicates to resolve for '%s' but no GSUB. Are duplicates intended?: %s",
                m.fonts[i]._merger__name,
                dups,
            )
            continue

        # One synthesized feature/lookup pair is shared by every
        # script/langsys of this font.
        synthFeature = None
        synthLookup = None
        for script in table.table.ScriptList.ScriptRecord:
            if script.ScriptTag == "DFLT":
                continue  # XXX
            for langsys in [script.Script.DefaultLangSys] + [
                l.LangSys for l in script.Script.LangSysRecord
            ]:
                if langsys is None:
                    continue  # XXX Create!
                # Reuse an existing 'locl' feature if present, otherwise
                # create (once) and register the synthesized one.
                feature = [v for v in langsys.FeatureIndex if v.FeatureTag == "locl"]
                assert len(feature) <= 1
                if feature:
                    feature = feature[0]
                else:
                    if not synthFeature:
                        synthFeature = otTables.FeatureRecord()
                        synthFeature.FeatureTag = "locl"
                        f = synthFeature.Feature = otTables.Feature()
                        f.FeatureParams = None
                        f.LookupCount = 0
                        f.LookupListIndex = []
                        table.table.FeatureList.FeatureRecord.append(synthFeature)
                        table.table.FeatureList.FeatureCount += 1
                    feature = synthFeature
                    langsys.FeatureIndex.append(feature)
                    langsys.FeatureIndex.sort(key=lambda v: v.FeatureTag)

                # Lazily build the SingleSubst lookup mapping old glyph
                # ids to their replacements, appending it to LookupList.
                if not synthLookup:
                    subtable = otTables.SingleSubst()
                    subtable.mapping = dups
                    synthLookup = otTables.Lookup()
                    synthLookup.LookupFlag = 0
                    synthLookup.LookupType = 1
                    synthLookup.SubTableCount = 1
                    synthLookup.SubTable = [subtable]
                    if table.table.LookupList is None:
                        # mtiLib uses None as default value for LookupList,
                        # while feaLib points to an empty array with count 0
                        # TODO: make them do the same
                        table.table.LookupList = otTables.LookupList()
                        table.table.LookupList.Lookup = []
                        table.table.LookupList.LookupCount = 0
                    table.table.LookupList.Lookup.append(synthLookup)
                    table.table.LookupList.LookupCount += 1

                # Prepend the lookup to the feature unless already first.
                if feature.Feature.LookupListIndex[:1] != [synthLookup]:
                    feature.Feature.LookupListIndex[:0] = [synthLookup]
                    feature.Feature.LookupCount += 1

    DefaultTable.merge(self, m, tables)
    return self
|
||||
|
||||
|
||||
# These subtable types carry no references into the LookupList, so
# remapping lookup indices is a no-op for them.
@add_method(
    otTables.SingleSubst,
    otTables.MultipleSubst,
    otTables.AlternateSubst,
    otTables.LigatureSubst,
    otTables.ReverseChainSingleSubst,
    otTables.SinglePos,
    otTables.PairPos,
    otTables.CursivePos,
    otTables.MarkBasePos,
    otTables.MarkLigPos,
    otTables.MarkMarkPos,
)
def mapLookups(self, lookupMap):
    pass
|
||||
|
||||
|
||||
# Copied and trimmed down from subset.py
@add_method(
    otTables.ContextSubst,
    otTables.ChainContextSubst,
    otTables.ContextPos,
    otTables.ChainContextPos,
)
def __merge_classify_context(self):
    """Return a helper describing attribute names for this contextual
    subtable's class and format, or None for unknown formats.

    Helpers are cached per class and format.
    """

    class ContextHelper(object):
        def __init__(self, klass, Format):
            # Derive the attribute-name prefixes from the class name:
            # *Subst vs *Pos, chained vs plain.
            if klass.__name__.endswith("Subst"):
                Typ = "Sub"
                Type = "Subst"
            else:
                Typ = "Pos"
                Type = "Pos"
            if klass.__name__.startswith("Chain"):
                Chain = "Chain"
            else:
                Chain = ""
            ChainTyp = Chain + Typ

            self.Typ = Typ
            self.Type = Type
            self.Chain = Chain
            self.ChainTyp = ChainTyp

            self.LookupRecord = Type + "LookupRecord"

            # Format 3 has no rule sets, so Rule/RuleSet stay unset then.
            if Format == 1:
                self.Rule = ChainTyp + "Rule"
                self.RuleSet = ChainTyp + "RuleSet"
            elif Format == 2:
                self.Rule = ChainTyp + "ClassRule"
                self.RuleSet = ChainTyp + "ClassSet"

    if self.Format not in [1, 2, 3]:
        return None  # Don't shoot the messenger; let it go
    # Cache helpers on the concrete class, keyed by format.
    if not hasattr(self.__class__, "_merge__ContextHelpers"):
        self.__class__._merge__ContextHelpers = {}
    if self.Format not in self.__class__._merge__ContextHelpers:
        helper = ContextHelper(self.__class__, self.Format)
        self.__class__._merge__ContextHelpers[self.Format] = helper
    return self.__class__._merge__ContextHelpers[self.Format]
|
||||
|
||||
|
||||
@add_method(
    otTables.ContextSubst,
    otTables.ChainContextSubst,
    otTables.ContextPos,
    otTables.ChainContextPos,
)
def mapLookups(self, lookupMap):
    """Remap LookupListIndex references inside contextual subtables."""
    c = self.__merge_classify_context()

    if self.Format in [1, 2]:
        # Formats 1/2 nest lookup records inside rule sets and rules.
        for rs in getattr(self, c.RuleSet):
            if not rs:
                continue
            for r in getattr(rs, c.Rule):
                if not r:
                    continue
                for ll in getattr(r, c.LookupRecord):
                    if not ll:
                        continue
                    ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    elif self.Format == 3:
        # Format 3 keeps lookup records directly on the subtable.
        for ll in getattr(self, c.LookupRecord):
            if not ll:
                continue
            ll.LookupListIndex = lookupMap[ll.LookupListIndex]
    else:
        assert 0, "unknown format: %s" % self.Format
|
||||
|
||||
|
||||
@add_method(otTables.ExtensionSubst, otTables.ExtensionPos)
def mapLookups(self, lookupMap):
    # Extension subtables just wrap a single real subtable; delegate.
    if self.Format == 1:
        self.ExtSubTable.mapLookups(lookupMap)
    else:
        assert 0, "unknown format: %s" % self.Format
|
||||
|
||||
|
||||
# Walkers that propagate lookup / feature remapping down the layout
# table hierarchy.  Each one applies the given map to its children,
# skipping empty entries.


@add_method(otTables.Lookup)
def mapLookups(self, lookupMap):
    for st in self.SubTable:
        if not st:
            continue
        st.mapLookups(lookupMap)


@add_method(otTables.LookupList)
def mapLookups(self, lookupMap):
    for l in self.Lookup:
        if not l:
            continue
        l.mapLookups(lookupMap)


@add_method(otTables.Lookup)
def mapMarkFilteringSets(self, markFilteringSetMap):
    # LookupFlag bit 0x0010 signals that MarkFilteringSet is present.
    if self.LookupFlag & 0x0010:
        self.MarkFilteringSet = markFilteringSetMap[self.MarkFilteringSet]


@add_method(otTables.LookupList)
def mapMarkFilteringSets(self, markFilteringSetMap):
    for l in self.Lookup:
        if not l:
            continue
        l.mapMarkFilteringSets(markFilteringSetMap)


@add_method(otTables.Feature)
def mapLookups(self, lookupMap):
    self.LookupListIndex = [lookupMap[i] for i in self.LookupListIndex]


@add_method(otTables.FeatureList)
def mapLookups(self, lookupMap):
    for f in self.FeatureRecord:
        if not f or not f.Feature:
            continue
        f.Feature.mapLookups(lookupMap)


@add_method(otTables.DefaultLangSys, otTables.LangSys)
def mapFeatures(self, featureMap):
    self.FeatureIndex = [featureMap[i] for i in self.FeatureIndex]
    # 65535 (0xFFFF) means "no required feature"; leave it alone.
    if self.ReqFeatureIndex != 65535:
        self.ReqFeatureIndex = featureMap[self.ReqFeatureIndex]


@add_method(otTables.Script)
def mapFeatures(self, featureMap):
    if self.DefaultLangSys:
        self.DefaultLangSys.mapFeatures(featureMap)
    for l in self.LangSysRecord:
        if not l or not l.LangSys:
            continue
        l.LangSys.mapFeatures(featureMap)


@add_method(otTables.ScriptList)
def mapFeatures(self, featureMap):
    for s in self.ScriptRecord:
        if not s or not s.Script:
            continue
        s.Script.mapFeatures(featureMap)
|
||||
|
||||
|
||||
def layoutPreMerge(font):
    """Prepare a font's layout tables for merging.

    Replaces index-based cross-references (lookup indices, feature
    indices, mark-filtering-set indices) with direct object references,
    so the generic merge can concatenate lists without invalidating
    references.  layoutPostMerge reverses this.
    """
    # Map indices to references

    GDEF = font.get("GDEF")
    GSUB = font.get("GSUB")
    GPOS = font.get("GPOS")

    for t in [GSUB, GPOS]:
        if not t:
            continue

        if t.table.LookupList:
            lookupMap = {i: v for i, v in enumerate(t.table.LookupList.Lookup)}
            t.table.LookupList.mapLookups(lookupMap)
            t.table.FeatureList.mapLookups(lookupMap)

            # Mark filtering sets live in GDEF (version 1.2+) but are
            # referenced from GSUB/GPOS lookups.
            if (
                GDEF
                and GDEF.table.Version >= 0x00010002
                and GDEF.table.MarkGlyphSetsDef
            ):
                markFilteringSetMap = {
                    i: v for i, v in enumerate(GDEF.table.MarkGlyphSetsDef.Coverage)
                }
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

        if t.table.FeatureList and t.table.ScriptList:
            featureMap = {i: v for i, v in enumerate(t.table.FeatureList.FeatureRecord)}
            t.table.ScriptList.mapFeatures(featureMap)

    # TODO FeatureParams nameIDs
|
||||
|
||||
|
||||
def layoutPostMerge(font):
    """Convert layout-table object references back into indices.

    Counterpart of layoutPreMerge.  For features and lookups this runs a
    three-pass scheme: register new objects (GregariousIdentityDict),
    record which ones are actually referenced
    (AttendanceRecordingIdentityDict), drop the rest, then map the
    survivors to their final indices (NonhashableDict).
    """
    # Map references back to indices

    GDEF = font.get("GDEF")
    GSUB = font.get("GSUB")
    GPOS = font.get("GPOS")

    for t in [GSUB, GPOS]:
        if not t:
            continue

        if t.table.FeatureList and t.table.ScriptList:
            # Collect unregistered (new) features.
            featureMap = GregariousIdentityDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            # Record used features.
            featureMap = AttendanceRecordingIdentityDict(
                t.table.FeatureList.FeatureRecord
            )
            t.table.ScriptList.mapFeatures(featureMap)
            usedIndices = featureMap.s

            # Remove unused features
            t.table.FeatureList.FeatureRecord = [
                f
                for i, f in enumerate(t.table.FeatureList.FeatureRecord)
                if i in usedIndices
            ]

            # Map back to indices.
            featureMap = NonhashableDict(t.table.FeatureList.FeatureRecord)
            t.table.ScriptList.mapFeatures(featureMap)

            t.table.FeatureList.FeatureCount = len(t.table.FeatureList.FeatureRecord)

        if t.table.LookupList:
            # Collect unregistered (new) lookups.
            lookupMap = GregariousIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            # Record used lookups.
            lookupMap = AttendanceRecordingIdentityDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)
            usedIndices = lookupMap.s

            # Remove unused lookups
            t.table.LookupList.Lookup = [
                l for i, l in enumerate(t.table.LookupList.Lookup) if i in usedIndices
            ]

            # Map back to indices.
            lookupMap = NonhashableDict(t.table.LookupList.Lookup)
            t.table.FeatureList.mapLookups(lookupMap)
            t.table.LookupList.mapLookups(lookupMap)

            t.table.LookupList.LookupCount = len(t.table.LookupList.Lookup)

            if GDEF and GDEF.table.Version >= 0x00010002:
                markFilteringSetMap = NonhashableDict(
                    GDEF.table.MarkGlyphSetsDef.Coverage
                )
                t.table.LookupList.mapMarkFilteringSets(markFilteringSetMap)

    # TODO FeatureParams nameIDs
|
||||
85
venv/lib/python3.12/site-packages/fontTools/merge/options.py
Normal file
85
venv/lib/python3.12/site-packages/fontTools/merge/options.py
Normal file
@ -0,0 +1,85 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
||||
|
||||
|
||||
class Options(object):
    """Container for the font-merging options.

    Options live as plain attributes; assigning an unknown option name
    raises UnknownOptionError.
    """

    class UnknownOptionError(Exception):
        pass

    def __init__(self, **kwargs):
        # Defaults.  Keyword overrides go through set() so unknown
        # names are rejected up front.
        self.verbose = False
        self.timing = False
        self.drop_tables = []
        self.input_file = None
        self.output_file = "merged.ttf"
        self.import_file = None

        self.set(**kwargs)

    def set(self, **kwargs):
        """Assign known options; raise UnknownOptionError for others."""
        for name, value in kwargs.items():
            if not hasattr(self, name):
                raise self.UnknownOptionError("Unknown option '%s'" % name)
            setattr(self, name, value)

    def parse_opts(self, argv, ignore_unknown=[]):
        """Consume '--name[=value]' style arguments from argv.

        Returns the arguments that were not consumed: non-option
        arguments plus any unknown options listed in (or allowed by)
        ignore_unknown.  List-valued options support ',' separated
        values and the '+=' / '-=' operators.
        """
        leftover = []
        parsed = {}
        for arg in argv:
            original = arg
            if not arg.startswith("--"):
                leftover.append(arg)
                continue
            arg = arg[2:]
            eq = arg.find("=")
            op = "="
            if eq == -1:
                # Bare flag: '--name' means True, '--no-name' means False.
                if arg.startswith("no-"):
                    key = arg[3:]
                    value = False
                else:
                    key = arg
                    value = True
            else:
                key = arg[:eq]
                if key[-1] in "-+":
                    op = key[-1] + "="  # Ops is '-=' or '+=' now.
                    key = key[:-1]
                value = arg[eq + 1 :]
            dashed = key
            key = key.replace("-", "_")
            if not hasattr(self, key):
                if ignore_unknown is True or dashed in ignore_unknown:
                    leftover.append(original)
                    continue
                else:
                    raise self.UnknownOptionError("Unknown option '%s'" % arg)

            current = getattr(self, key)
            # Coerce the raw value to the type of the existing setting.
            if isinstance(current, bool):
                value = bool(value)
            elif isinstance(current, int):
                value = int(value)
            elif isinstance(current, list):
                items = value.split(",")
                if items == [""]:
                    items = []
                items = [
                    int(x, 0) if len(x) and x[0] in "0123456789" else x for x in items
                ]
                if op == "=":
                    value = items
                elif op == "+=":
                    value = current
                    value.extend(items)
                elif op == "-=":
                    value = current
                    for x in items:
                        if x in value:
                            value.remove(x)
                else:
                    assert 0

            parsed[key] = value
        self.set(**parsed)

        return leftover
|
||||
341
venv/lib/python3.12/site-packages/fontTools/merge/tables.py
Normal file
341
venv/lib/python3.12/site-packages/fontTools/merge/tables.py
Normal file
@ -0,0 +1,341 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
||||
|
||||
from fontTools import ttLib, cffLib
|
||||
from fontTools.misc.psCharStrings import T2WidthExtractor
|
||||
from fontTools.ttLib.tables.DefaultTable import DefaultTable
|
||||
from fontTools.merge.base import add_method, mergeObjects
|
||||
from fontTools.merge.cmap import computeMegaCmap
|
||||
from fontTools.merge.util import *
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.merge")
|
||||
|
||||
|
||||
# Merge policies for the metrics/header tables.  Each mergeMap maps an
# attribute name to the function combining its values across fonts;
# "*" is the fallback for unlisted attributes.

ttLib.getTableClass("maxp").mergeMap = {
    "*": max,
    "tableTag": equal,
    "tableVersion": equal,
    "numGlyphs": sum,
    "maxStorage": first,
    "maxFunctionDefs": first,
    "maxInstructionDefs": first,
    # TODO When we correctly merge hinting data, update these values:
    # maxFunctionDefs, maxInstructionDefs, maxSizeOfInstructions
}

# Per-bit merge policy for head.flags, consumed by mergeBits: keys are
# bit positions, values the combiner for that bit; "*" is the default.
headFlagsMergeBitMap = {
    "size": 16,
    "*": bitwise_or,
    1: bitwise_and,  # Baseline at y = 0
    2: bitwise_and,  # lsb at x = 0
    3: bitwise_and,  # Force ppem to integer values. FIXME?
    5: bitwise_and,  # Font is vertical
    6: lambda bit: 0,  # Always set to zero
    11: bitwise_and,  # Font data is 'lossless'
    13: bitwise_and,  # Optimized for ClearType
    14: bitwise_and,  # Last resort font. FIXME? equal or first may be better
    15: lambda bit: 0,  # Always set to zero
}

ttLib.getTableClass("head").mergeMap = {
    "tableTag": equal,
    "tableVersion": max,
    "fontRevision": max,
    "checkSumAdjustment": lambda lst: 0,  # We need *something* here
    "magicNumber": equal,
    "flags": mergeBits(headFlagsMergeBitMap),
    "unitsPerEm": equal,
    "created": current_time,
    "modified": current_time,
    "xMin": min,
    "yMin": min,
    "xMax": max,
    "yMax": max,
    "macStyle": first,
    "lowestRecPPEM": max,
    "fontDirectionHint": lambda lst: 2,
    "indexToLocFormat": first,
    "glyphDataFormat": equal,
}

ttLib.getTableClass("hhea").mergeMap = {
    "*": equal,
    "tableTag": equal,
    "tableVersion": max,
    "ascent": max,
    "descent": min,
    "lineGap": max,
    "advanceWidthMax": max,
    "minLeftSideBearing": min,
    "minRightSideBearing": min,
    "xMaxExtent": max,
    "caretSlopeRise": first,
    "caretSlopeRun": first,
    "caretOffset": first,
    "numberOfHMetrics": recalculate,
}

ttLib.getTableClass("vhea").mergeMap = {
    "*": equal,
    "tableTag": equal,
    "tableVersion": max,
    "ascent": max,
    "descent": min,
    "lineGap": max,
    "advanceHeightMax": max,
    "minTopSideBearing": min,
    "minBottomSideBearing": min,
    "yMaxExtent": max,
    "caretSlopeRise": first,
    "caretSlopeRun": first,
    "caretOffset": first,
    "numberOfVMetrics": recalculate,
}

# Per-bit merge policy for OS/2.fsType, consumed by mergeOs2FsType
# below via mergeBits.
os2FsTypeMergeBitMap = {
    "size": 16,
    "*": lambda bit: 0,
    1: bitwise_or,  # no embedding permitted
    2: bitwise_and,  # allow previewing and printing documents
    3: bitwise_and,  # allow editing documents
    8: bitwise_or,  # no subsetting permitted
    9: bitwise_or,  # no embedding of outlines permitted
}
|
||||
|
||||
|
||||
def mergeOs2FsType(lst):
    """Merge OS/2 fsType values, keeping the least restrictive rights.

    Returns 0 immediately when every input font is unrestricted.
    """
    values = list(lst)
    if all(value == 0 for value in values):
        return 0

    # Normalize each value so the per-bit merge below yields the least
    # restrictive combination.  The three branches are mutually
    # exclusive on the original value.
    for index, value in enumerate(values):
        # unset bit 1 (no embedding permitted) if either bit 2 or 3 is set
        if value & 0x000C:
            values[index] = value & ~0x0002
        # set bit 2 (allow previewing) if bit 3 is set (allow editing)
        elif value & 0x0008:
            values[index] = value | 0x0004
        # set bits 2 and 3 if everything is allowed
        elif value == 0:
            values[index] = 0x000C

    merged = mergeBits(os2FsTypeMergeBitMap)(values)
    # unset bits 2 and 3 if bit 1 is set (some font is "no embedding")
    if merged & 0x0002:
        merged &= ~0x000C
    return merged
|
||||
|
||||
|
||||
# Merge policy for the OS/2 table.  "onlyExisting" wrappers skip fonts
# whose OS/2 version predates the given field.
ttLib.getTableClass("OS/2").mergeMap = {
    "*": first,
    "tableTag": equal,
    "version": max,
    "xAvgCharWidth": first,  # Will be recalculated at the end on the merged font
    "fsType": mergeOs2FsType,  # Will be overwritten
    "panose": first,  # FIXME: should really be the first Latin font
    "ulUnicodeRange1": bitwise_or,
    "ulUnicodeRange2": bitwise_or,
    "ulUnicodeRange3": bitwise_or,
    "ulUnicodeRange4": bitwise_or,
    "fsFirstCharIndex": min,
    "fsLastCharIndex": max,
    "sTypoAscender": max,
    "sTypoDescender": min,
    "sTypoLineGap": max,
    "usWinAscent": max,
    "usWinDescent": max,
    # Version 1
    "ulCodePageRange1": onlyExisting(bitwise_or),
    "ulCodePageRange2": onlyExisting(bitwise_or),
    # Version 2, 3, 4
    "sxHeight": onlyExisting(max),
    "sCapHeight": onlyExisting(max),
    "usDefaultChar": onlyExisting(first),
    "usBreakChar": onlyExisting(first),
    "usMaxContext": onlyExisting(max),
    # version 5
    "usLowerOpticalPointSize": onlyExisting(min),
    "usUpperOpticalPointSize": onlyExisting(max),
}
|
||||
|
||||
|
||||
@add_method(ttLib.getTableClass("OS/2"))
def merge(self, m, tables):
    """Merge OS/2 tables, then fix up version-dependent fsType bits."""
    DefaultTable.merge(self, m, tables)
    if self.version < 2:
        # bits 8 and 9 are reserved and should be set to zero
        self.fsType &= ~0x0300
    if self.version >= 3:
        # Only one of bits 1, 2, and 3 may be set. We already take
        # care of bit 1 implications in mergeOs2FsType. So unset
        # bit 2 if bit 3 is already set.
        if self.fsType & 0x0008:
            self.fsType &= ~0x0004
    return self
|
||||
|
||||
|
||||
# Merge policies for post, metrics, name, loca and glyf tables.

ttLib.getTableClass("post").mergeMap = {
    "*": first,
    "tableTag": equal,
    "formatType": max,
    "isFixedPitch": min,
    "minMemType42": max,
    "maxMemType42": lambda lst: 0,
    "minMemType1": max,
    "maxMemType1": lambda lst: 0,
    "mapping": onlyExisting(sumDicts),
    "extraNames": lambda lst: [],
}

ttLib.getTableClass("vmtx").mergeMap = ttLib.getTableClass("hmtx").mergeMap = {
    "tableTag": equal,
    "metrics": sumDicts,
}

ttLib.getTableClass("name").mergeMap = {
    "tableTag": equal,
    "names": first,  # FIXME? Does mixing name records make sense?
}

ttLib.getTableClass("loca").mergeMap = {
    "*": recalculate,
    "tableTag": equal,
}

ttLib.getTableClass("glyf").mergeMap = {
    "tableTag": equal,
    "glyphs": sumDicts,
    "glyphOrder": sumLists,
    "_reverseGlyphOrder": recalculate,
    "axisTags": equal,
}
|
||||
|
||||
|
||||
@add_method(ttLib.getTableClass("glyf"))
def merge(self, m, tables):
    """Merge glyf tables: strip hints from all but the first font,
    expand composites, then run the generic mergeMap-driven merge."""
    for i, table in enumerate(tables):
        for g in table.glyphs.values():
            if i:
                # Drop hints for all but first font, since
                # we don't map functions / CVT values.
                g.removeHinting()
            # Expand composite glyphs to load their
            # composite glyph names.
            if g.isComposite():
                g.expand(table)
    return DefaultTable.merge(self, m, tables)
|
||||
|
||||
|
||||
# Hinting-related tables cannot be meaningfully combined, so the whole
# table from the first font is kept as-is.
ttLib.getTableClass("prep").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("fpgm").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("cvt ").mergeMap = lambda self, lst: first(lst)
ttLib.getTableClass("gasp").mergeMap = lambda self, lst: first(
    lst
)  # FIXME? Appears irreconcilable
|
||||
|
||||
|
||||
@add_method(ttLib.getTableClass("CFF "))
def merge(self, m, tables):
    """Merge CFF tables into the first font's table.

    CID-keyed fonts (with FDSelect) are rejected.  All fonts are
    desubroutinized first; every font is switched to the first font's
    Private dict, and charstring widths are rewritten where the
    default/nominal widths differ.
    """
    if any(hasattr(table.cff[0], "FDSelect") for table in tables):
        raise NotImplementedError("Merging CID-keyed CFF tables is not supported yet")

    for table in tables:
        table.cff.desubroutinize()

    newcff = tables[0]
    newfont = newcff.cff[0]
    private = newfont.Private
    newDefaultWidthX, newNominalWidthX = private.defaultWidthX, private.nominalWidthX
    storedNamesStrings = []
    glyphOrderStrings = []
    glyphOrder = set(newfont.getGlyphOrder())

    # Partition the string index into glyph-name strings and the rest,
    # so glyph names can be kept first in the merged string index.
    for name in newfont.strings.strings:
        if name not in glyphOrder:
            storedNamesStrings.append(name)
        else:
            glyphOrderStrings.append(name)

    chrset = list(newfont.charset)
    newcs = newfont.CharStrings
    log.debug("FONT 0 CharStrings: %d.", len(newcs))

    for i, table in enumerate(tables[1:], start=1):
        font = table.cff[0]
        defaultWidthX, nominalWidthX = (
            font.Private.defaultWidthX,
            font.Private.nominalWidthX,
        )
        widthsDiffer = (
            defaultWidthX != newDefaultWidthX or nominalWidthX != newNominalWidthX
        )
        font.Private = private
        fontGlyphOrder = set(font.getGlyphOrder())
        for name in font.strings.strings:
            if name in fontGlyphOrder:
                glyphOrderStrings.append(name)
        cs = font.CharStrings
        gs = table.cff.GlobalSubrs
        log.debug("Font %d CharStrings: %d.", i, len(cs))
        chrset.extend(font.charset)
        if newcs.charStringsAreIndexed:
            # NOTE(review): the inner loop variable shadows the outer
            # `i`; harmless as the outer `i` is not read afterwards.
            for i, name in enumerate(cs.charStrings, start=len(newcs)):
                newcs.charStrings[name] = i
                newcs.charStringsIndex.items.append(None)
        for name in cs.charStrings:
            # When widths differ from font 0's Private dict, strip the
            # charstring's explicit width (if any) and re-insert it
            # relative to the new nominalWidthX.
            if widthsDiffer:
                c = cs[name]
                defaultWidthXToken = object()
                extractor = T2WidthExtractor([], [], nominalWidthX, defaultWidthXToken)
                extractor.execute(c)
                width = extractor.width
                if width is not defaultWidthXToken:
                    # The following will be wrong if the width is added
                    # by a subroutine. Ouch!
                    c.program.pop(0)
                else:
                    width = defaultWidthX
                if width != newDefaultWidthX:
                    c.program.insert(0, width - newNominalWidthX)
            newcs[name] = cs[name]

    newfont.charset = chrset
    newfont.numGlyphs = len(chrset)
    newfont.strings.strings = glyphOrderStrings + storedNamesStrings

    return newcff
|
||||
|
||||
|
||||
@add_method(ttLib.getTableClass("cmap"))
def merge(self, m, tables):
    """Build the merged cmap from the mega unicode map.

    Always emits a Windows BMP format-4 subtable; additionally emits a
    Windows full-repertoire format-12 subtable when any codepoint is
    beyond the BMP.
    """
    # TODO Handle format=14.
    if not hasattr(m, "cmap"):
        computeMegaCmap(m, tables)
    cmap = m.cmap

    cmapBmpOnly = {uni: gid for uni, gid in cmap.items() if uni <= 0xFFFF}
    self.tables = []
    module = ttLib.getTableModule("cmap")
    if len(cmapBmpOnly) != len(cmap):
        # format-12 required.
        cmapTable = module.cmap_classes[12](12)
        cmapTable.platformID = 3
        cmapTable.platEncID = 10
        cmapTable.language = 0
        cmapTable.cmap = cmap
        self.tables.append(cmapTable)
    # always create format-4
    cmapTable = module.cmap_classes[4](4)
    cmapTable.platformID = 3
    cmapTable.platEncID = 1
    cmapTable.language = 0
    cmapTable.cmap = cmapBmpOnly
    # ordered by platform then encoding
    self.tables.insert(0, cmapTable)
    self.tableVersion = 0
    self.numSubTables = len(self.tables)
    return self
|
||||
78
venv/lib/python3.12/site-packages/fontTools/merge/unicode.py
Normal file
78
venv/lib/python3.12/site-packages/fontTools/merge/unicode.py
Normal file
@ -0,0 +1,78 @@
|
||||
# Copyright 2021 Behdad Esfahbod. All Rights Reserved.
|
||||
|
||||
|
||||
def is_Default_Ignorable(u):
    """Return True if code point *u* is a Default_Ignorable_Code_Point.

    See http://www.unicode.org/reports/tr44/#Default_Ignorable_Code_Point
    Data matches DerivedCoreProperties.txt of Unicode 14.0.

    TODO Move me to unicodedata module and autogenerate.
    """
    # Isolated code points carrying the property.
    if u in (
        0x00AD,  # SOFT HYPHEN
        0x034F,  # COMBINING GRAPHEME JOINER
        0x061C,  # ARABIC LETTER MARK
        0x180E,  # MONGOLIAN VOWEL SEPARATOR
        0x180F,  # MONGOLIAN FREE VARIATION SELECTOR FOUR
        0x2065,  # <reserved-2065>
        0x3164,  # HANGUL FILLER
        0xFEFF,  # ZERO WIDTH NO-BREAK SPACE
        0xFFA0,  # HALFWIDTH HANGUL FILLER
    ):
        return True
    # Inclusive ranges carrying the property.  The E0000..E0FFF entries of
    # DerivedCoreProperties.txt are contiguous and merged into one range.
    for low, high in (
        (0x115F, 0x1160),  # HANGUL CHOSEONG FILLER..HANGUL JUNGSEONG FILLER
        (0x17B4, 0x17B5),  # KHMER VOWEL INHERENT AQ..KHMER VOWEL INHERENT AA
        (0x180B, 0x180D),  # MONGOLIAN FREE VARIATION SELECTOR ONE..THREE
        (0x200B, 0x200F),  # ZERO WIDTH SPACE..RIGHT-TO-LEFT MARK
        (0x202A, 0x202E),  # LEFT-TO-RIGHT EMBEDDING..RIGHT-TO-LEFT OVERRIDE
        (0x2060, 0x2064),  # WORD JOINER..INVISIBLE PLUS
        (0x2066, 0x206F),  # LEFT-TO-RIGHT ISOLATE..NOMINAL DIGIT SHAPES
        (0xFE00, 0xFE0F),  # VARIATION SELECTOR-1..VARIATION SELECTOR-16
        (0xFFF0, 0xFFF8),  # <reserved-FFF0>..<reserved-FFF8>
        (0x1BCA0, 0x1BCA3),  # SHORTHAND FORMAT LETTER OVERLAP..UP STEP
        (0x1D173, 0x1D17A),  # MUSICAL SYMBOL BEGIN BEAM..END PHRASE
        (0xE0000, 0xE0FFF),  # tags, variation selectors 17-256, reserved
    ):
        if low <= u <= high:
            return True
    return False
|
||||
143
venv/lib/python3.12/site-packages/fontTools/merge/util.py
Normal file
143
venv/lib/python3.12/site-packages/fontTools/merge/util.py
Normal file
@ -0,0 +1,143 @@
|
||||
# Copyright 2013 Google, Inc. All Rights Reserved.
|
||||
#
|
||||
# Google Author(s): Behdad Esfahbod, Roozbeh Pournader
|
||||
|
||||
from fontTools.misc.timeTools import timestampNow
|
||||
from fontTools.ttLib.tables.DefaultTable import DefaultTable
|
||||
from functools import reduce
|
||||
import operator
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger("fontTools.merge")
|
||||
|
||||
|
||||
# General utility functions for merging values from different fonts
|
||||
|
||||
|
||||
def equal(lst):
    """Return the first item, asserting that every item equals it."""
    items = list(lst)
    it = iter(items)
    reference = next(it)
    for item in it:
        assert item == reference, "Expected all items to be equal: %s" % items
    return reference
|
||||
|
||||
|
||||
def first(lst):
    """Return the first item of the iterable (raises StopIteration if empty)."""
    it = iter(lst)
    return next(it)
|
||||
|
||||
|
||||
def recalculate(lst):
    # Merge strategy: the value cannot be combined from the inputs and is
    # flagged with NotImplemented (presumably recalculated later by the
    # merger — confirm against the caller).
    return NotImplemented
|
||||
|
||||
|
||||
def current_time(lst):
    # Merge strategy: ignore the input values entirely and stamp the merged
    # value with the current time via fontTools.misc.timeTools.timestampNow.
    return timestampNow()
|
||||
|
||||
|
||||
def bitwise_and(lst):
    """Merge values by AND-ing them all together."""
    return reduce(lambda acc, value: acc & value, lst)
|
||||
|
||||
|
||||
def bitwise_or(lst):
    """Merge values by OR-ing them all together."""
    return reduce(lambda acc, value: acc | value, lst)
|
||||
|
||||
|
||||
def avg_int(lst):
    """Return the floor of the average of the values."""
    values = list(lst)
    return sum(values) // len(values)
|
||||
|
||||
|
||||
def onlyExisting(func):
    """Wrap *func* so that, given a list, it is applied only to the items
    that are not NotImplemented — and only if at least one such item
    remains.  Otherwise NotImplemented is returned."""

    def wrapper(lst):
        existing = [value for value in lst if value is not NotImplemented]
        if not existing:
            return NotImplemented
        return func(existing)

    return wrapper
|
||||
|
||||
|
||||
def sumLists(lst):
    """Concatenate all the given sequences into a single list."""
    return [element for chunk in lst for element in chunk]
|
||||
|
||||
|
||||
def sumDicts(lst):
    """Merge all the given mappings into one dict; later entries win."""
    merged = {}
    for mapping in lst:
        merged.update(mapping)
    return merged
|
||||
|
||||
|
||||
def mergeBits(bitmap):
    """Build a merge function that combines flag words bit by bit.

    *bitmap* maps bit numbers (or the wildcard key ``"*"``) to a merge
    function taking an iterable of booleans; ``bitmap["size"]`` gives the
    number of bits to process.
    """

    def wrapper(lst):
        values = list(lst)
        merged = 0
        for bit in range(bitmap["size"]):
            # Per-bit merge logic, falling back to the "*" wildcard entry.
            if bit in bitmap:
                logic = bitmap[bit]
            elif "*" in bitmap:
                logic = bitmap["*"]
            else:
                raise Exception("Don't know how to merge bit %s" % bit)
            mask = 1 << bit
            merged |= logic(bool(value & mask) for value in values) << bit
        return merged

    return wrapper

|
||||
|
||||
|
||||
class AttendanceRecordingIdentityDict(object):
    """A dictionary-like object that records indices of items actually accessed
    from a list."""

    def __init__(self, lst):
        self.l = lst
        self.s = set()
        # Identity-keyed index of every item in the list.
        self.d = {id(item): index for index, item in enumerate(lst)}

    def __getitem__(self, v):
        # Record that this item was looked up, then hand it back unchanged.
        index = self.d[id(v)]
        self.s.add(index)
        return v
|
||||
|
||||
|
||||
class GregariousIdentityDict(object):
    """A dictionary-like object that welcomes guests without reservations and
    adds them to the end of the guest list."""

    def __init__(self, lst):
        self.l = lst
        # Identity set of everything already on the list.
        self.s = {id(item) for item in lst}

    def __getitem__(self, v):
        key = id(v)
        if key not in self.s:
            # Unknown item: remember it and append to the list.
            self.s.add(key)
            self.l.append(v)
        return v
|
||||
|
||||
|
||||
class NonhashableDict(object):
    """A dictionary-like object mapping objects (by identity) to values."""

    def __init__(self, keys, values=None):
        # With no values given, map each key to its position in *keys*.
        if values is None:
            self.d = {id(key): index for index, key in enumerate(keys)}
        else:
            self.d = dict(zip(map(id, keys), values))

    def __getitem__(self, k):
        return self.d[id(k)]

    def __setitem__(self, k, v):
        self.d[id(k)] = v

    def __delitem__(self, k):
        del self.d[id(k)]
|
||||
@ -0,0 +1 @@
|
||||
"""Empty __init__.py file to signal Python this directory is a package."""
|
||||
424
venv/lib/python3.12/site-packages/fontTools/misc/arrayTools.py
Normal file
424
venv/lib/python3.12/site-packages/fontTools/misc/arrayTools.py
Normal file
@ -0,0 +1,424 @@
|
||||
"""Routines for calculating bounding boxes, point in rectangle calculations and
|
||||
so on.
|
||||
"""
|
||||
|
||||
from fontTools.misc.roundTools import otRound
|
||||
from fontTools.misc.vector import Vector as _Vector
|
||||
import math
|
||||
import warnings
|
||||
|
||||
|
||||
def calcBounds(array):
    """Calculate the bounding rectangle of a 2D points array.

    Args:
        array: A sequence of 2D tuples.

    Returns:
        ``(xMin, yMin, xMax, yMax)``; ``(0, 0, 0, 0)`` for an empty input.
    """
    if not array:
        return 0, 0, 0, 0
    xs, ys = zip(*array)
    return min(xs), min(ys), max(xs), max(ys)
|
||||
|
||||
|
||||
def calcIntBounds(array, round=otRound):
    """Calculate the integer bounding rectangle of a 2D points array.

    Values are rounded to closest integer towards ``+Infinity`` using the
    :func:`fontTools.misc.fixedTools.otRound` function by default, unless
    an optional ``round`` function is passed.

    Args:
        array: A sequence of 2D tuples.
        round: A rounding function of type ``f(x: float) -> int``.

    Returns:
        A four-item tuple of integers: ``(xMin, yMin, xMax, yMax)``.
    """
    xMin, yMin, xMax, yMax = calcBounds(array)
    return round(xMin), round(yMin), round(xMax), round(yMax)
|
||||
|
||||
|
||||
def updateBounds(bounds, p, min=min, max=max):
    """Grow a bounding rectangle to include point *p*.

    Args:
        bounds: ``(xMin, yMin, xMax, yMax)`` or ``None`` for "empty so far".
        p: A 2D tuple representing a point.
        min,max: functions to compute the minimum and maximum.

    Returns:
        The updated bounding rectangle ``(xMin, yMin, xMax, yMax)``.
    """
    x, y = p
    if bounds is None:
        # First point: the rectangle degenerates to that point.
        return x, y, x, y
    xMin, yMin, xMax, yMax = bounds
    return min(x, xMin), min(y, yMin), max(x, xMax), max(y, yMax)
|
||||
|
||||
|
||||
def pointInRect(p, rect):
    """Test if a point lies inside (or on the edge of) a bounding rectangle.

    Args:
        p: A 2D tuple representing a point.
        rect: ``(xMin, yMin, xMax, yMax)``.

    Returns:
        ``True`` if the point is inside the rectangle, ``False`` otherwise.
    """
    x, y = p
    xMin, yMin, xMax, yMax = rect
    if x < xMin or x > xMax:
        return False
    return yMin <= y <= yMax
|
||||
|
||||
|
||||
def pointsInRect(array, rect):
    """Determine which points are inside a bounding rectangle.

    Args:
        array: A sequence of 2D tuples.
        rect: A bounding rectangle expressed as a tuple
            ``(xMin, yMin, xMax, yMax)``.

    Returns:
        A list of booleans, one per input point: ``True`` if that point
        lies inside (or on the edge of) the rectangle.
        (The previous docstring incorrectly claimed the points themselves
        were returned.)
    """
    if len(array) < 1:
        return []
    xMin, yMin, xMax, yMax = rect
    return [(xMin <= x <= xMax) and (yMin <= y <= yMax) for x, y in array]
|
||||
|
||||
|
||||
def vectorLength(vector):
    """Return the Euclidean length of a 2D *vector* tuple."""
    return math.sqrt(vector[0] ** 2 + vector[1] ** 2)
|
||||
|
||||
|
||||
def asInt16(array):
    """Round a list of floats to integers, half values rounding up.

    Intended for 16-bit values; note that no range clamping is performed.
    """
    return [int(math.floor(value + 0.5)) for value in array]
|
||||
|
||||
|
||||
def normRect(rect):
    """Normalize a bounding box rectangle.

    Turns the rectangle "the right way up", so that afterwards::

        xMin <= xMax and yMin <= yMax

    Args:
        rect: ``(xMin, yMin, xMax, yMax)``.

    Returns:
        A normalized bounding rectangle.
    """
    x1, y1, x2, y2 = rect
    xMin, xMax = sorted((x1, x2))
    yMin, yMax = sorted((y1, y2))
    return xMin, yMin, xMax, yMax
|
||||
|
||||
|
||||
def scaleRect(rect, x, y):
    """Scale a bounding box rectangle.

    Args:
        rect: A bounding rectangle expressed as a tuple
            ``(xMin, yMin, xMax, yMax)``.
        x: Factor to scale the rectangle along the X axis.
        y: Factor to scale the rectangle along the Y axis.

    Returns:
        A scaled bounding rectangle.
    """
    # Doc fix: the parameter was documented as "Y" but is named "y".
    (xMin, yMin, xMax, yMax) = rect
    return xMin * x, yMin * y, xMax * x, yMax * y
|
||||
|
||||
|
||||
def offsetRect(rect, dx, dy):
    """Offset a bounding box rectangle.

    Args:
        rect: A bounding rectangle expressed as a tuple
            ``(xMin, yMin, xMax, yMax)``.
        dx: Amount to offset the rectangle along the X axis.
        dy: Amount to offset the rectangle along the Y axis.

    Returns:
        An offset bounding rectangle.
    """
    # Doc fix: the parameter was documented as "dY" but is named "dy".
    (xMin, yMin, xMax, yMax) = rect
    return xMin + dx, yMin + dy, xMax + dx, yMax + dy
|
||||
|
||||
|
||||
def insetRect(rect, dx, dy):
    """Inset a bounding box rectangle on all sides.

    Negative insets grow the rectangle instead of shrinking it.

    Args:
        rect: A bounding rectangle expressed as a tuple
            ``(xMin, yMin, xMax, yMax)``.
        dx: Amount to inset the rectangle along the X axis.
        dy: Amount to inset the rectangle along the Y axis.

    Returns:
        An inset bounding rectangle.
    """
    # Doc fix: the parameter was documented as "dY" but is named "dy".
    (xMin, yMin, xMax, yMax) = rect
    return xMin + dx, yMin + dy, xMax - dx, yMax - dy
|
||||
|
||||
|
||||
def sectRect(rect1, rect2):
    """Intersect two bounding rectangles.

    Args:
        rect1: First rectangle ``(xMin, yMin, xMax, yMax)``.
        rect2: Second rectangle.

    Returns:
        ``(True, intersection)`` when the rectangles overlap, otherwise
        ``(False, (0, 0, 0, 0))``.  Touching edges do not count as an
        intersection.
    """
    xMin1, yMin1, xMax1, yMax1 = rect1
    xMin2, yMin2, xMax2, yMax2 = rect2
    xMin = max(xMin1, xMin2)
    yMin = max(yMin1, yMin2)
    xMax = min(xMax1, xMax2)
    yMax = min(yMax1, yMax2)
    if xMin >= xMax or yMin >= yMax:
        return False, (0, 0, 0, 0)
    return True, (xMin, yMin, xMax, yMax)
|
||||
|
||||
|
||||
def unionRect(rect1, rect2):
    """Return the smallest rectangle enclosing both input rectangles.

    Args:
        rect1: First rectangle ``(xMin, yMin, xMax, yMax)``.
        rect2: Second rectangle.

    Returns:
        The union bounding rectangle.
    """
    xMin1, yMin1, xMax1, yMax1 = rect1
    xMin2, yMin2, xMax2, yMax2 = rect2
    xMin = min(xMin1, xMin2)
    yMin = min(yMin1, yMin2)
    xMax = max(xMax1, xMax2)
    yMax = max(yMax1, yMax2)
    return (xMin, yMin, xMax, yMax)
|
||||
|
||||
|
||||
def rectCenter(rect):
    """Return the 2D center point of rectangle ``(xMin, yMin, xMax, yMax)``."""
    xMin, yMin, xMax, yMax = rect
    cx = (xMin + xMax) / 2
    cy = (yMin + yMax) / 2
    return cx, cy
|
||||
|
||||
|
||||
def rectArea(rect):
    """Return the area of rectangle ``(xMin, yMin, xMax, yMax)``."""
    xMin, yMin, xMax, yMax = rect
    height = yMax - yMin
    width = xMax - xMin
    return height * width
|
||||
|
||||
|
||||
def intRect(rect):
    """Round a rectangle to integer values.

    Mins are floored and maxes are ceiled, so the result is guaranteed
    NOT to be smaller than the original.

    Args:
        rect: ``(xMin, yMin, xMax, yMax)``.

    Returns:
        A rounded bounding rectangle of ints.
    """
    xMin, yMin, xMax, yMax = rect
    return (
        int(math.floor(xMin)),
        int(math.floor(yMin)),
        int(math.ceil(xMax)),
        int(math.ceil(yMax)),
    )
|
||||
|
||||
|
||||
def quantizeRect(rect, factor=1):
    """Round rect coordinates outward to multiples of *factor*.

    >>> quantizeRect((72.3, -218.4, 1201.3, 919.1))
    (72, -219, 1202, 920)
    >>> quantizeRect((72.3, -218.4, 1201.3, 919.1), factor=10)
    (70, -220, 1210, 920)
    """
    if factor < 1:
        raise ValueError(f"Expected quantization factor >= 1, found: {factor!r}")
    # Normalize the rectangle inline (same as normRect).
    x1, y1, x2, y2 = rect
    xMin, xMax = sorted((x1, x2))
    yMin, yMax = sorted((y1, y2))
    return (
        int(math.floor(xMin / factor) * factor),
        int(math.floor(yMin / factor) * factor),
        int(math.ceil(xMax / factor) * factor),
        int(math.ceil(yMax / factor) * factor),
    )
|
||||
|
||||
|
||||
class Vector(_Vector):
    # Deprecated alias kept for backward compatibility; use
    # fontTools.misc.vector.Vector instead.
    def __init__(self, *args, **kwargs):
        # Only emits a DeprecationWarning; no super().__init__ call here —
        # construction itself is presumably handled by the base class's
        # __new__ (see fontTools.misc.vector — TODO confirm).
        warnings.warn(
            "fontTools.misc.arrayTools.Vector has been deprecated, please use "
            "fontTools.misc.vector.Vector instead.",
            DeprecationWarning,
        )
|
||||
|
||||
|
||||
def pairwise(iterable, reverse=False):
    """Yield (current, next) pairs over *iterable*, wrapping around so the
    last element pairs with the first.

    Args:
        iterable: An iterable (must support ``reversed`` when reverse=True).
        reverse: If true, iterate in reverse order.

    Returns:
        An iterator yielding two elements per iteration; empty (falsy)
        input yields nothing, a single item pairs with itself.

    Example:

        >>> tuple(pairwise([]))
        ()
        >>> tuple(pairwise([0]))
        ((0, 0),)
        >>> tuple(pairwise([0, 1, 2]))
        ((0, 1), (1, 2), (2, 0))
        >>> tuple(pairwise([0, 1, 2], reverse=True))
        ((2, 1), (1, 0), (0, 2))
        >>> tuple(pairwise(['a', 'b', 'c', 'd']))
        (('a', 'b'), ('b', 'c'), ('c', 'd'), ('d', 'a'))
    """
    if not iterable:
        return
    seq = reversed(iterable) if reverse else iter(iterable)
    start = next(seq, None)
    prev = start
    for current in seq:
        yield prev, current
        prev = current
    # Close the cycle: last element pairs with the first.
    yield prev, start
|
||||
|
||||
|
||||
def _test():
    # Doctest-only container: the examples below are executed by the
    # doctest.testmod() call in the __main__ block.
    """
    >>> import math
    >>> calcBounds([])
    (0, 0, 0, 0)
    >>> calcBounds([(0, 40), (0, 100), (50, 50), (80, 10)])
    (0, 10, 80, 100)
    >>> updateBounds((0, 0, 0, 0), (100, 100))
    (0, 0, 100, 100)
    >>> pointInRect((50, 50), (0, 0, 100, 100))
    True
    >>> pointInRect((0, 0), (0, 0, 100, 100))
    True
    >>> pointInRect((100, 100), (0, 0, 100, 100))
    True
    >>> not pointInRect((101, 100), (0, 0, 100, 100))
    True
    >>> list(pointsInRect([(50, 50), (0, 0), (100, 100), (101, 100)], (0, 0, 100, 100)))
    [True, True, True, False]
    >>> vectorLength((3, 4))
    5.0
    >>> vectorLength((1, 1)) == math.sqrt(2)
    True
    >>> list(asInt16([0, 0.1, 0.5, 0.9]))
    [0, 0, 1, 1]
    >>> normRect((0, 10, 100, 200))
    (0, 10, 100, 200)
    >>> normRect((100, 200, 0, 10))
    (0, 10, 100, 200)
    >>> scaleRect((10, 20, 50, 150), 1.5, 2)
    (15.0, 40, 75.0, 300)
    >>> offsetRect((10, 20, 30, 40), 5, 6)
    (15, 26, 35, 46)
    >>> insetRect((10, 20, 50, 60), 5, 10)
    (15, 30, 45, 50)
    >>> insetRect((10, 20, 50, 60), -5, -10)
    (5, 10, 55, 70)
    >>> intersects, rect = sectRect((0, 10, 20, 30), (0, 40, 20, 50))
    >>> not intersects
    True
    >>> intersects, rect = sectRect((0, 10, 20, 30), (5, 20, 35, 50))
    >>> intersects
    1
    >>> rect
    (5, 20, 20, 30)
    >>> unionRect((0, 10, 20, 30), (0, 40, 20, 50))
    (0, 10, 20, 50)
    >>> rectCenter((0, 0, 100, 200))
    (50.0, 100.0)
    >>> rectCenter((0, 0, 100, 199.0))
    (50.0, 99.5)
    >>> intRect((0.9, 2.9, 3.1, 4.1))
    (0, 2, 4, 5)
    """


if __name__ == "__main__":
    import sys
    import doctest

    # Run all module doctests; exit status is the number of failures.
    sys.exit(doctest.testmod().failed)
|
||||
41647
venv/lib/python3.12/site-packages/fontTools/misc/bezierTools.c
Normal file
41647
venv/lib/python3.12/site-packages/fontTools/misc/bezierTools.c
Normal file
File diff suppressed because it is too large
Load Diff
Binary file not shown.
1493
venv/lib/python3.12/site-packages/fontTools/misc/bezierTools.py
Normal file
1493
venv/lib/python3.12/site-packages/fontTools/misc/bezierTools.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,170 @@
|
||||
""" fontTools.misc.classifyTools.py -- tools for classifying things.
|
||||
"""
|
||||
|
||||
|
||||
class Classifier(object):
    """
    Main Classifier object, used to classify things into similar sets.
    """

    def __init__(self, sort=True):
        self._things = set()  # set of all things known so far
        self._sets = []  # list of class sets produced so far
        self._mapping = {}  # map from things to their class set
        self._dirty = False  # True when _sets needs pruning/sorting
        self._sort = sort  # whether getClasses() returns sorted output

    def add(self, set_of_things):
        """
        Add a set to the classifier. Any iterable is accepted.

        Splits existing classes as needed so every class stays either a
        subset of, or disjoint from, the new set.
        """
        if not set_of_things:
            return

        self._dirty = True

        things, sets, mapping = self._things, self._sets, self._mapping

        s = set(set_of_things)
        intersection = s.intersection(things)  # existing things
        s.difference_update(intersection)  # new things
        difference = s
        del s

        # Add new class for new things
        if difference:
            things.update(difference)
            sets.append(difference)
            for thing in difference:
                mapping[thing] = difference
        del difference

        while intersection:
            # Take one item and process the old class it belongs to
            old_class = mapping[next(iter(intersection))]
            old_class_intersection = old_class.intersection(intersection)

            # Update old class to remove items from new set
            old_class.difference_update(old_class_intersection)

            # Remove processed items from todo list
            intersection.difference_update(old_class_intersection)

            # Add new class for the intersection with old class
            sets.append(old_class_intersection)
            for thing in old_class_intersection:
                mapping[thing] = old_class_intersection
            del old_class_intersection

    def update(self, list_of_sets):
        """
        Add a list of sets to the classifier. Any iterable of iterables is accepted.
        """
        for s in list_of_sets:
            self.add(s)

    def _process(self):
        # Deferred cleanup: drop classes emptied by add(), then sort if
        # requested.  No-op unless add() ran since the last call.
        if not self._dirty:
            return

        # Do any deferred processing
        sets = self._sets
        self._sets = [s for s in sets if s]

        if self._sort:
            # Largest classes first; ties broken by natural sort order.
            self._sets = sorted(self._sets, key=lambda s: (-len(s), sorted(s)))

        self._dirty = False

    # Output methods

    def getThings(self):
        """Returns the set of all things known so far.

        The return value belongs to the Classifier object and should NOT
        be modified while the classifier is still in use.
        """
        self._process()
        return self._things

    def getMapping(self):
        """Returns the mapping from things to their class set.

        The return value belongs to the Classifier object and should NOT
        be modified while the classifier is still in use.
        """
        self._process()
        return self._mapping

    def getClasses(self):
        """Returns the list of class sets.

        The return value belongs to the Classifier object and should NOT
        be modified while the classifier is still in use.
        """
        self._process()
        return self._sets
|
||||
|
||||
|
||||
def classify(list_of_sets, sort=True):
    """
    Takes an iterable of iterables (list of sets from here on; but any
    iterable works.), and returns the smallest list of sets such that
    each set, is either a subset, or is disjoint from, each of the input
    sets.

    In other words, this function classifies all the things present in
    any of the input sets, into similar classes, based on which sets
    things are a member of.

    If sort=True, return class sets are sorted by decreasing size and
    their natural sort order within each class size. Otherwise, class
    sets are returned in the order that they were identified, which is
    generally not significant.

    >>> classify([]) == ([], {})
    True
    >>> classify([[1,2],[2,4,5]]) == (
    ...     [{4, 5}, {1}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
    True
    >>> classify([[1,2],[2,4,5]], sort=False) == (
    ...     [{1}, {4, 5}, {2}], {1: {1}, 2: {2}, 4: {4, 5}, 5: {4, 5}})
    True
    """
    # Thin convenience wrapper around Classifier.
    c = Classifier(sort=sort)
    c.update(list_of_sets)
    return c.getClasses(), c.getMapping()
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import sys, doctest

    # Run the module doctests; exit status is the number of failures.
    sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
|
||||
53
venv/lib/python3.12/site-packages/fontTools/misc/cliTools.py
Normal file
53
venv/lib/python3.12/site-packages/fontTools/misc/cliTools.py
Normal file
@ -0,0 +1,53 @@
|
||||
"""Collection of utilities for command-line interfaces and console scripts."""
|
||||
|
||||
import os
|
||||
import re
|
||||
|
||||
|
||||
# Matches a previously-added "#<number>" de-duplication suffix.
numberAddedRE = re.compile(r"#\d+$")


def makeOutputFileName(
    input, outputDir=None, extension=None, overWrite=False, suffix=""
):
    """Generates a suitable file name for writing output.

    Often tools will want to take a file, do some kind of transformation
    to it, and write it out again. This function determines an
    appropriate name for the output file, through one or more of:

    - changing the output directory
    - appending suffix before file extension
    - replacing the file extension
    - suffixing the filename with a number (``#1``, ``#2``, etc.) to
      avoid overwriting an existing file.

    Args:
        input: Name of input file.
        outputDir: Optionally, a new directory to write the file into.
        extension: Optionally, a replacement for the current file extension.
        overWrite: Overwriting an existing file is permitted if true; if
            false and the proposed filename exists, a new name will be
            generated by adding an appropriate number suffix.
        suffix: Optionally, a string suffix appended before the extension.

    Returns:
        str: Suitable output filename
    """
    dirName, fileName = os.path.split(input)
    fileName, ext = os.path.splitext(fileName)
    if outputDir:
        dirName = outputDir
    # Strip any "#N" counter left over from a previous run.
    fileName = numberAddedRE.split(fileName)[0]
    if extension is None:
        extension = os.path.splitext(input)[1]
    output = os.path.join(dirName, fileName + suffix + extension)
    counter = 1
    if not overWrite:
        while os.path.exists(output):
            output = os.path.join(
                dirName, f"{fileName}{suffix}#{counter}{extension}"
            )
            counter += 1
    return output
|
||||
349
venv/lib/python3.12/site-packages/fontTools/misc/configTools.py
Normal file
349
venv/lib/python3.12/site-packages/fontTools/misc/configTools.py
Normal file
@ -0,0 +1,349 @@
|
||||
"""
|
||||
Code of the config system; not related to fontTools or fonts in particular.
|
||||
|
||||
The options that are specific to fontTools are in :mod:`fontTools.config`.
|
||||
|
||||
To create your own config system, you need to create an instance of
|
||||
:class:`Options`, and a subclass of :class:`AbstractConfig` with its
|
||||
``options`` class variable set to your instance of Options.
|
||||
|
||||
"""
|
||||
|
||||
from __future__ import annotations
|
||||
|
||||
import logging
|
||||
from dataclasses import dataclass
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
ClassVar,
|
||||
Dict,
|
||||
Iterable,
|
||||
Mapping,
|
||||
MutableMapping,
|
||||
Optional,
|
||||
Set,
|
||||
Union,
|
||||
)
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__all__ = [
|
||||
"AbstractConfig",
|
||||
"ConfigAlreadyRegisteredError",
|
||||
"ConfigError",
|
||||
"ConfigUnknownOptionError",
|
||||
"ConfigValueParsingError",
|
||||
"ConfigValueValidationError",
|
||||
"Option",
|
||||
"Options",
|
||||
]
|
||||
|
||||
|
||||
class ConfigError(Exception):
    """Base exception for the config module.

    All exceptions defined in this module derive from it, so callers can
    catch configuration problems with a single ``except ConfigError``.
    """
|
||||
|
||||
|
||||
class ConfigAlreadyRegisteredError(ConfigError):
    """Raised when a module tries to register a configuration option that
    already exists.

    Should not be raised too much really, only when developing new fontTools
    modules.
    """

    def __init__(self, name):
        message = f"Config option {name} is already registered."
        super().__init__(message)
|
||||
|
||||
|
||||
class ConfigValueParsingError(ConfigError):
    """Raised when a configuration value cannot be parsed."""

    def __init__(self, name, value):
        message = f"Config option {name}: value cannot be parsed (given {value!r})"
        super().__init__(message)
|
||||
|
||||
|
||||
class ConfigValueValidationError(ConfigError):
    """Raised when a configuration value cannot be validated."""

    def __init__(self, name, value):
        message = f"Config option {name}: value is invalid (given {value!r})"
        super().__init__(message)
|
||||
|
||||
|
||||
class ConfigUnknownOptionError(ConfigError):
    """Raised when a configuration option is unknown."""

    def __init__(self, option_or_name):
        # Accepts either an Option instance or a plain option name string.
        # BUG FIX: the Option branch previously ended with a stray ">"
        # ("...(id=...)>"), producing an unbalanced message.
        name = (
            f"'{option_or_name.name}' (id={id(option_or_name)})"
            if isinstance(option_or_name, Option)
            else f"'{option_or_name}'"
        )
        super().__init__(f"Config option {name} is unknown")
|
||||
|
||||
|
||||
# eq=False because Options are unique, not fungible objects
@dataclass(frozen=True, eq=False)
class Option:
    """Descriptor for a single configuration option."""

    name: str
    """Unique name identifying the option (e.g. package.module:MY_OPTION)."""
    help: str
    """Help text for this option."""
    default: Any
    """Default value for this option."""
    parse: Callable[[str], Any]
    """Turn input (e.g. string) into proper type. Only when reading from file."""
    validate: Optional[Callable[[Any], bool]] = None
    """Return true if the given value is an acceptable value."""

    @staticmethod
    def parse_optional_bool(v: str) -> Optional[bool]:
        """Parse "0"/"no"/"false" as False, "1"/"yes"/"true" as True,
        "auto"/"none" as None (case-insensitive); raise ValueError otherwise.
        """
        s = str(v).lower()
        if s in {"0", "no", "false"}:
            return False
        if s in {"1", "yes", "true"}:
            return True
        if s in {"auto", "none"}:
            return None
        # BUG FIX: this string was missing the f-prefix, so the error
        # message showed the literal "{v!r}" instead of the bad value.
        raise ValueError(f"invalid optional bool: {v!r}")

    @staticmethod
    def validate_optional_bool(v: Any) -> bool:
        """True iff *v* is None or a bool (companion to parse_optional_bool)."""
        return v is None or isinstance(v, bool)
|
||||
|
||||
|
||||
class Options(Mapping):
    """Registry of available options for a given config system.

    Define new options using the :meth:`register()` method.

    Access existing options using the Mapping interface.
    """

    # Private storage (name-mangled) mapping option name -> Option.
    __options: Dict[str, Option]

    def __init__(self, other: Optional["Options"] = None) -> None:
        # Optionally seed this registry with every option from another one.
        self.__options = {}
        if other is not None:
            for option in other.values():
                self.register_option(option)

    def register(
        self,
        name: str,
        help: str,
        default: Any,
        parse: Callable[[str], Any],
        validate: Optional[Callable[[Any], bool]] = None,
    ) -> Option:
        """Create and register a new option."""
        return self.register_option(Option(name, help, default, parse, validate))

    def register_option(self, option: Option) -> Option:
        """Register a new option."""
        name = option.name
        # Option names are unique identifiers; refuse duplicates outright.
        if name in self.__options:
            raise ConfigAlreadyRegisteredError(name)
        self.__options[name] = option
        return option

    def is_registered(self, option: Option) -> bool:
        """Return True if the same option object is already registered."""
        # Identity check (`is`), not equality: Option uses eq=False, options
        # are unique objects, not fungible values.
        return self.__options.get(option.name) is option

    def __getitem__(self, key: str) -> Option:
        return self.__options.__getitem__(key)

    def __iter__(self) -> Iterator[str]:
        return self.__options.__iter__()

    def __len__(self) -> int:
        return self.__options.__len__()

    def __repr__(self) -> str:
        # Summarized repr: show each option's default only, not its full details.
        return (
            f"{self.__class__.__name__}({{\n"
            + "".join(
                f"    {k!r}: Option(default={v.default!r}, ...),\n"
                for k, v in self.__options.items()
            )
            + "})"
        )
|
||||
|
||||
|
||||
_USE_GLOBAL_DEFAULT = object()
|
||||
|
||||
|
||||
class AbstractConfig(MutableMapping):
    """
    Create a set of config values, optionally pre-filled with values from
    the given dictionary or pre-existing config object.

    The class implements the MutableMapping protocol keyed by option name (`str`).
    For convenience its methods accept either Option or str as the key parameter.

    .. seealso:: :meth:`set()`

    This config class is abstract because it needs its ``options`` class
    var to be set to an instance of :class:`Options` before it can be
    instantiated and used.

    .. code:: python

        class MyConfig(AbstractConfig):
            options = Options()

        MyConfig.register_option( "test:option_name", "This is an option", 0, int, lambda v: isinstance(v, int))

        cfg = MyConfig({"test:option_name": 10})

    """

    # Subclasses must assign an Options registry here before instantiation.
    options: ClassVar[Options]

    @classmethod
    def register_option(
        cls,
        name: str,
        help: str,
        default: Any,
        parse: Callable[[str], Any],
        validate: Optional[Callable[[Any], bool]] = None,
    ) -> Option:
        """Register an available option in this config system."""
        return cls.options.register(
            name, help=help, default=default, parse=parse, validate=validate
        )

    # Per-instance storage of explicitly-set values, keyed by option name.
    _values: Dict[str, Any]

    def __init__(
        self,
        values: Union[AbstractConfig, Dict[Union[Option, str], Any]] = {},
        parse_values: bool = False,
        skip_unknown: bool = False,
    ):
        # NOTE: the mutable default `{}` is safe here because it is only
        # ever iterated, never mutated.
        self._values = {}
        values_dict = values._values if isinstance(values, AbstractConfig) else values
        for name, value in values_dict.items():
            self.set(name, value, parse_values, skip_unknown)

    def _resolve_option(self, option_or_name: Union[Option, str]) -> Option:
        # Normalize the flexible key (Option object or name string) into a
        # registered Option, raising ConfigUnknownOptionError otherwise.
        if isinstance(option_or_name, Option):
            option = option_or_name
            if not self.options.is_registered(option):
                raise ConfigUnknownOptionError(option)
            return option
        elif isinstance(option_or_name, str):
            name = option_or_name
            try:
                return self.options[name]
            except KeyError:
                raise ConfigUnknownOptionError(name)
        else:
            raise TypeError(
                "expected Option or str, found "
                f"{type(option_or_name).__name__}: {option_or_name!r}"
            )

    def set(
        self,
        option_or_name: Union[Option, str],
        value: Any,
        parse_values: bool = False,
        skip_unknown: bool = False,
    ):
        """Set the value of an option.

        Args:
            * `option_or_name`: an `Option` object or its name (`str`).
            * `value`: the value to be assigned to given option.
            * `parse_values`: parse the configuration value from a string into
                its proper type, as per its `Option` object. The default
                behavior is to raise `ConfigValueValidationError` when the value
                is not of the right type. Useful when reading options from a
                file type that doesn't support as many types as Python.
            * `skip_unknown`: skip unknown configuration options. The default
                behaviour is to raise `ConfigUnknownOptionError`. Useful when
                reading options from a configuration file that has extra entries
                (e.g. for a later version of fontTools)
        """
        try:
            option = self._resolve_option(option_or_name)
        except ConfigUnknownOptionError as e:
            # Best-effort mode: log and ignore entries from newer/unknown schemas.
            if skip_unknown:
                log.debug(str(e))
                return
            raise

        # Can be useful if the values come from a source that doesn't have
        # strict typing (.ini file? Terminal input?)
        if parse_values:
            try:
                value = option.parse(value)
            except Exception as e:
                raise ConfigValueParsingError(option.name, value) from e

        if option.validate is not None and not option.validate(value):
            raise ConfigValueValidationError(option.name, value)

        self._values[option.name] = value

    def get(
        self, option_or_name: Union[Option, str], default: Any = _USE_GLOBAL_DEFAULT
    ) -> Any:
        """
        Get the value of an option. The value which is returned is the first
        provided among:

        1. a user-provided value in the options's ``self._values`` dict
        2. a caller-provided default value to this method call
        3. the global default for the option provided in ``fontTools.config``

        This is to provide the ability to migrate progressively from config
        options passed as arguments to fontTools APIs to config options read
        from the current TTFont, e.g.

        .. code:: python

            def fontToolsAPI(font, some_option):
                value = font.cfg.get("someLib.module:SOME_OPTION", some_option)
                # use value

        That way, the function will work the same for users of the API that
        still pass the option to the function call, but will favour the new
        config mechanism if the given font specifies a value for that option.
        """
        option = self._resolve_option(option_or_name)
        if option.name in self._values:
            return self._values[option.name]
        # Identity check against the sentinel: `default=None` is a real default.
        if default is not _USE_GLOBAL_DEFAULT:
            return default
        return option.default

    def copy(self):
        # Shallow copy: re-validates nothing, values were validated on set().
        return self.__class__(self._values)

    def __getitem__(self, option_or_name: Union[Option, str]) -> Any:
        return self.get(option_or_name)

    def __setitem__(self, option_or_name: Union[Option, str], value: Any) -> None:
        return self.set(option_or_name, value)

    def __delitem__(self, option_or_name: Union[Option, str]) -> None:
        option = self._resolve_option(option_or_name)
        del self._values[option.name]

    def __iter__(self) -> Iterator[str]:
        # Iterates only over explicitly-set option names, not every
        # registered option.
        return self._values.__iter__()

    def __len__(self) -> int:
        return len(self._values)

    def __repr__(self) -> str:
        return f"{self.__class__.__name__}({repr(self._values)})"
|
||||
27
venv/lib/python3.12/site-packages/fontTools/misc/cython.py
Normal file
27
venv/lib/python3.12/site-packages/fontTools/misc/cython.py
Normal file
@ -0,0 +1,27 @@
|
||||
""" Exports a no-op 'cython' namespace similar to
|
||||
https://github.com/cython/cython/blob/master/Cython/Shadow.py
|
||||
|
||||
This allows to optionally compile @cython decorated functions
|
||||
(when cython is available at built time), or run the same code
|
||||
as pure-python, without runtime dependency on cython module.
|
||||
|
||||
We only define the symbols that we use. E.g. see fontTools.cu2qu
|
||||
"""
|
||||
|
||||
from types import SimpleNamespace
|
||||
|
||||
|
||||
def _empty_decorator(x):
|
||||
return x
|
||||
|
||||
|
||||
compiled = False
|
||||
|
||||
for name in ("double", "complex", "int"):
|
||||
globals()[name] = None
|
||||
|
||||
for name in ("cfunc", "inline"):
|
||||
globals()[name] = _empty_decorator
|
||||
|
||||
locals = lambda **_: _empty_decorator
|
||||
returns = lambda _: _empty_decorator
|
||||
@ -0,0 +1,83 @@
|
||||
"""Misc dict tools."""
|
||||
|
||||
__all__ = ["hashdict"]
|
||||
|
||||
|
||||
# https://stackoverflow.com/questions/1151658/python-hashable-dicts
class hashdict(dict):
    """
    hashable dict implementation, suitable for use as a key into
    other dicts.

    >>> h1 = hashdict({"apples": 1, "bananas":2})
    >>> h2 = hashdict({"bananas": 3, "mangoes": 5})
    >>> h1+h2
    hashdict(apples=1, bananas=3, mangoes=5)
    >>> d1 = {}
    >>> d1[h1] = "salad"
    >>> d1[h1]
    'salad'
    >>> d1[h2]
    Traceback (most recent call last):
    ...
    KeyError: hashdict(bananas=3, mangoes=5)

    based on answers from
    http://stackoverflow.com/questions/1151658/python-hashable-dicts

    """

    def __key(self):
        # Canonical, order-independent identity: the sorted (key, value) pairs.
        return tuple(sorted(self.items()))

    def __repr__(self):
        body = ", ".join(
            "{0}={1}".format(str(key), repr(val)) for key, val in self.__key()
        )
        return "{0}({1})".format(self.__class__.__name__, body)

    def __hash__(self):
        # Hash follows __key(), so equal dicts hash equal regardless of
        # insertion order.
        return hash(self.__key())

    # All mutating operations are disabled so the hash stays stable.
    def __setitem__(self, key, value):
        raise TypeError(
            f"{self.__class__.__name__} does not support item assignment"
        )

    def __delitem__(self, key):
        raise TypeError(
            f"{self.__class__.__name__} does not support item assignment"
        )

    def clear(self):
        raise TypeError(
            f"{self.__class__.__name__} does not support item assignment"
        )

    def pop(self, *args, **kwargs):
        raise TypeError(
            f"{self.__class__.__name__} does not support item assignment"
        )

    def popitem(self, *args, **kwargs):
        raise TypeError(
            f"{self.__class__.__name__} does not support item assignment"
        )

    def setdefault(self, *args, **kwargs):
        raise TypeError(
            f"{self.__class__.__name__} does not support item assignment"
        )

    def update(self, *args, **kwargs):
        raise TypeError(
            f"{self.__class__.__name__} does not support item assignment"
        )

    # update is not ok because it mutates the object
    # __add__ is ok because it creates a new object
    # while the new object is under construction, it's ok to mutate it
    def __add__(self, right):
        merged = hashdict(self)
        dict.update(merged, right)
        return merged
|
||||
119
venv/lib/python3.12/site-packages/fontTools/misc/eexec.py
Normal file
119
venv/lib/python3.12/site-packages/fontTools/misc/eexec.py
Normal file
@ -0,0 +1,119 @@
|
||||
"""
|
||||
PostScript Type 1 fonts make use of two types of encryption: charstring
|
||||
encryption and ``eexec`` encryption. Charstring encryption is used for
|
||||
the charstrings themselves, while ``eexec`` is used to encrypt larger
|
||||
sections of the font program, such as the ``Private`` and ``CharStrings``
|
||||
dictionaries. Despite the different names, the algorithm is the same,
|
||||
although ``eexec`` encryption uses a fixed initial key R=55665.
|
||||
|
||||
The algorithm uses cipher feedback, meaning that the ciphertext is used
|
||||
to modify the key. Because of this, the routines in this module return
|
||||
the new key at the end of the operation.
|
||||
|
||||
"""
|
||||
|
||||
from fontTools.misc.textTools import bytechr, bytesjoin, byteord
|
||||
|
||||
|
||||
def _decryptChar(cipher, R):
    # Decrypt a single byte and advance the key, per the Type 1 algorithm
    # described in the module docstring (cipher feedback: the *ciphertext*
    # byte feeds the key update).
    cipher = byteord(cipher)
    plain = ((cipher ^ (R >> 8))) & 0xFF
    # Key recurrence with the Type 1 constants c1=52845, c2=22719, mod 2**16.
    R = ((cipher + R) * 52845 + 22719) & 0xFFFF
    return bytechr(plain), R
|
||||
|
||||
|
||||
def _encryptChar(plain, R):
    # Encrypt a single byte and advance the key; mirror image of
    # _decryptChar — note the key update uses the *cipher* byte in both
    # directions (cipher feedback).
    plain = byteord(plain)
    cipher = ((plain ^ (R >> 8))) & 0xFF
    R = ((cipher + R) * 52845 + 22719) & 0xFFFF
    return bytechr(cipher), R
|
||||
|
||||
|
||||
def decrypt(cipherstring, R):
    r"""
    Decrypts a string using the Type 1 encryption algorithm.

    Args:
        cipherstring: String of ciphertext.
        R: Initial key.

    Returns:
        decryptedStr: Plaintext string.
        R: Output key for subsequent decryptions.

    Examples::

        >>> testStr = b"\0\0asdadads asds\265"
        >>> decryptedStr, R = decrypt(testStr, 12321)
        >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
        True
        >>> R == 36142
        True
    """
    plainList = []
    # Byte-at-a-time: each ciphertext byte also updates the running key R.
    for cipher in cipherstring:
        plain, R = _decryptChar(cipher, R)
        plainList.append(plain)
    plainstring = bytesjoin(plainList)
    return plainstring, int(R)
|
||||
|
||||
|
||||
def encrypt(plainstring, R):
    r"""
    Encrypts a string using the Type 1 encryption algorithm.

    Note that the algorithm as described in the Type 1 specification requires the
    plaintext to be prefixed with a number of random bytes. (For ``eexec`` the
    number of random bytes is set to 4.) This routine does *not* add the random
    prefix to its input.

    Args:
        plainstring: String of plaintext.
        R: Initial key.

    Returns:
        cipherstring: Ciphertext string.
        R: Output key for subsequent encryptions.

    Examples::

        >>> testStr = b"\0\0asdadads asds\265"
        >>> decryptedStr, R = decrypt(testStr, 12321)
        >>> decryptedStr == b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
        True
        >>> R == 36142
        True

        >>> testStr = b'0d\nh\x15\xe8\xc4\xb2\x15\x1d\x108\x1a<6\xa1'
        >>> encryptedStr, R = encrypt(testStr, 12321)
        >>> encryptedStr == b"\0\0asdadads asds\265"
        True
        >>> R == 36142
        True
    """
    cipherList = []
    # Byte-at-a-time; the produced ciphertext byte feeds the key update, so
    # encrypt/decrypt end on the same final key (see doctest above).
    for plain in plainstring:
        cipher, R = _encryptChar(plain, R)
        cipherList.append(cipher)
    cipherstring = bytesjoin(cipherList)
    return cipherstring, int(R)
|
||||
|
||||
|
||||
def hexString(s):
    """Return the bytes *s* encoded as lowercase hexadecimal bytes."""
    # Local import keeps binascii off the module's import-time cost.
    import binascii

    hexlified = binascii.hexlify(s)
    return hexlified
|
||||
|
||||
|
||||
def deHexString(h):
    """Decode a hex dump back to bytes; ASCII whitespace is ignored."""
    import binascii

    # split()+join strips all whitespace runs before unhexlify, which would
    # otherwise reject them.
    h = bytesjoin(h.split())
    return binascii.unhexlify(h)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
import doctest
|
||||
|
||||
sys.exit(doctest.testmod().failed)
|
||||
@ -0,0 +1,72 @@
|
||||
"""fontTools.misc.encodingTools.py -- tools for working with OpenType encodings.
|
||||
"""
|
||||
|
||||
import fontTools.encodings.codecs
|
||||
|
||||
# Map keyed by platformID, then platEncID, then possibly langID.
# Values are Python codec names; nested dicts (Macintosh platEncID 0) are
# keyed by langID with Ellipsis as the catch-all entry.
_encodingMap = {
    0: {  # Unicode
        0: "utf_16_be",
        1: "utf_16_be",
        2: "utf_16_be",
        3: "utf_16_be",
        4: "utf_16_be",
        5: "utf_16_be",
        6: "utf_16_be",
    },
    1: {  # Macintosh
        # See
        # https://github.com/fonttools/fonttools/issues/236
        0: {  # Macintosh, platEncID==0, keyed by langID
            15: "mac_iceland",
            17: "mac_turkish",
            18: "mac_croatian",
            24: "mac_latin2",
            25: "mac_latin2",
            26: "mac_latin2",
            27: "mac_latin2",
            28: "mac_latin2",
            36: "mac_latin2",
            37: "mac_romanian",
            38: "mac_latin2",
            39: "mac_latin2",
            40: "mac_latin2",
            Ellipsis: "mac_roman",  # Other
        },
        1: "x_mac_japanese_ttx",
        2: "x_mac_trad_chinese_ttx",
        3: "x_mac_korean_ttx",
        6: "mac_greek",
        7: "mac_cyrillic",
        25: "x_mac_simp_chinese_ttx",
        29: "mac_latin2",
        35: "mac_turkish",
        37: "mac_iceland",
    },
    2: {  # ISO
        0: "ascii",
        1: "utf_16_be",
        2: "latin1",
    },
    3: {  # Microsoft
        0: "utf_16_be",
        1: "utf_16_be",
        2: "shift_jis",
        3: "gb2312",
        4: "big5",
        5: "euc_kr",
        6: "johab",
        10: "utf_16_be",
    },
}
|
||||
|
||||
|
||||
def getEncoding(platformID, platEncID, langID, default=None):
    """Returns the Python encoding name for OpenType platformID/encodingID/langID
    triplet. If encoding for these values is not known, by default None is
    returned. That can be overridden by passing a value to the default argument.
    """
    encoding = _encodingMap.get(platformID, {}).get(platEncID, default)
    # Macintosh platEncID 0 is language-dependent: a nested dict keyed by
    # langID, with Ellipsis as its catch-all ("mac_roman") entry.
    if isinstance(encoding, dict):
        encoding = encoding.get(langID, encoding[Ellipsis])
    return encoding
|
||||
479
venv/lib/python3.12/site-packages/fontTools/misc/etree.py
Normal file
479
venv/lib/python3.12/site-packages/fontTools/misc/etree.py
Normal file
@ -0,0 +1,479 @@
|
||||
"""Shim module exporting the same ElementTree API for lxml and
|
||||
xml.etree backends.
|
||||
|
||||
When lxml is installed, it is automatically preferred over the built-in
|
||||
xml.etree module.
|
||||
On Python 2.7, the cElementTree module is preferred over the pure-python
|
||||
ElementTree module.
|
||||
|
||||
Besides exporting a unified interface, this also defines extra functions
|
||||
or subclasses built-in ElementTree classes to add features that are
|
||||
only availble in lxml, like OrderedDict for attributes, pretty_print and
|
||||
iterwalk.
|
||||
"""
|
||||
|
||||
from fontTools.misc.textTools import tostr
|
||||
|
||||
|
||||
XML_DECLARATION = """<?xml version='1.0' encoding='%s'?>"""
|
||||
|
||||
__all__ = [
|
||||
# public symbols
|
||||
"Comment",
|
||||
"dump",
|
||||
"Element",
|
||||
"ElementTree",
|
||||
"fromstring",
|
||||
"fromstringlist",
|
||||
"iselement",
|
||||
"iterparse",
|
||||
"parse",
|
||||
"ParseError",
|
||||
"PI",
|
||||
"ProcessingInstruction",
|
||||
"QName",
|
||||
"SubElement",
|
||||
"tostring",
|
||||
"tostringlist",
|
||||
"TreeBuilder",
|
||||
"XML",
|
||||
"XMLParser",
|
||||
"register_namespace",
|
||||
]
|
||||
|
||||
try:
|
||||
from lxml.etree import *
|
||||
|
||||
_have_lxml = True
|
||||
except ImportError:
|
||||
try:
|
||||
from xml.etree.cElementTree import *
|
||||
|
||||
# the cElementTree version of XML function doesn't support
|
||||
# the optional 'parser' keyword argument
|
||||
from xml.etree.ElementTree import XML
|
||||
except ImportError: # pragma: no cover
|
||||
from xml.etree.ElementTree import *
|
||||
_have_lxml = False
|
||||
|
||||
import sys

# dict is always ordered in python >= 3.6 and on pypy
PY36 = sys.version_info >= (3, 6)
try:
    import __pypy__
except ImportError:
    __pypy__ = None
_dict_is_ordered = bool(PY36 or __pypy__)
del PY36, __pypy__

# When insertion order is guaranteed a plain dict can hold attributes;
# otherwise fall back to OrderedDict to preserve attribute order.
if _dict_is_ordered:
    _Attrib = dict
else:
    from collections import OrderedDict as _Attrib
|
||||
|
||||
# Pick the Element base class: modern ElementTree exposes a subclassable
# type, while the old C accelerator's Element was a factory function.
if isinstance(Element, type):
    _Element = Element
else:
    # in py27, cElementTree.Element cannot be subclassed, so
    # we need to import the pure-python class
    from xml.etree.ElementTree import Element as _Element
|
||||
|
||||
class Element(_Element):
    """Element subclass that keeps the order of attributes."""

    def __init__(self, tag, attrib=_Attrib(), **extra):
        # NOTE: the shared default `_Attrib()` is safe — it is only read
        # (copied via update below), never mutated.
        super(Element, self).__init__(tag)
        # Replace the base attrib mapping with an order-preserving one.
        self.attrib = _Attrib()
        if attrib:
            self.attrib.update(attrib)
        if extra:
            self.attrib.update(extra)
|
||||
|
||||
def SubElement(parent, tag, attrib=_Attrib(), **extra):
    """Must override SubElement as well otherwise _elementtree.SubElement
    fails if 'parent' is a subclass of Element object.
    """
    # Instantiate via parent's class so subclasses of Element are preserved.
    element = parent.__class__(tag, attrib, **extra)
    parent.append(element)
    return element
|
||||
|
||||
def _iterwalk(element, events, tag):
|
||||
include = tag is None or element.tag == tag
|
||||
if include and "start" in events:
|
||||
yield ("start", element)
|
||||
for e in element:
|
||||
for item in _iterwalk(e, events, tag):
|
||||
yield item
|
||||
if include:
|
||||
yield ("end", element)
|
||||
|
||||
def iterwalk(element_or_tree, events=("end",), tag=None):
|
||||
"""A tree walker that generates events from an existing tree as
|
||||
if it was parsing XML data with iterparse().
|
||||
Drop-in replacement for lxml.etree.iterwalk.
|
||||
"""
|
||||
if iselement(element_or_tree):
|
||||
element = element_or_tree
|
||||
else:
|
||||
element = element_or_tree.getroot()
|
||||
if tag == "*":
|
||||
tag = None
|
||||
for item in _iterwalk(element, events, tag):
|
||||
yield item
|
||||
|
||||
# Keep a handle on the backend's ElementTree before shadowing the name.
_ElementTree = ElementTree


class ElementTree(_ElementTree):
    """ElementTree subclass that adds 'pretty_print' and 'doctype'
    arguments to the 'write' method.
    Currently these are only supported for the default XML serialization
    'method', and not also for "html" or "text", for these are delegated
    to the base class.
    """

    def write(
        self,
        file_or_filename,
        encoding=None,
        xml_declaration=False,
        method=None,
        doctype=None,
        pretty_print=False,
    ):
        if method and method != "xml":
            # delegate to super-class
            super(ElementTree, self).write(
                file_or_filename,
                encoding=encoding,
                xml_declaration=xml_declaration,
                method=method,
            )
            return

        if encoding is not None and encoding.lower() == "unicode":
            # Text-mode serialization can't carry an XML declaration.
            if xml_declaration:
                raise ValueError(
                    "Serialisation to unicode must not request an XML declaration"
                )
            write_declaration = False
            encoding = "unicode"
        elif xml_declaration is None:
            # by default, write an XML declaration only for non-standard encodings
            write_declaration = encoding is not None and encoding.upper() not in (
                "ASCII",
                "UTF-8",
                "UTF8",
                "US-ASCII",
            )
        else:
            write_declaration = xml_declaration

        if encoding is None:
            encoding = "ASCII"

        if pretty_print:
            # NOTE this will modify the tree in-place
            _indent(self._root)

        with _get_writer(file_or_filename, encoding) as write:
            if write_declaration:
                write(XML_DECLARATION % encoding.upper())
                if pretty_print:
                    write("\n")
            if doctype:
                write(_tounicode(doctype))
                if pretty_print:
                    write("\n")

            qnames, namespaces = _namespaces(self._root)
            _serialize_xml(write, self._root, qnames, namespaces)
|
||||
|
||||
import io


def tostring(
    element,
    encoding=None,
    xml_declaration=None,
    method=None,
    doctype=None,
    pretty_print=False,
):
    """Custom 'tostring' function that uses our ElementTree subclass, with
    pretty_print support.
    """
    # Text buffer for encoding="unicode", binary buffer otherwise — mirrors
    # the write() method's output modes.
    stream = io.StringIO() if encoding == "unicode" else io.BytesIO()
    ElementTree(element).write(
        stream,
        encoding=encoding,
        xml_declaration=xml_declaration,
        method=method,
        doctype=doctype,
        pretty_print=pretty_print,
    )
    return stream.getvalue()
|
||||
|
||||
# serialization support

import re

# Valid XML strings can include any Unicode character, excluding control
# characters, the surrogate blocks, FFFE, and FFFF:
#   Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
# Here we reversed the pattern to match only the invalid characters.
# For the 'narrow' python builds supporting only UCS-2, which represent
# characters beyond BMP as UTF-16 surrogate pairs, we need to pass through
# the surrogate block. I haven't found a more elegant solution...
UCS2 = sys.maxunicode < 0x10FFFF
if UCS2:
    _invalid_xml_string = re.compile(
        "[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uFFFE-\uFFFF]"
    )
else:
    _invalid_xml_string = re.compile(
        "[\u0000-\u0008\u000B-\u000C\u000E-\u001F\uD800-\uDFFF\uFFFE-\uFFFF]"
    )
|
||||
|
||||
def _tounicode(s):
    """Test if a string is valid user input and decode it to unicode string
    using ASCII encoding if it's a bytes string.
    Reject all bytes/unicode input that contains non-XML characters.
    Reject all bytes input that contains non-ASCII characters.
    """
    try:
        s = tostr(s, encoding="ascii", errors="strict")
    except UnicodeDecodeError:
        raise ValueError(
            "Bytes strings can only contain ASCII characters. "
            "Use unicode strings for non-ASCII characters."
        )
    except AttributeError:
        # Not a str/bytes-like object at all.
        _raise_serialization_error(s)
    # Reject characters that are not legal in an XML document (see the
    # _invalid_xml_string pattern above).
    if s and _invalid_xml_string.search(s):
        raise ValueError(
            "All strings must be XML compatible: Unicode or ASCII, "
            "no NULL bytes or control characters"
        )
    return s
|
||||
|
||||
import contextlib


@contextlib.contextmanager
def _get_writer(file_or_filename, encoding):
    # returns text write method and release all resources after using
    try:
        write = file_or_filename.write
    except AttributeError:
        # file_or_filename is a file name
        f = open(
            file_or_filename,
            "w",
            encoding="utf-8" if encoding == "unicode" else encoding,
            errors="xmlcharrefreplace",
        )
        with f:
            yield f.write
    else:
        # file_or_filename is a file-like object
        # encoding determines if it is a text or binary writer
        if encoding == "unicode":
            # use a text writer as is
            yield write
        else:
            # wrap a binary writer with TextIOWrapper
            detach_buffer = False
            if isinstance(file_or_filename, io.BufferedIOBase):
                buf = file_or_filename
            elif isinstance(file_or_filename, io.RawIOBase):
                buf = io.BufferedWriter(file_or_filename)
                detach_buffer = True
            else:
                # This is to handle passed objects that aren't in the
                # IOBase hierarchy, but just have a write method
                buf = io.BufferedIOBase()
                buf.writable = lambda: True
                buf.write = write
                try:
                    # TextIOWrapper uses this methods to determine
                    # if BOM (for UTF-16, etc) should be added
                    buf.seekable = file_or_filename.seekable
                    buf.tell = file_or_filename.tell
                except AttributeError:
                    pass
            wrapper = io.TextIOWrapper(
                buf,
                encoding=encoding,
                errors="xmlcharrefreplace",
                newline="\n",
            )
            try:
                yield wrapper.write
            finally:
                # Keep the original file open when the TextIOWrapper and
                # the BufferedWriter are destroyed
                wrapper.detach()
                if detach_buffer:
                    buf.detach()
|
||||
|
||||
from xml.etree.ElementTree import _namespace_map


def _namespaces(elem):
    # identify namespaces used in this tree

    # maps qnames to *encoded* prefix:local names
    qnames = {None: None}

    # maps uri:s to prefixes
    namespaces = {}

    def add_qname(qname):
        # calculate serialized qname representation
        try:
            qname = _tounicode(qname)
            if qname[:1] == "{":
                # Clark notation "{uri}local": split and assign a prefix.
                uri, tag = qname[1:].rsplit("}", 1)
                prefix = namespaces.get(uri)
                if prefix is None:
                    # Prefer the well-known prefix, else invent "ns0", "ns1", ...
                    prefix = _namespace_map.get(uri)
                    if prefix is None:
                        prefix = "ns%d" % len(namespaces)
                    else:
                        prefix = _tounicode(prefix)
                    if prefix != "xml":
                        namespaces[uri] = prefix
                if prefix:
                    qnames[qname] = "%s:%s" % (prefix, tag)
                else:
                    qnames[qname] = tag  # default element
            else:
                qnames[qname] = qname
        except TypeError:
            _raise_serialization_error(qname)

    # populate qname and namespaces table
    for elem in elem.iter():
        tag = elem.tag
        if isinstance(tag, QName):
            if tag.text not in qnames:
                add_qname(tag.text)
        elif isinstance(tag, str):
            if tag not in qnames:
                add_qname(tag)
        elif tag is not None and tag is not Comment and tag is not PI:
            _raise_serialization_error(tag)
        for key, value in elem.items():
            if isinstance(key, QName):
                key = key.text
            if key not in qnames:
                add_qname(key)
            if isinstance(value, QName) and value.text not in qnames:
                add_qname(value.text)
        text = elem.text
        if isinstance(text, QName) and text.text not in qnames:
            add_qname(text.text)
    return qnames, namespaces
|
||||
|
||||
def _serialize_xml(write, elem, qnames, namespaces, **kwargs):
    # Recursively serialize `elem`; `namespaces` is only non-None at the
    # root call, so xmlns declarations are written once on the root tag.
    tag = elem.tag
    text = elem.text
    if tag is Comment:
        write("<!--%s-->" % _tounicode(text))
    elif tag is ProcessingInstruction:
        write("<?%s?>" % _tounicode(text))
    else:
        tag = qnames[_tounicode(tag) if tag is not None else None]
        if tag is None:
            # "None" tag: emit only text and children (no wrapping element).
            if text:
                write(_escape_cdata(text))
            for e in elem:
                _serialize_xml(write, e, qnames, None)
        else:
            write("<" + tag)
            if namespaces:
                for uri, prefix in sorted(
                    namespaces.items(), key=lambda x: x[1]
                ):  # sort on prefix
                    if prefix:
                        prefix = ":" + prefix
                    write(' xmlns%s="%s"' % (prefix, _escape_attrib(uri)))
            attrs = elem.attrib
            if attrs:
                # try to keep existing attrib order
                if len(attrs) <= 1 or type(attrs) is _Attrib:
                    items = attrs.items()
                else:
                    # if plain dict, use lexical order
                    items = sorted(attrs.items())
                for k, v in items:
                    if isinstance(k, QName):
                        k = _tounicode(k.text)
                    else:
                        k = _tounicode(k)
                    if isinstance(v, QName):
                        v = qnames[_tounicode(v.text)]
                    else:
                        v = _escape_attrib(v)
                    write(' %s="%s"' % (qnames[k], v))
            if text is not None or len(elem):
                write(">")
                if text:
                    write(_escape_cdata(text))
                for e in elem:
                    _serialize_xml(write, e, qnames, None)
                write("</" + tag + ">")
            else:
                # No text and no children: self-closing tag.
                write("/>")
        if elem.tail:
            write(_escape_cdata(elem.tail))
|
||||
|
||||
def _raise_serialization_error(text):
|
||||
raise TypeError("cannot serialize %r (type %s)" % (text, type(text).__name__))
|
||||
|
||||
def _escape_cdata(text):
    """Return *text* with XML character-data entities escaped.

    Escapes ``&``, ``<`` and ``>``.  Non-string input raises TypeError
    via _raise_serialization_error.

    NOTE: the entity replacement strings in the extracted source had been
    garbled by HTML entity decoding into identity replacements; restored
    to the standard XML escapes used by xml.etree.ElementTree.
    """
    # escape character data
    try:
        text = _tounicode(text)
        # it's worth avoiding do-nothing calls for short strings;
        # '&' must be escaped first so it does not double-escape the others
        if "&" in text:
            text = text.replace("&", "&amp;")
        if "<" in text:
            text = text.replace("<", "&lt;")
        if ">" in text:
            text = text.replace(">", "&gt;")
        return text
    except (TypeError, AttributeError):
        _raise_serialization_error(text)
|
||||
|
||||
def _escape_attrib(text):
|
||||
# escape attribute value
|
||||
try:
|
||||
text = _tounicode(text)
|
||||
if "&" in text:
|
||||
text = text.replace("&", "&")
|
||||
if "<" in text:
|
||||
text = text.replace("<", "<")
|
||||
if ">" in text:
|
||||
text = text.replace(">", ">")
|
||||
if '"' in text:
|
||||
text = text.replace('"', """)
|
||||
if "\n" in text:
|
||||
text = text.replace("\n", " ")
|
||||
return text
|
||||
except (TypeError, AttributeError):
|
||||
_raise_serialization_error(text)
|
||||
|
||||
def _indent(elem, level=0):
|
||||
# From http://effbot.org/zone/element-lib.htm#prettyprint
|
||||
i = "\n" + level * " "
|
||||
if len(elem):
|
||||
if not elem.text or not elem.text.strip():
|
||||
elem.text = i + " "
|
||||
if not elem.tail or not elem.tail.strip():
|
||||
elem.tail = i
|
||||
for elem in elem:
|
||||
_indent(elem, level + 1)
|
||||
if not elem.tail or not elem.tail.strip():
|
||||
elem.tail = i
|
||||
else:
|
||||
if level and (not elem.tail or not elem.tail.strip()):
|
||||
elem.tail = i
|
||||
245
venv/lib/python3.12/site-packages/fontTools/misc/filenames.py
Normal file
245
venv/lib/python3.12/site-packages/fontTools/misc/filenames.py
Normal file
@ -0,0 +1,245 @@
|
||||
"""
|
||||
This module implements the algorithm for converting between a "user name" -
|
||||
something that a user can choose arbitrarily inside a font editor - and a file
|
||||
name suitable for use in a wide range of operating systems and filesystems.
|
||||
|
||||
The `UFO 3 specification <http://unifiedfontobject.org/versions/ufo3/conventions/>`_
|
||||
provides an example of an algorithm for such conversion, which avoids illegal
|
||||
characters, reserved file names, ambiguity between upper- and lower-case
|
||||
characters, and clashes with existing files.
|
||||
|
||||
This code was originally copied from
|
||||
`ufoLib <https://github.com/unified-font-object/ufoLib/blob/8747da7/Lib/ufoLib/filenames.py>`_
|
||||
by Tal Leming and is copyright (c) 2005-2016, The RoboFab Developers:
|
||||
|
||||
- Erik van Blokland
|
||||
- Tal Leming
|
||||
- Just van Rossum
|
||||
"""
|
||||
|
||||
# Characters that may not appear in a file name on common filesystems.
# NOTE(review): the original literal was garbled in extraction (the quote
# and backslash were fused into a single raw-string token); restored so
# that '"' and '\\' are separate entries, as the doctests below require.
illegalCharacters = '" * + / : < > ? [ \\ ] | \0'.split(" ")
illegalCharacters += [chr(i) for i in range(1, 32)]  # ASCII control chars
illegalCharacters += [chr(0x7F)]  # DEL
# Windows reserved device names (lower-cased for case-insensitive tests).
reservedFileNames = "CON PRN AUX CLOCK$ NUL A:-Z: COM1".lower().split(" ")
reservedFileNames += "LPT1 LPT2 LPT3 COM2 COM3 COM4".lower().split(" ")
# Common maximum file-name length across filesystems.
maxFileNameLength = 255
|
||||
|
||||
|
||||
class NameTranslationError(Exception):
    """Raised when a user name cannot be translated to a unique file name."""

    pass
|
||||
|
||||
|
||||
def userNameToFileName(userName, existing=[], prefix="", suffix=""):
    """Converts from a user name to a file name.

    Takes care to avoid illegal characters, reserved file names, ambiguity between
    upper- and lower-case characters, and clashes with existing files.

    Args:
        userName (str): The input file name.
        existing: A case-insensitive list of all existing file names.
        prefix: Prefix to be prepended to the file name.
        suffix: Suffix to be appended to the file name.

    Returns:
        A suitable filename.

    Raises:
        NameTranslationError: If no suitable name could be generated.

    Examples::

        >>> userNameToFileName("a") == "a"
        True
        >>> userNameToFileName("A") == "A_"
        True
        >>> userNameToFileName("AE") == "A_E_"
        True
        >>> userNameToFileName("Ae") == "A_e"
        True
        >>> userNameToFileName("ae") == "ae"
        True
        >>> userNameToFileName("aE") == "aE_"
        True
        >>> userNameToFileName("a.alt") == "a.alt"
        True
        >>> userNameToFileName("A.alt") == "A_.alt"
        True
        >>> userNameToFileName("A.Alt") == "A_.A_lt"
        True
        >>> userNameToFileName("A.aLt") == "A_.aL_t"
        True
        >>> userNameToFileName(u"A.alT") == "A_.alT_"
        True
        >>> userNameToFileName("T_H") == "T__H_"
        True
        >>> userNameToFileName("T_h") == "T__h"
        True
        >>> userNameToFileName("t_h") == "t_h"
        True
        >>> userNameToFileName("F_F_I") == "F__F__I_"
        True
        >>> userNameToFileName("f_f_i") == "f_f_i"
        True
        >>> userNameToFileName("Aacute_V.swash") == "A_acute_V_.swash"
        True
        >>> userNameToFileName(".notdef") == "_notdef"
        True
        >>> userNameToFileName("con") == "_con"
        True
        >>> userNameToFileName("CON") == "C_O_N_"
        True
        >>> userNameToFileName("con.alt") == "_con.alt"
        True
        >>> userNameToFileName("alt.con") == "alt._con"
        True
    """
    # NOTE(review): mutable default 'existing=[]' is safe here because it is
    # only ever read, never mutated.
    # the incoming name must be a str
    if not isinstance(userName, str):
        raise ValueError("The value for userName must be a string.")
    # establish the prefix and suffix lengths
    prefixLength = len(prefix)
    suffixLength = len(suffix)
    # replace an initial period with an _
    # if no prefix is to be added
    if not prefix and userName[0] == ".":
        userName = "_" + userName[1:]
    # filter the user name
    filteredUserName = []
    for character in userName:
        # replace illegal characters with _
        if character in illegalCharacters:
            character = "_"
        # add _ to all non-lower characters, to disambiguate names that
        # differ only in case on case-insensitive filesystems
        elif character != character.lower():
            character += "_"
        filteredUserName.append(character)
    userName = "".join(filteredUserName)
    # clip to 255 (maxFileNameLength), leaving room for prefix and suffix
    sliceLength = maxFileNameLength - prefixLength - suffixLength
    userName = userName[:sliceLength]
    # test for illegal (reserved) file names, per dot-separated part
    parts = []
    for part in userName.split("."):
        if part.lower() in reservedFileNames:
            part = "_" + part
        parts.append(part)
    userName = ".".join(parts)
    # test for clash with an existing file name (case-insensitive)
    fullName = prefix + userName + suffix
    if fullName.lower() in existing:
        fullName = handleClash1(userName, existing, prefix, suffix)
    # finished
    return fullName
|
||||
|
||||
|
||||
def handleClash1(userName, existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = ["a" * 5]

    >>> e = list(existing)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "aaaaa" + "1".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000002.0000000000')
    True

    >>> e = list(existing)
    >>> e.append(prefix + "AAAAA" + "2".zfill(15) + suffix)
    >>> handleClash1(userName="A" * 5, existing=e,
    ...     prefix=prefix, suffix=suffix) == (
    ...     '00000.AAAAA000000000000001.0000000000')
    True
    """
    # if the prefix length + user name length + suffix length + 15 is at
    # or past the maximum length, slice 15 characters off of the user name
    prefixLength = len(prefix)
    suffixLength = len(suffix)
    if prefixLength + len(userName) + suffixLength + 15 > maxFileNameLength:
        l = prefixLength + len(userName) + suffixLength + 15
        # sliceLength is negative here, so this trims the overflow (plus
        # room for the 15-digit counter) from the end of userName
        sliceLength = maxFileNameLength - l
        userName = userName[:sliceLength]
    finalName = None
    # try to add numbers to create a unique name
    counter = 1
    while finalName is None:
        name = userName + str(counter).zfill(15)
        fullName = prefix + name + suffix
        if fullName.lower() not in existing:
            finalName = fullName
            break
        else:
            counter += 1
            # give up once the counter no longer fits in 15 digits
            if counter >= 999999999999999:
                break
    # if there is a clash, go to the next fallback
    if finalName is None:
        finalName = handleClash2(existing, prefix, suffix)
    # finished
    return finalName
|
||||
|
||||
|
||||
def handleClash2(existing=[], prefix="", suffix=""):
    """
    existing should be a case-insensitive list
    of all existing file names.

    >>> prefix = ("0" * 5) + "."
    >>> suffix = "." + ("0" * 10)
    >>> existing = [prefix + str(i) + suffix for i in range(100)]

    >>> e = list(existing)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.100.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "1" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.1.0000000000')
    True

    >>> e = list(existing)
    >>> e.remove(prefix + "2" + suffix)
    >>> handleClash2(existing=e, prefix=prefix, suffix=suffix) == (
    ...     '00000.2.0000000000')
    True
    """
    # the longest decimal counter that can still fit between prefix/suffix
    maxLength = maxFileNameLength - len(prefix) - len(suffix)
    maxValue = int("9" * maxLength)
    # probe successive integers until an unused name is found
    finalName = None
    for counter in range(1, maxValue):
        candidate = prefix + str(counter) + suffix
        if candidate.lower() not in existing:
            finalName = candidate
            break
    # raise an error if nothing has been found
    if finalName is None:
        raise NameTranslationError("No unique name could be found.")
    # finished
    return finalName
|
||||
|
||||
|
||||
if __name__ == "__main__":
    # Run this module's doctests when executed as a script; the process
    # exit status is the number of failing doctest examples (0 == success).
    import doctest
    import sys

    sys.exit(doctest.testmod().failed)
|
||||
253
venv/lib/python3.12/site-packages/fontTools/misc/fixedTools.py
Normal file
253
venv/lib/python3.12/site-packages/fontTools/misc/fixedTools.py
Normal file
@ -0,0 +1,253 @@
|
||||
"""
|
||||
The `OpenType specification <https://docs.microsoft.com/en-us/typography/opentype/spec/otff#data-types>`_
|
||||
defines two fixed-point data types:
|
||||
|
||||
``Fixed``
|
||||
A 32-bit signed fixed-point number with a 16 bit twos-complement
|
||||
magnitude component and 16 fractional bits.
|
||||
``F2DOT14``
|
||||
A 16-bit signed fixed-point number with a 2 bit twos-complement
|
||||
magnitude component and 14 fractional bits.
|
||||
|
||||
To support reading and writing data with these data types, this module provides
|
||||
functions for converting between fixed-point, float and string representations.
|
||||
|
||||
.. data:: MAX_F2DOT14
|
||||
|
||||
The maximum value that can still fit in an F2Dot14. (1.99993896484375)
|
||||
"""
|
||||
|
||||
from .roundTools import otRound, nearestMultipleShortestRepr
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__all__ = [
|
||||
"MAX_F2DOT14",
|
||||
"fixedToFloat",
|
||||
"floatToFixed",
|
||||
"floatToFixedToFloat",
|
||||
"floatToFixedToStr",
|
||||
"fixedToStr",
|
||||
"strToFixed",
|
||||
"strToFixedToFloat",
|
||||
"ensureVersionIsLong",
|
||||
"versionToFixed",
|
||||
]
|
||||
|
||||
|
||||
MAX_F2DOT14 = 0x7FFF / (1 << 14)
|
||||
|
||||
|
||||
def fixedToFloat(value, precisionBits):
    """Convert a fixed-point integer to the equivalent float.

    Args:
        value (int): Number in fixed-point format.
        precisionBits (int): Number of fractional (precision) bits.

    Returns:
        float: The floating point value.

    Examples::

        >>> import math
        >>> f = fixedToFloat(-10139, precisionBits=14)
        >>> math.isclose(f, -0.61883544921875)
        True
    """
    scale = 1 << precisionBits
    return value / scale
|
||||
|
||||
|
||||
def floatToFixed(value, precisionBits):
    """Convert a float to a fixed-point integer, rounding with otRound.

    Args:
        value (float): Floating point value.
        precisionBits (int): Number of fractional (precision) bits.

    Returns:
        int: Fixed-point representation.

    Examples::

        >>> floatToFixed(-0.61883544921875, precisionBits=14)
        -10139
        >>> floatToFixed(-0.61884, precisionBits=14)
        -10139
    """
    scale = 1 << precisionBits
    return otRound(value * scale)
|
||||
|
||||
|
||||
def floatToFixedToFloat(value, precisionBits):
    """Round a float to the nearest value exactly representable in fixed-point.

    Converts to fixed-point (rounding with otRound) and back, so the
    result is exactly representable with ``precisionBits`` fractional
    binary digits.  Equivalent to ``fixedToFloat(floatToFixed(value))``.

    Args:
        value (float): The input floating point value.
        precisionBits (int): Number of precision bits.

    Returns:
        float: The transformed and rounded value.

    Examples::
        >>> import math
        >>> f2 = floatToFixedToFloat(-0.61884, precisionBits=14)
        >>> math.isclose(f2, -0.61883544921875)
        True
    """
    scale = 1 << precisionBits
    fixed = otRound(value * scale)
    return fixed / scale
|
||||
|
||||
|
||||
def fixedToStr(value, precisionBits):
    """Convert a fixed-point number to its shortest decimal string.

    Chooses the float with the fewest fractional decimal digits that
    still rounds back to the same fixed-point value (via
    nearestMultipleShortestRepr).  Noticeably slower than the plain
    division in ``fixedToFloat``; use it for serialization or display,
    not in hot paths.

    For a 2.14 fixed-point number, use ``precisionBits=14``::

        >>> fixedToStr(-10139, precisionBits=14)
        '-0.61884'

    Args:
        value (int): The fixed-point value to convert.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        str: A string representation of the value.
    """
    scale = 1 << precisionBits
    fraction = value / scale
    return nearestMultipleShortestRepr(fraction, factor=1.0 / scale)
|
||||
|
||||
|
||||
def strToFixed(string, precisionBits):
    """Convert a decimal-float string to a fixed-point integer.

    Args:
        string (str): A string representing a decimal float.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        int: Fixed-point representation.

    Examples::

        >>> ## to convert a float string to a 2.14 fixed-point number:
        >>> strToFixed('-0.61884', precisionBits=14)
        -10139
    """
    scale = 1 << precisionBits
    return otRound(float(string) * scale)
|
||||
|
||||
|
||||
def strToFixedToFloat(string, precisionBits):
    """Parse a decimal-float string and round it to fixed-point precision.

    Parses ``string`` as a float, converts it to a fixed-point number
    with ``precisionBits`` fractional binary digits (rounding with
    otRound), then back to a float.  Shorthand for
    ``fixedToFloat(floatToFixed(float(string)))``.

    Args:
        string (str): A string representing a decimal float.
        precisionBits (int): Number of precision bits.

    Returns:
        float: The transformed and rounded value.

    Examples::

        >>> import math
        >>> f = strToFixedToFloat('-0.61884', precisionBits=14)
        >>> math.isclose(f, -0.61883544921875)
        True
    """
    scale = 1 << precisionBits
    fixed = otRound(float(string) * scale)
    return fixed / scale
|
||||
|
||||
|
||||
def floatToFixedToStr(value, precisionBits):
    """Convert a float to the shortest string that round-trips in fixed-point.

    Uses the fewest fractional decimal digits that still identify the
    equivalent fixed-point number with ``precisionBits`` fractional
    binary digits (via nearestMultipleShortestRepr).

    >>> floatToFixedToStr(-0.61883544921875, precisionBits=14)
    '-0.61884'

    Args:
        value (float): The float value to convert.
        precisionBits (int): Number of precision bits, *up to a maximum of 16*.

    Returns:
        str: A string representation of the value.
    """
    delta = 1.0 / (1 << precisionBits)
    return nearestMultipleShortestRepr(value, factor=delta)
|
||||
|
||||
|
||||
def ensureVersionIsLong(value):
    """Ensure a table version is an unsigned long.

    OpenType table versions are a single unsigned long made of an
    unsigned short major and unsigned short minor version.  A value
    below ``0x10000`` is assumed to be a float-style version (e.g.
    ``1.5``) and is converted to fixed-point with :func:`floatToFixed`,
    with a warning logged.

    Args:
        value (Number): a candidate table version number.

    Returns:
        int: A table version number, possibly corrected to fixed-point.
    """
    if value >= 0x10000:
        return value
    fixed = floatToFixed(value, 16)
    log.warning(
        "Table version value is a float: %.4f; " "fix to use hex instead: 0x%08x",
        value,
        fixed,
    )
    return fixed
|
||||
|
||||
|
||||
def versionToFixed(value):
    """Parse a version string and normalize it to a fixed-point long.

    Strings starting with "0" (e.g. "0x00010000") are parsed as integers
    with base auto-detection; anything else is parsed as a float and then
    corrected via :func:`ensureVersionIsLong`.

    Args:
        value (str): a candidate table version number.

    Returns:
        int: A table version number, possibly corrected to fixed-point.
    """
    if value.startswith("0"):
        parsed = int(value, 0)
    else:
        parsed = float(value)
    return ensureVersionIsLong(parsed)
|
||||
25
venv/lib/python3.12/site-packages/fontTools/misc/intTools.py
Normal file
25
venv/lib/python3.12/site-packages/fontTools/misc/intTools.py
Normal file
@ -0,0 +1,25 @@
|
||||
__all__ = ["popCount", "bit_count", "bit_indices"]


# int.bit_count() was added in Python 3.10; fall back to counting '1'
# digits of the binary representation on older versions.
try:
    bit_count = int.bit_count
except AttributeError:

    def bit_count(v):
        return bin(v).count("1")


# NOTE(review): this bare string is a no-op expression statement -- it is
# NOT attached to bit_count as a docstring (int.bit_count is a builtin
# descriptor and cannot be re-documented).  Kept as-is to avoid any
# behavior change.
"""Return number of 1 bits (population count) of the absolute value of an integer.

See https://docs.python.org/3.10/library/stdtypes.html#int.bit_count
"""
popCount = bit_count  # alias
|
||||
|
||||
|
||||
def bit_indices(v):
    """Return list of indices where bits are set, 0 being the index of the least significant bit.

    >>> bit_indices(0b101)
    [0, 2]
    """
    # reversed binary string puts bit 0 first; the '0b' prefix ends up at
    # the tail and can never equal "1", so it is filtered out naturally
    indices = []
    for position, digit in enumerate(bin(v)[::-1]):
        if digit == "1":
            indices.append(position)
    return indices
|
||||
@ -0,0 +1,12 @@
|
||||
# The star import is deliberate: on Python >= 3.12 it brings in
# itertools.batched, which makes the fallback definition below a no-op.
from itertools import *


# Python 3.12:
if "batched" not in globals():
    # https://docs.python.org/3/library/itertools.html#itertools.batched
    def batched(iterable, n):
        # batched('ABCDEFG', 3) --> ABC DEF G
        if n < 1:
            raise ValueError("n must be at least one")
        it = iter(iterable)
        # islice takes up to n items; the walrus loop stops on the first
        # empty batch, i.e. when the iterator is exhausted
        while batch := tuple(islice(it, n)):
            yield batch
|
||||
@ -0,0 +1,42 @@
|
||||
from collections import UserDict, UserList
|
||||
|
||||
__all__ = ["LazyDict", "LazyList"]
|
||||
|
||||
|
||||
class LazyDict(UserDict):
    """Mapping whose callable values are evaluated lazily.

    A value that is callable is invoked with its key on first access;
    the result then replaces the callable in the underlying mapping, so
    subsequent lookups return the cached result.
    """

    def __init__(self, data):
        # bypass UserDict's copying constructor: adopt 'data' directly so
        # lazily-resolved values are written back into the caller's mapping
        super().__init__()
        self.data = data

    def __getitem__(self, k):
        value = self.data[k]
        if not callable(value):
            return value
        resolved = value(k)
        self.data[k] = resolved
        return resolved
|
||||
|
||||
|
||||
class LazyList(UserList):
    """Sequence whose callable items are evaluated lazily.

    An item that is callable is invoked with its index on first access
    and the result is cached in place.  Slicing forces evaluation of the
    sliced items and returns a plain list; so does concatenation with a
    list or another LazyList.
    """

    def __getitem__(self, k):
        if isinstance(k, slice):
            # resolve each index through __getitem__ to trigger evaluation
            return [self[i] for i in range(*k.indices(len(self)))]
        item = self.data[k]
        if callable(item):
            item = item(k)
            self.data[k] = item
        return item

    def __add__(self, other):
        if isinstance(other, LazyList):
            return list(self) + list(other)
        if isinstance(other, list):
            return list(self) + other
        return NotImplemented

    def __radd__(self, other):
        if not isinstance(other, list):
            return NotImplemented
        return other + list(self)
|
||||
543
venv/lib/python3.12/site-packages/fontTools/misc/loggingTools.py
Normal file
543
venv/lib/python3.12/site-packages/fontTools/misc/loggingTools.py
Normal file
@ -0,0 +1,543 @@
|
||||
import sys
|
||||
import logging
|
||||
import timeit
|
||||
from functools import wraps
|
||||
from collections.abc import Mapping, Callable
|
||||
import warnings
|
||||
from logging import PercentStyle
|
||||
|
||||
|
||||
# default logging level used by Timer class
|
||||
TIME_LEVEL = logging.DEBUG
|
||||
|
||||
# per-level format strings used by the default formatter
|
||||
# (the level name is not printed for INFO and DEBUG messages)
|
||||
DEFAULT_FORMATS = {
|
||||
"*": "%(levelname)s: %(message)s",
|
||||
"INFO": "%(message)s",
|
||||
"DEBUG": "%(message)s",
|
||||
}
|
||||
|
||||
|
||||
class LevelFormatter(logging.Formatter):
    """Log formatter with level-specific formatting.

    Formatter class which optionally takes a dict of logging levels to
    format strings, allowing to customise the log records appearance for
    specific levels.


    Attributes:
        fmt: A dictionary mapping logging levels to format strings.
            The ``*`` key identifies the default format string.
        datefmt: As per py:class:`logging.Formatter`
        style: As per py:class:`logging.Formatter`

    >>> import sys
    >>> handler = logging.StreamHandler(sys.stdout)
    >>> formatter = LevelFormatter(
    ...     fmt={
    ...         '*': '[%(levelname)s] %(message)s',
    ...         'DEBUG': '%(name)s [%(levelname)s] %(message)s',
    ...         'INFO': '%(message)s',
    ...     })
    >>> handler.setFormatter(formatter)
    >>> log = logging.getLogger('test')
    >>> log.setLevel(logging.DEBUG)
    >>> log.addHandler(handler)
    >>> log.debug('this uses a custom format string')
    test [DEBUG] this uses a custom format string
    >>> log.info('this also uses a custom format string')
    this also uses a custom format string
    >>> log.warning("this one uses the default format string")
    [WARNING] this one uses the default format string
    """

    def __init__(self, fmt=None, datefmt=None, style="%"):
        if style != "%":
            raise ValueError(
                "only '%' percent style is supported in both python 2 and 3"
            )
        if fmt is None:
            fmt = DEFAULT_FORMATS
        if isinstance(fmt, str):
            # a single string: it is the default format, no per-level ones
            default_format = fmt
            custom_formats = {}
        elif isinstance(fmt, Mapping):
            # copy so popping "*" does not mutate the caller's dict
            custom_formats = dict(fmt)
            default_format = custom_formats.pop("*", None)
        else:
            raise TypeError("fmt must be a str or a dict of str: %r" % fmt)
        super(LevelFormatter, self).__init__(default_format, datefmt)
        self.default_format = self._fmt
        self.custom_formats = {}
        for level, fmt in custom_formats.items():
            # NOTE(review): logging._checkLevel is a private stdlib helper
            # that normalizes level names/numbers; it may change between
            # Python versions.
            level = logging._checkLevel(level)
            self.custom_formats[level] = fmt

    def format(self, record):
        # Swap the active format string per record level.  This mutates
        # self._fmt/_style, so a single formatter instance is not safe to
        # share across threads emitting concurrently.
        if self.custom_formats:
            fmt = self.custom_formats.get(record.levelno, self.default_format)
            if self._fmt != fmt:
                self._fmt = fmt
                # for python >= 3.2, _style needs to be set if _fmt changes
                if PercentStyle:
                    self._style = PercentStyle(fmt)
        return super(LevelFormatter, self).format(record)
|
||||
|
||||
|
||||
def configLogger(**kwargs):
    """A more sophisticated logging system configuation manager.

    This is more or less the same as :py:func:`logging.basicConfig`,
    with some additional options and defaults.

    The default behaviour is to create a ``StreamHandler`` which writes to
    sys.stderr, set a formatter using the ``DEFAULT_FORMATS`` strings, and add
    the handler to the top-level library logger ("fontTools").

    A number of optional keyword arguments may be specified, which can alter
    the default behaviour.

    Args:

        logger: Specifies the logger name or a Logger instance to be
            configured. (Defaults to "fontTools" logger). Unlike ``basicConfig``,
            this function can be called multiple times to reconfigure a logger.
            If the logger or any of its children already exists before the call is
            made, they will be reset before the new configuration is applied.
        filename: Specifies that a ``FileHandler`` be created, using the
            specified filename, rather than a ``StreamHandler``.
        filemode: Specifies the mode to open the file, if filename is
            specified. (If filemode is unspecified, it defaults to ``a``).
        format: Use the specified format string for the handler. This
            argument also accepts a dictionary of format strings keyed by
            level name, to allow customising the records appearance for
            specific levels. The special ``'*'`` key is for 'any other' level.
        datefmt: Use the specified date/time format.
        level: Set the logger level to the specified level.
        stream: Use the specified stream to initialize the StreamHandler. Note
            that this argument is incompatible with ``filename`` - if both
            are present, ``stream`` is ignored.
        handlers: If specified, this should be an iterable of already created
            handlers, which will be added to the logger. Any handler in the
            list which does not have a formatter assigned will be assigned the
            formatter created in this function.
        filters: If specified, this should be an iterable of already created
            filters. If the ``handlers`` do not already have filters assigned,
            these filters will be added to them.
        propagate: All loggers have a ``propagate`` attribute which determines
            whether to continue searching for handlers up the logging hierarchy.
            If not provided, the "propagate" attribute will be set to ``False``.
    """
    # using kwargs to enforce keyword-only arguments in py2.
    handlers = kwargs.pop("handlers", None)
    # 'handlers' is mutually exclusive with 'stream'/'filename', and
    # 'stream'/'filename' are mutually exclusive with each other
    if handlers is None:
        if "stream" in kwargs and "filename" in kwargs:
            raise ValueError(
                "'stream' and 'filename' should not be " "specified together"
            )
    else:
        if "stream" in kwargs or "filename" in kwargs:
            raise ValueError(
                "'stream' or 'filename' should not be "
                "specified together with 'handlers'"
            )
    if handlers is None:
        filename = kwargs.pop("filename", None)
        mode = kwargs.pop("filemode", "a")
        if filename:
            h = logging.FileHandler(filename, mode)
        else:
            stream = kwargs.pop("stream", None)
            h = logging.StreamHandler(stream)
        handlers = [h]
    # By default, the top-level library logger is configured.
    logger = kwargs.pop("logger", "fontTools")
    if not logger or isinstance(logger, str):
        # empty "" or None means the 'root' logger
        logger = logging.getLogger(logger)
    # before (re)configuring, reset named logger and its children (if exist)
    _resetExistingLoggers(parent=logger.name)
    # use DEFAULT_FORMATS if 'format' is None
    fs = kwargs.pop("format", None)
    dfs = kwargs.pop("datefmt", None)
    # XXX: '%' is the only format style supported on both py2 and 3
    style = kwargs.pop("style", "%")
    fmt = LevelFormatter(fs, dfs, style)
    filters = kwargs.pop("filters", [])
    for h in handlers:
        # only assign our formatter/filters to handlers that have none,
        # so pre-configured handlers are left untouched
        if h.formatter is None:
            h.setFormatter(fmt)
        if not h.filters:
            for f in filters:
                h.addFilter(f)
        logger.addHandler(h)
    if logger.name != "root":
        # stop searching up the hierarchy for handlers
        logger.propagate = kwargs.pop("propagate", False)
    # set a custom severity level
    level = kwargs.pop("level", None)
    if level is not None:
        logger.setLevel(level)
    # any keyword arguments left over at this point are unrecognised
    if kwargs:
        keys = ", ".join(kwargs.keys())
        raise ValueError("Unrecognised argument(s): %s" % keys)
|
||||
|
||||
|
||||
def _resetExistingLoggers(parent="root"):
|
||||
"""Reset the logger named 'parent' and all its children to their initial
|
||||
state, if they already exist in the current configuration.
|
||||
"""
|
||||
root = logging.root
|
||||
# get sorted list of all existing loggers
|
||||
existing = sorted(root.manager.loggerDict.keys())
|
||||
if parent == "root":
|
||||
# all the existing loggers are children of 'root'
|
||||
loggers_to_reset = [parent] + existing
|
||||
elif parent not in existing:
|
||||
# nothing to do
|
||||
return
|
||||
elif parent in existing:
|
||||
loggers_to_reset = [parent]
|
||||
# collect children, starting with the entry after parent name
|
||||
i = existing.index(parent) + 1
|
||||
prefixed = parent + "."
|
||||
pflen = len(prefixed)
|
||||
num_existing = len(existing)
|
||||
while i < num_existing:
|
||||
if existing[i][:pflen] == prefixed:
|
||||
loggers_to_reset.append(existing[i])
|
||||
i += 1
|
||||
for name in loggers_to_reset:
|
||||
if name == "root":
|
||||
root.setLevel(logging.WARNING)
|
||||
for h in root.handlers[:]:
|
||||
root.removeHandler(h)
|
||||
for f in root.filters[:]:
|
||||
root.removeFilters(f)
|
||||
root.disabled = False
|
||||
else:
|
||||
logger = root.manager.loggerDict[name]
|
||||
logger.level = logging.NOTSET
|
||||
logger.handlers = []
|
||||
logger.filters = []
|
||||
logger.propagate = True
|
||||
logger.disabled = False
|
||||
|
||||
|
||||
class Timer(object):
|
||||
"""Keeps track of overall time and split/lap times.
|
||||
|
||||
>>> import time
|
||||
>>> timer = Timer()
|
||||
>>> time.sleep(0.01)
|
||||
>>> print("First lap:", timer.split())
|
||||
First lap: ...
|
||||
>>> time.sleep(0.02)
|
||||
>>> print("Second lap:", timer.split())
|
||||
Second lap: ...
|
||||
>>> print("Overall time:", timer.time())
|
||||
Overall time: ...
|
||||
|
||||
Can be used as a context manager inside with-statements.
|
||||
|
||||
>>> with Timer() as t:
|
||||
... time.sleep(0.01)
|
||||
>>> print("%0.3f seconds" % t.elapsed)
|
||||
0... seconds
|
||||
|
||||
If initialised with a logger, it can log the elapsed time automatically
|
||||
upon exiting the with-statement.
|
||||
|
||||
>>> import logging
|
||||
>>> log = logging.getLogger("my-fancy-timer-logger")
|
||||
>>> configLogger(logger=log, level="DEBUG", format="%(message)s", stream=sys.stdout)
|
||||
>>> with Timer(log, 'do something'):
|
||||
... time.sleep(0.01)
|
||||
Took ... to do something
|
||||
|
||||
The same Timer instance, holding a reference to a logger, can be reused
|
||||
in multiple with-statements, optionally with different messages or levels.
|
||||
|
||||
>>> timer = Timer(log)
|
||||
>>> with timer():
|
||||
... time.sleep(0.01)
|
||||
elapsed time: ...s
|
||||
>>> with timer('redo it', level=logging.INFO):
|
||||
... time.sleep(0.02)
|
||||
Took ... to redo it
|
||||
|
||||
It can also be used as a function decorator to log the time elapsed to run
|
||||
the decorated function.
|
||||
|
||||
>>> @timer()
|
||||
... def test1():
|
||||
... time.sleep(0.01)
|
||||
>>> @timer('run test 2', level=logging.INFO)
|
||||
... def test2():
|
||||
... time.sleep(0.02)
|
||||
>>> test1()
|
||||
Took ... to run 'test1'
|
||||
>>> test2()
|
||||
Took ... to run test 2
|
||||
"""
|
||||
|
||||
# timeit.default_timer choses the most accurate clock for each platform
|
||||
_time = timeit.default_timer
|
||||
default_msg = "elapsed time: %(time).3fs"
|
||||
default_format = "Took %(time).3fs to %(msg)s"
|
||||
|
||||
def __init__(self, logger=None, msg=None, level=None, start=None):
|
||||
self.reset(start)
|
||||
if logger is None:
|
||||
for arg in ("msg", "level"):
|
||||
if locals().get(arg) is not None:
|
||||
raise ValueError("'%s' can't be specified without a 'logger'" % arg)
|
||||
self.logger = logger
|
||||
self.level = level if level is not None else TIME_LEVEL
|
||||
self.msg = msg
|
||||
|
||||
def reset(self, start=None):
|
||||
"""Reset timer to 'start_time' or the current time."""
|
||||
if start is None:
|
||||
self.start = self._time()
|
||||
else:
|
||||
self.start = start
|
||||
self.last = self.start
|
||||
self.elapsed = 0.0
|
||||
|
||||
def time(self):
|
||||
"""Return the overall time (in seconds) since the timer started."""
|
||||
return self._time() - self.start
|
||||
|
||||
def split(self):
|
||||
"""Split and return the lap time (in seconds) in between splits."""
|
||||
current = self._time()
|
||||
self.elapsed = current - self.last
|
||||
self.last = current
|
||||
return self.elapsed
|
||||
|
||||
def formatTime(self, msg, time):
|
||||
"""Format 'time' value in 'msg' and return formatted string.
|
||||
If 'msg' contains a '%(time)' format string, try to use that.
|
||||
Otherwise, use the predefined 'default_format'.
|
||||
If 'msg' is empty or None, fall back to 'default_msg'.
|
||||
"""
|
||||
if not msg:
|
||||
msg = self.default_msg
|
||||
if msg.find("%(time)") < 0:
|
||||
msg = self.default_format % {"msg": msg, "time": time}
|
||||
else:
|
||||
try:
|
||||
msg = msg % {"time": time}
|
||||
except (KeyError, ValueError):
|
||||
pass # skip if the format string is malformed
|
||||
return msg
|
||||
|
||||
def __enter__(self):
|
||||
"""Start a new lap"""
|
||||
self.last = self._time()
|
||||
self.elapsed = 0.0
|
||||
return self
|
||||
|
||||
def __exit__(self, exc_type, exc_value, traceback):
|
||||
"""End the current lap. If timer has a logger, log the time elapsed,
|
||||
using the format string in self.msg (or the default one).
|
||||
"""
|
||||
time = self.split()
|
||||
if self.logger is None or exc_type:
|
||||
# if there's no logger attached, or if any exception occurred in
|
||||
# the with-statement, exit without logging the time
|
||||
return
|
||||
message = self.formatTime(self.msg, time)
|
||||
# Allow log handlers to see the individual parts to facilitate things
|
||||
# like a server accumulating aggregate stats.
|
||||
msg_parts = {"msg": self.msg, "time": time}
|
||||
self.logger.log(self.level, message, msg_parts)
|
||||
|
||||
    def __call__(self, func_or_msg=None, **kwargs):
        """If the first argument is a function, return a decorator which runs
        the wrapped function inside Timer's context manager.
        Otherwise, treat the first argument as a 'msg' string and return an updated
        Timer instance, referencing the same logger.
        A 'level' keyword can also be passed to override self.level.
        """
        if isinstance(func_or_msg, Callable):
            func = func_or_msg
            # use the function name when no explicit 'msg' is provided
            if not self.msg:
                self.msg = "run '%s'" % func.__name__

            @wraps(func)
            def wrapper(*args, **kwds):
                # time each call by entering/exiting this Timer around it
                with self:
                    return func(*args, **kwds)

            return wrapper
        else:
            # factory usage: build a fresh Timer bound to the same logger,
            # with an optional new message and/or level
            msg = func_or_msg or kwargs.get("msg")
            level = kwargs.get("level", self.level)
            return self.__class__(self.logger, msg, level)
|
||||
|
||||
    def __float__(self):
        # the elapsed time (in seconds) of the last completed lap
        return self.elapsed
|
||||
|
||||
    def __int__(self):
        # elapsed lap time truncated to whole seconds
        return int(self.elapsed)
|
||||
|
||||
    def __str__(self):
        # elapsed lap time with millisecond precision
        return "%.3f" % self.elapsed
|
||||
|
||||
|
||||
class ChannelsFilter(logging.Filter):
    """Provides a hierarchical filter for log entries based on channel names.

    Filters out records emitted from a list of enabled channel names,
    including their children. It works the same as the ``logging.Filter``
    class, but allows the user to specify multiple channel names.

    >>> import sys
    >>> handler = logging.StreamHandler(sys.stdout)
    >>> handler.setFormatter(logging.Formatter("%(message)s"))
    >>> filter = ChannelsFilter("A.B", "C.D")
    >>> handler.addFilter(filter)
    >>> root = logging.getLogger()
    >>> root.addHandler(handler)
    >>> root.setLevel(level=logging.DEBUG)
    >>> logging.getLogger('A.B').debug('this record passes through')
    this record passes through
    >>> logging.getLogger('A.B.C').debug('records from children also pass')
    records from children also pass
    >>> logging.getLogger('C.D').debug('this one as well')
    this one as well
    >>> logging.getLogger('A.B.').debug('also this one')
    also this one
    >>> logging.getLogger('A.F').debug('but this one does not!')
    >>> logging.getLogger('C.DE').debug('neither this one!')
    """

    def __init__(self, *names):
        self.names = names
        self.num = len(names)
        self.lengths = {name: len(name) for name in names}

    def filter(self, record):
        # With no names configured, every record passes.
        if not self.names:
            return True
        # A record passes if its channel equals an enabled name, or lives
        # below one in the dotted logger hierarchy.
        return any(
            record.name == name or record.name.startswith(name + ".")
            for name in self.names
        )
|
||||
|
||||
|
||||
class CapturingLogHandler(logging.Handler):
    """Context manager that captures the records emitted to a logger.

    While active, the handler attaches itself to *logger*, temporarily
    overriding its level, 'disabled' flag and propagation, and collects
    every emitted record in ``self.records``. The previous logger state
    is restored on exit.
    """

    def __init__(self, logger, level):
        super(CapturingLogHandler, self).__init__(level=level)
        self.records = []
        # accept either a logger instance or a logger name
        if isinstance(logger, str):
            self.logger = logging.getLogger(logger)
        else:
            self.logger = logger

    def __enter__(self):
        # save the logger's state so __exit__ can restore it
        self.original_disabled = self.logger.disabled
        self.original_level = self.logger.level
        self.original_propagate = self.logger.propagate

        self.logger.addHandler(self)
        self.logger.setLevel(self.level)
        self.logger.disabled = False
        self.logger.propagate = False

        return self

    def __exit__(self, type, value, traceback):
        self.logger.removeHandler(self)
        self.logger.setLevel(self.original_level)
        self.logger.disabled = self.original_disabled
        self.logger.propagate = self.original_propagate
        # BUGFIX: previously this returned `self`, a truthy value, which
        # silently suppressed any exception raised inside the with-block.
        # Returning None lets exceptions propagate normally.
        return None

    def emit(self, record):
        self.records.append(record)

    def assertRegex(self, regexp, msg=None):
        """Assert that some captured record's message matches *regexp*.

        Returns True on success; raises AssertionError otherwise.
        """
        import re

        pattern = re.compile(regexp)
        for r in self.records:
            if pattern.search(r.getMessage()):
                return True
        if msg is None:
            msg = "Pattern '%s' not found in logger records" % regexp
        # BUGFIX: raise explicitly instead of `assert 0, msg`, which is
        # stripped (and thus never fires) under `python -O`.
        raise AssertionError(msg)
|
||||
|
||||
|
||||
class LogMixin(object):
    """Mixin class that adds logging functionality to another class.

    You can define a new class that subclasses from ``LogMixin`` as well as
    other base classes through multiple inheritance.
    All instances of that class will have a ``log`` property that returns
    a ``logging.Logger`` named after their respective ``<module>.<class>``.

    For example:

    >>> class BaseClass(object):
    ...     pass
    >>> class MyClass(LogMixin, BaseClass):
    ...     pass
    >>> a = MyClass()
    >>> isinstance(a.log, logging.Logger)
    True
    >>> print(a.log.name)
    fontTools.misc.loggingTools.MyClass
    >>> class AnotherClass(MyClass):
    ...     pass
    >>> b = AnotherClass()
    >>> isinstance(b.log, logging.Logger)
    True
    >>> print(b.log.name)
    fontTools.misc.loggingTools.AnotherClass
    """

    @property
    def log(self):
        # Lazily create and cache one logger per instance, named after the
        # concrete (most-derived) class.
        try:
            return self._log
        except AttributeError:
            cls = self.__class__
            self._log = logging.getLogger("%s.%s" % (cls.__module__, cls.__name__))
            return self._log
|
||||
|
||||
|
||||
def deprecateArgument(name, msg, category=UserWarning):
    """Raise a warning about deprecated function argument 'name'."""
    # stacklevel=3 attributes the warning to the caller of the deprecated API
    text = "%r is deprecated; %s" % (name, msg)
    warnings.warn(text, category=category, stacklevel=3)
|
||||
|
||||
|
||||
def deprecateFunction(msg, category=UserWarning):
    """Decorator to raise a warning when a deprecated function is called."""

    def decorator(deprecated_func):
        @wraps(deprecated_func)
        def wrapper(*args, **kwargs):
            # warn on every call, pointing at the caller via stacklevel=2
            warnings.warn(
                "%r is deprecated; %s" % (deprecated_func.__name__, msg),
                category=category,
                stacklevel=2,
            )
            return deprecated_func(*args, **kwargs)

        return wrapper

    return decorator
|
||||
|
||||
|
||||
if __name__ == "__main__":
    import doctest

    # Run this module's doctests; the process exit status is the number of
    # failed examples (0 on success).
    sys.exit(doctest.testmod(optionflags=doctest.ELLIPSIS).failed)
|
||||
@ -0,0 +1,56 @@
|
||||
from fontTools.misc.textTools import Tag, bytesjoin, strjoin
|
||||
|
||||
try:
|
||||
import xattr
|
||||
except ImportError:
|
||||
xattr = None
|
||||
|
||||
|
||||
def _reverseString(s):
|
||||
s = list(s)
|
||||
s.reverse()
|
||||
return strjoin(s)
|
||||
|
||||
|
||||
def getMacCreatorAndType(path):
    """Returns file creator and file type codes for a path.

    Args:
        path (str): A file path.

    Returns:
        A tuple of two :py:class:`fontTools.textTools.Tag` objects, the first
        representing the file creator and the second representing the
        file type. ``(None, None)`` when the ``xattr`` module is missing or
        the path carries no Finder info.
    """
    if xattr is None:
        # xattr not installed: Finder info is unavailable
        return None, None
    try:
        finderInfo = xattr.getxattr(path, "com.apple.FinderInfo")
    except (KeyError, IOError):
        return None, None
    # Finder info stores the type code first, then the creator code.
    return Tag(finderInfo[4:8]), Tag(finderInfo[:4])
|
||||
|
||||
|
||||
def setMacCreatorAndType(path, fileCreator, fileType):
    """Set file creator and file type codes for a path.

    Note that if the ``xattr`` module is not installed, no action is
    taken but no error is raised.

    Args:
        path (str): A file path.
        fileCreator: A four-character file creator tag.
        fileType: A four-character file type tag.
    """
    if xattr is None:
        return
    from fontTools.misc.textTools import pad

    for tag in (fileCreator, fileType):
        if len(tag) != 4:
            raise TypeError("arg must be string of 4 chars")
    # Finder info layout: type code first, then creator, zero-padded to 32.
    finderInfo = pad(bytesjoin([fileType, fileCreator]), 32)
    xattr.setxattr(path, "com.apple.FinderInfo", finderInfo)
|
||||
261
venv/lib/python3.12/site-packages/fontTools/misc/macRes.py
Normal file
261
venv/lib/python3.12/site-packages/fontTools/misc/macRes.py
Normal file
@ -0,0 +1,261 @@
|
||||
from io import BytesIO
|
||||
import struct
|
||||
from fontTools.misc import sstruct
|
||||
from fontTools.misc.textTools import bytesjoin, tostr
|
||||
from collections import OrderedDict
|
||||
from collections.abc import MutableMapping
|
||||
|
||||
|
||||
class ResourceError(Exception):
    """Raised when a resource fork cannot be read or its data is malformed."""

    pass
|
||||
|
||||
|
||||
class ResourceReader(MutableMapping):
    """Reader for Mac OS resource forks.

    Parses a resource fork and returns resources according to their type.
    If run on OS X, this will open the resource fork in the filesystem.
    Otherwise, it will open the file itself and attempt to read it as
    though it were a resource fork.

    The returned object can be indexed by type and iterated over,
    returning in each case a list of py:class:`Resource` objects
    representing all the resources of a certain type.

    """

    def __init__(self, fileOrPath):
        """Open a file

        Args:
            fileOrPath: Either an object supporting a ``read`` method, an
                ``os.PathLike`` object, or a string.
        """
        self._resources = OrderedDict()
        if hasattr(fileOrPath, "read"):
            self.file = fileOrPath
        else:
            try:
                # try reading from the resource fork (only works on OS X)
                self.file = self.openResourceFork(fileOrPath)
                self._readFile()
                return
            except (ResourceError, IOError):
                # if it fails, use the data fork
                self.file = self.openDataFork(fileOrPath)
        self._readFile()

    @staticmethod
    def openResourceFork(path):
        # Read the raw resource fork through the special '..namedfork/rsrc'
        # pseudo-path (macOS only) and wrap it in an in-memory stream.
        if hasattr(path, "__fspath__"):  # support os.PathLike objects
            path = path.__fspath__()
        with open(path + "/..namedfork/rsrc", "rb") as resfork:
            data = resfork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    @staticmethod
    def openDataFork(path):
        # Fall back to the ordinary file contents, treated as resource data.
        with open(path, "rb") as datafork:
            data = datafork.read()
        infile = BytesIO(data)
        infile.name = path
        return infile

    def _readFile(self):
        # Parse the header and resource map, then the type list (which in
        # turn reads every resource reference).
        self._readHeaderAndMap()
        self._readTypeList()

    def _read(self, numBytes, offset=None):
        # Read exactly 'numBytes' from the stream (seeking first when an
        # offset is given), raising ResourceError on truncated/bad data.
        if offset is not None:
            try:
                self.file.seek(offset)
            except OverflowError:
                raise ResourceError("Failed to seek offset ('offset' is too large)")
            if self.file.tell() != offset:
                raise ResourceError("Failed to seek offset (reached EOF)")
        try:
            data = self.file.read(numBytes)
        except OverflowError:
            raise ResourceError("Cannot read resource ('numBytes' is too large)")
        if len(data) != numBytes:
            raise ResourceError("Cannot read resource (not enough data)")
        return data

    def _readHeaderAndMap(self):
        # Unpack the fork header and map header directly into attributes of
        # self (sstruct.unpack with a target object sets fields on it).
        self.file.seek(0)
        headerData = self._read(ResourceForkHeaderSize)
        sstruct.unpack(ResourceForkHeader, headerData, self)
        # seek to resource map, skip reserved
        mapOffset = self.mapOffset + 22
        resourceMapData = self._read(ResourceMapHeaderSize, mapOffset)
        sstruct.unpack(ResourceMapHeader, resourceMapData, self)
        # list offsets stored in the map are relative to the map's start
        self.absTypeListOffset = self.mapOffset + self.typeListOffset
        self.absNameListOffset = self.mapOffset + self.nameListOffset

    def _readTypeList(self):
        absTypeListOffset = self.absTypeListOffset
        numTypesData = self._read(2, absTypeListOffset)
        (self.numTypes,) = struct.unpack(">H", numTypesData)
        absTypeListOffset2 = absTypeListOffset + 2
        # the stored count is one less than the actual number of types
        for i in range(self.numTypes + 1):
            resTypeItemOffset = absTypeListOffset2 + ResourceTypeItemSize * i
            resTypeItemData = self._read(ResourceTypeItemSize, resTypeItemOffset)
            item = sstruct.unpack(ResourceTypeItem, resTypeItemData)
            resType = tostr(item["type"], encoding="mac-roman")
            refListOffset = absTypeListOffset + item["refListOffset"]
            numRes = item["numRes"] + 1
            resources = self._readReferenceList(resType, refListOffset, numRes)
            self._resources[resType] = resources

    def _readReferenceList(self, resType, refListOffset, numRes):
        # Decompile each reference item into a Resource object.
        resources = []
        for i in range(numRes):
            refOffset = refListOffset + ResourceRefItemSize * i
            refData = self._read(ResourceRefItemSize, refOffset)
            res = Resource(resType)
            res.decompile(refData, self)
            resources.append(res)
        return resources

    def __getitem__(self, resType):
        return self._resources[resType]

    def __delitem__(self, resType):
        del self._resources[resType]

    def __setitem__(self, resType, resources):
        self._resources[resType] = resources

    def __len__(self):
        return len(self._resources)

    def __iter__(self):
        return iter(self._resources)

    def keys(self):
        return self._resources.keys()

    @property
    def types(self):
        """A list of the types of resources in the resource fork."""
        return list(self._resources.keys())

    def countResources(self, resType):
        """Return the number of resources of a given type."""
        try:
            return len(self[resType])
        except KeyError:
            return 0

    def getIndices(self, resType):
        """Returns a list of 1-based indices of resources of a given type."""
        numRes = self.countResources(resType)
        if numRes:
            return list(range(1, numRes + 1))
        else:
            return []

    def getNames(self, resType):
        """Return list of names of all resources of a given type."""
        return [res.name for res in self.get(resType, []) if res.name is not None]

    def getIndResource(self, resType, index):
        """Return resource of given type located at an index ranging from 1
        to the number of resources for that type, or None if not found.
        """
        if index < 1:
            return None
        try:
            res = self[resType][index - 1]
        except (KeyError, IndexError):
            return None
        return res

    def getNamedResource(self, resType, name):
        """Return the named resource of given type, else return None."""
        name = tostr(name, encoding="mac-roman")
        for res in self.get(resType, []):
            if res.name == name:
                return res
        return None

    def close(self):
        # Close the underlying stream (no-op if already closed).
        if not self.file.closed:
            self.file.close()
|
||||
|
||||
|
||||
class Resource(object):
    """Represents a resource stored within a resource fork.

    Attributes:
        type: resource type.
        data: resource data.
        id: ID.
        name: resource name.
        attr: attributes.
    """

    def __init__(
        self, resType=None, resData=None, resID=None, resName=None, resAttr=None
    ):
        self.type = resType
        self.data = resData
        self.id = resID
        self.name = resName
        self.attr = resAttr

    def decompile(self, refData, reader):
        """Populate id/name/attr/data from a reference-list item, fetching
        the actual resource data (and optional name) through *reader*.
        """
        # sstruct.unpack with a target object sets the ref-item fields
        # (id, nameOffset, attr, dataOffset, reserved) on self
        sstruct.unpack(ResourceRefItem, refData, self)
        # interpret 3-byte dataOffset as (padded) ULONG to unpack it with struct
        (self.dataOffset,) = struct.unpack(">L", bytesjoin([b"\0", self.dataOffset]))
        absDataOffset = reader.dataOffset + self.dataOffset
        # resource data is stored as a 4-byte length followed by the payload
        (dataLength,) = struct.unpack(">L", reader._read(4, absDataOffset))
        self.data = reader._read(dataLength)
        # a nameOffset of -1 means the resource has no name
        if self.nameOffset == -1:
            return
        absNameOffset = reader.absNameListOffset + self.nameOffset
        # names are Pascal-style: 1 length byte followed by the characters
        (nameLength,) = struct.unpack("B", reader._read(1, absNameOffset))
        (name,) = struct.unpack(">%ss" % nameLength, reader._read(nameLength))
        self.name = tostr(name, encoding="mac-roman")
|
||||
|
||||
|
||||
# sstruct format descriptions of the on-disk resource-fork structures,
# together with their precomputed sizes (all fields are big endian).

ResourceForkHeader = """
		> # big endian
		dataOffset: L
		mapOffset: L
		dataLen: L
		mapLen: L
"""

ResourceForkHeaderSize = sstruct.calcsize(ResourceForkHeader)

ResourceMapHeader = """
		> # big endian
		attr: H
		typeListOffset: H
		nameListOffset: H
"""

ResourceMapHeaderSize = sstruct.calcsize(ResourceMapHeader)

ResourceTypeItem = """
		> # big endian
		type: 4s
		numRes: H
		refListOffset: H
"""

ResourceTypeItemSize = sstruct.calcsize(ResourceTypeItem)

ResourceRefItem = """
		> # big endian
		id: h
		nameOffset: h
		attr: B
		dataOffset: 3s
		reserved: L
"""

ResourceRefItemSize = sstruct.calcsize(ResourceRefItem)
|
||||
@ -0,0 +1,681 @@
|
||||
import collections.abc
|
||||
import re
|
||||
from typing import (
|
||||
Any,
|
||||
Callable,
|
||||
Dict,
|
||||
List,
|
||||
Mapping,
|
||||
MutableMapping,
|
||||
Optional,
|
||||
Sequence,
|
||||
Type,
|
||||
Union,
|
||||
IO,
|
||||
)
|
||||
import warnings
|
||||
from io import BytesIO
|
||||
from datetime import datetime
|
||||
from base64 import b64encode, b64decode
|
||||
from numbers import Integral
|
||||
from types import SimpleNamespace
|
||||
from functools import singledispatch
|
||||
|
||||
from fontTools.misc import etree
|
||||
|
||||
from fontTools.misc.textTools import tostr
|
||||
|
||||
|
||||
# By default, we
# - deserialize <data> elements as bytes and
# - serialize bytes as <data> elements.
# Before, on Python 2, we
# - deserialized <data> elements as plistlib.Data objects, in order to
#   distinguish them from the built-in str type (which is bytes on python2)
# - serialized bytes as <string> elements (they must have only contained
#   ASCII characters in this case)
# You can pass use_builtin_types=[True|False] to the load/dump etc. functions
# to enforce a specific treatment.
# NOTE that unicode type always maps to <string> element, and plistlib.Data
# always maps to <data> element, regardless of use_builtin_types.
USE_BUILTIN_TYPES = True

# emitted verbatim at the top of serialized plist documents
XML_DECLARATION = b"""<?xml version='1.0' encoding='UTF-8'?>"""

PLIST_DOCTYPE = (
    b'<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" '
    b'"http://www.apple.com/DTDs/PropertyList-1.0.dtd">'
)
|
||||
|
||||
|
||||
# Date should conform to a subset of ISO 8601:
|
||||
# YYYY '-' MM '-' DD 'T' HH ':' MM ':' SS 'Z'
|
||||
_date_parser = re.compile(
|
||||
r"(?P<year>\d\d\d\d)"
|
||||
r"(?:-(?P<month>\d\d)"
|
||||
r"(?:-(?P<day>\d\d)"
|
||||
r"(?:T(?P<hour>\d\d)"
|
||||
r"(?::(?P<minute>\d\d)"
|
||||
r"(?::(?P<second>\d\d))"
|
||||
r"?)?)?)?)?Z",
|
||||
re.ASCII,
|
||||
)
|
||||
|
||||
|
||||
def _date_from_string(s: str) -> datetime:
|
||||
order = ("year", "month", "day", "hour", "minute", "second")
|
||||
m = _date_parser.match(s)
|
||||
if m is None:
|
||||
raise ValueError(f"Expected ISO 8601 date string, but got '{s:r}'.")
|
||||
gd = m.groupdict()
|
||||
lst = []
|
||||
for key in order:
|
||||
val = gd[key]
|
||||
if val is None:
|
||||
break
|
||||
lst.append(int(val))
|
||||
# NOTE: mypy doesn't know that lst is 6 elements long.
|
||||
return datetime(*lst) # type:ignore
|
||||
|
||||
|
||||
def _date_to_string(d: datetime) -> str:
|
||||
return "%04d-%02d-%02dT%02d:%02d:%02dZ" % (
|
||||
d.year,
|
||||
d.month,
|
||||
d.day,
|
||||
d.hour,
|
||||
d.minute,
|
||||
d.second,
|
||||
)
|
||||
|
||||
|
||||
class Data:
    """Represents binary data when ``use_builtin_types=False.``

    This class wraps binary data loaded from a plist file when the
    ``use_builtin_types`` argument to the loading function (:py:func:`fromtree`,
    :py:func:`load`, :py:func:`loads`) is false.

    The actual binary data is retrieved using the ``data`` attribute.
    """

    def __init__(self, data: bytes) -> None:
        if not isinstance(data, bytes):
            raise TypeError("Expected bytes, found %s" % type(data).__name__)
        self.data = data

    @classmethod
    def fromBase64(cls, data: Union[bytes, str]) -> "Data":
        """Alternate constructor decoding *data* from base64."""
        return cls(b64decode(data))

    def asBase64(self, maxlinelength: int = 76, indent_level: int = 1) -> bytes:
        """Return the wrapped bytes encoded as (line-wrapped) base64."""
        return _encode_base64(
            self.data, maxlinelength=maxlinelength, indent_level=indent_level
        )

    def __eq__(self, other: Any) -> bool:
        # compares equal both to other Data instances and to raw bytes
        if isinstance(other, self.__class__):
            return self.data == other.data
        if isinstance(other, bytes):
            return self.data == other
        return NotImplemented

    def __repr__(self) -> str:
        return "%s(%r)" % (type(self).__name__, self.data)
|
||||
|
||||
|
||||
def _encode_base64(
|
||||
data: bytes, maxlinelength: Optional[int] = 76, indent_level: int = 1
|
||||
) -> bytes:
|
||||
data = b64encode(data)
|
||||
if data and maxlinelength:
|
||||
# split into multiple lines right-justified to 'maxlinelength' chars
|
||||
indent = b"\n" + b" " * indent_level
|
||||
max_length = max(16, maxlinelength - len(indent))
|
||||
chunks = []
|
||||
for i in range(0, len(data), max_length):
|
||||
chunks.append(indent)
|
||||
chunks.append(data[i : i + max_length])
|
||||
chunks.append(indent)
|
||||
data = b"".join(chunks)
|
||||
return data
|
||||
|
||||
|
||||
# Mypy does not support recursive type aliases as of 0.782, Pylance does.
# https://github.com/python/mypy/issues/731
# https://devblogs.microsoft.com/python/pylance-introduces-five-new-features-that-enable-type-magic-for-python-developers/#1-support-for-recursive-type-aliases
# Union of every Python type this module can serialize to a plist element.
PlistEncodable = Union[
    bool,
    bytes,
    Data,
    datetime,
    float,
    Integral,
    Mapping[str, Any],
    Sequence[Any],
    str,
]
|
||||
|
||||
|
||||
class PlistTarget:
    """Event handler using the ElementTree Target API that can be
    passed to a XMLParser to produce property list objects from XML.
    It is based on the CPython plistlib module's _PlistParser class,
    but does not use the expat parser.

    >>> from fontTools.misc import etree
    >>> parser = etree.XMLParser(target=PlistTarget())
    >>> result = etree.XML(
    ...     "<dict>"
    ...     "    <key>something</key>"
    ...     "    <string>blah</string>"
    ...     "</dict>",
    ...     parser=parser)
    >>> result == {"something": "blah"}
    True

    Links:
    https://github.com/python/cpython/blob/main/Lib/plistlib.py
    http://lxml.de/parsing.html#the-target-parser-interface
    """

    def __init__(
        self,
        use_builtin_types: Optional[bool] = None,
        dict_type: Type[MutableMapping[str, Any]] = dict,
    ) -> None:
        # parser state: open containers, the pending <key> text, and the
        # finished root object
        self.stack: List[PlistEncodable] = []
        self.current_key: Optional[str] = None
        self.root: Optional[PlistEncodable] = None
        if use_builtin_types is None:
            self._use_builtin_types = USE_BUILTIN_TYPES
        else:
            if use_builtin_types is False:
                warnings.warn(
                    "Setting use_builtin_types to False is deprecated and will be "
                    "removed soon.",
                    DeprecationWarning,
                )
            self._use_builtin_types = use_builtin_types
        self._dict_type = dict_type

    def start(self, tag: str, attrib: Mapping[str, str]) -> None:
        # reset accumulated character data, then dispatch to a start handler
        self._data: List[str] = []
        handler = _TARGET_START_HANDLERS.get(tag)
        if handler is not None:
            handler(self)

    def end(self, tag: str) -> None:
        handler = _TARGET_END_HANDLERS.get(tag)
        if handler is not None:
            handler(self)

    def data(self, data: str) -> None:
        # character data may arrive in multiple chunks; accumulate them
        self._data.append(data)

    def close(self) -> PlistEncodable:
        # called by the parser once the document has been fully consumed
        if self.root is None:
            raise ValueError("No root set.")
        return self.root

    # helpers

    def add_object(self, value: PlistEncodable) -> None:
        # Attach 'value' at the current position: as the value for a pending
        # dict key, as the document root, or appended to an open array.
        if self.current_key is not None:
            stack_top = self.stack[-1]
            if not isinstance(stack_top, collections.abc.MutableMapping):
                raise ValueError("unexpected element: %r" % stack_top)
            stack_top[self.current_key] = value
            self.current_key = None
        elif not self.stack:
            # this is the root object
            self.root = value
        else:
            stack_top = self.stack[-1]
            if not isinstance(stack_top, list):
                raise ValueError("unexpected element: %r" % stack_top)
            stack_top.append(value)

    def get_data(self) -> str:
        # join and clear the accumulated character data
        data = "".join(self._data)
        self._data = []
        return data
|
||||
|
||||
|
||||
# event handlers
# Each handler receives the PlistTarget instance and translates one XML
# start/end event into parser-state mutations via add_object/get_data.


def start_dict(self: PlistTarget) -> None:
    # open a new mapping and push it so child key/value pairs land inside
    d = self._dict_type()
    self.add_object(d)
    self.stack.append(d)


def end_dict(self: PlistTarget) -> None:
    # a pending key at </dict> means a <key> had no following value
    if self.current_key:
        raise ValueError("missing value for key '%s'" % self.current_key)
    self.stack.pop()


def end_key(self: PlistTarget) -> None:
    # keys are only valid directly inside a dict, and only one at a time
    if self.current_key or not isinstance(self.stack[-1], collections.abc.Mapping):
        raise ValueError("unexpected key")
    self.current_key = self.get_data()


def start_array(self: PlistTarget) -> None:
    a: List[PlistEncodable] = []
    self.add_object(a)
    self.stack.append(a)


def end_array(self: PlistTarget) -> None:
    self.stack.pop()


def end_true(self: PlistTarget) -> None:
    self.add_object(True)


def end_false(self: PlistTarget) -> None:
    self.add_object(False)


def end_integer(self: PlistTarget) -> None:
    self.add_object(int(self.get_data()))


def end_real(self: PlistTarget) -> None:
    self.add_object(float(self.get_data()))


def end_string(self: PlistTarget) -> None:
    self.add_object(self.get_data())


def end_data(self: PlistTarget) -> None:
    # <data> holds base64; produce bytes or a Data wrapper per configuration
    if self._use_builtin_types:
        self.add_object(b64decode(self.get_data()))
    else:
        self.add_object(Data.fromBase64(self.get_data()))


def end_date(self: PlistTarget) -> None:
    self.add_object(_date_from_string(self.get_data()))


# dispatch tables mapping plist element tags to their event handlers
_TARGET_START_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = {
    "dict": start_dict,
    "array": start_array,
}

_TARGET_END_HANDLERS: Dict[str, Callable[[PlistTarget], None]] = {
    "dict": end_dict,
    "array": end_array,
    "key": end_key,
    "true": end_true,
    "false": end_false,
    "integer": end_integer,
    "real": end_real,
    "string": end_string,
    "data": end_data,
    "date": end_date,
}
|
||||
|
||||
|
||||
# functions to build element tree from plist data
|
||||
|
||||
|
||||
def _string_element(value: str, ctx: SimpleNamespace) -> etree.Element:
|
||||
el = etree.Element("string")
|
||||
el.text = value
|
||||
return el
|
||||
|
||||
|
||||
def _bool_element(value: bool, ctx: SimpleNamespace) -> etree.Element:
|
||||
if value:
|
||||
return etree.Element("true")
|
||||
return etree.Element("false")
|
||||
|
||||
|
||||
def _integer_element(value: int, ctx: SimpleNamespace) -> etree.Element:
|
||||
if -1 << 63 <= value < 1 << 64:
|
||||
el = etree.Element("integer")
|
||||
el.text = "%d" % value
|
||||
return el
|
||||
raise OverflowError(value)
|
||||
|
||||
|
||||
def _real_element(value: float, ctx: SimpleNamespace) -> etree.Element:
|
||||
el = etree.Element("real")
|
||||
el.text = repr(value)
|
||||
return el
|
||||
|
||||
|
||||
def _dict_element(
    d: Mapping[str, PlistEncodable], ctx: SimpleNamespace
) -> etree.Element:
    """Build a plist ``<dict>`` element with alternating key/value children."""
    el = etree.Element("dict")
    items = sorted(d.items()) if ctx.sort_keys else d.items()
    ctx.indent_level += 1
    for key, value in items:
        if not isinstance(key, str):
            # non-string keys are either skipped or rejected, per context
            if ctx.skipkeys:
                continue
            raise TypeError("keys must be strings")
        key_el = etree.SubElement(el, "key")
        key_el.text = tostr(key, "utf-8")
        el.append(_make_element(value, ctx))
    ctx.indent_level -= 1
    return el
|
||||
|
||||
|
||||
def _array_element(
    array: Sequence[PlistEncodable], ctx: SimpleNamespace
) -> etree.Element:
    """Build a plist ``<array>`` element from sequence *array*."""
    el = etree.Element("array")
    if not array:
        return el
    ctx.indent_level += 1
    for item in array:
        el.append(_make_element(item, ctx))
    ctx.indent_level -= 1
    return el
|
||||
|
||||
|
||||
def _date_element(date: datetime, ctx: SimpleNamespace) -> etree.Element:
    # serialize as an ISO 8601 'YYYY-MM-DDTHH:MM:SSZ' <date> element
    el = etree.Element("date")
    el.text = _date_to_string(date)
    return el
|
||||
|
||||
|
||||
def _data_element(data: bytes, ctx: SimpleNamespace) -> etree.Element:
    # serialize raw bytes as a base64 <data> element
    el = etree.Element("data")
    # NOTE: mypy is confused about whether el.text should be str or bytes.
    el.text = _encode_base64(  # type: ignore
        data,
        # only wrap base64 lines when pretty-printing
        maxlinelength=(76 if ctx.pretty_print else None),
        indent_level=ctx.indent_level,
    )
    return el
|
||||
|
||||
|
||||
def _string_or_data_element(raw_bytes: bytes, ctx: SimpleNamespace) -> etree.Element:
    """Serialize *raw_bytes* either as ``<data>`` (builtin-types mode) or,
    in legacy mode, as an ASCII ``<string>``.

    Raises:
        ValueError: in legacy mode, when the bytes are not pure ASCII.
    """
    if ctx.use_builtin_types:
        return _data_element(raw_bytes, ctx)
    try:
        decoded = raw_bytes.decode(encoding="ascii", errors="strict")
    except UnicodeDecodeError:
        raise ValueError(
            "invalid non-ASCII bytes; use unicode string instead: %r" % raw_bytes
        )
    return _string_element(decoded, ctx)
|
||||
|
||||
|
||||
# The following is probably not entirely correct. The signature should take `Any`
# and return `NoReturn`. At the time of this writing, neither mypy nor Pyright
# can deal with singledispatch properly and will apply the signature of the base
# function to all others. Being slightly dishonest makes it type-check and return
# usable typing information for the optimistic case.
@singledispatch
def _make_element(value: PlistEncodable, ctx: SimpleNamespace) -> etree.Element:
    """Fallback serializer: reached only for types with no registered
    handler (see the ``register`` calls below)."""
    raise TypeError("unsupported type: %s" % type(value))
|
||||
|
||||
|
||||
# Map concrete Python types to their serializers.  bool must be registered
# in addition to Integral because bool is an int subclass and would
# otherwise be dispatched to _integer_element.
_make_element.register(str)(_string_element)
_make_element.register(bool)(_bool_element)
_make_element.register(Integral)(_integer_element)
_make_element.register(float)(_real_element)
_make_element.register(collections.abc.Mapping)(_dict_element)
_make_element.register(list)(_array_element)
_make_element.register(tuple)(_array_element)
_make_element.register(datetime)(_date_element)
# bytes honor the use_builtin_types switch; bytearray and Data are
# always written as <data>.
_make_element.register(bytes)(_string_or_data_element)
_make_element.register(bytearray)(_data_element)
_make_element.register(Data)(lambda v, ctx: _data_element(v.data, ctx))
|
||||
|
||||
|
||||
# Public functions to create element tree from plist-compatible python
|
||||
# data structures and viceversa, for use when (de)serializing GLIF xml.
|
||||
|
||||
|
||||
def totree(
    value: PlistEncodable,
    sort_keys: bool = True,
    skipkeys: bool = False,
    use_builtin_types: Optional[bool] = None,
    pretty_print: bool = True,
    indent_level: int = 1,
) -> etree.Element:
    """Convert a value derived from a plist into an XML tree.

    Args:
        value: Any kind of value to be serialized to XML.
        sort_keys: Whether keys of dictionaries should be sorted.
        skipkeys (bool): Whether to silently skip non-string dictionary
            keys.
        use_builtin_types (bool): If true, byte strings will be
            encoded in Base-64 and wrapped in a ``data`` tag; if
            false, they will be either stored as ASCII strings or an
            exception raised if they cannot be decoded as such. Defaults
            to ``True`` if not present. Deprecated.
        pretty_print (bool): Whether to indent the output.
        indent_level (int): Level of indentation when serializing.

    Returns: an ``etree`` ``Element`` object.

    Raises:
        ``TypeError``
            if non-string dictionary keys are serialized
            and ``skipkeys`` is false.
        ``ValueError``
            if non-ASCII binary data is present
            and `use_builtin_types` is false.
    """
    if use_builtin_types is None:
        # Fall back to the module-level default when the (deprecated)
        # parameter is not supplied.  (The original code had a redundant
        # `else: use_builtin_types = use_builtin_types` self-assignment
        # here, which has been removed.)
        use_builtin_types = USE_BUILTIN_TYPES
    context = SimpleNamespace(
        sort_keys=sort_keys,
        skipkeys=skipkeys,
        use_builtin_types=use_builtin_types,
        pretty_print=pretty_print,
        indent_level=indent_level,
    )
    return _make_element(value, context)
|
||||
|
||||
|
||||
def fromtree(
    tree: etree.Element,
    use_builtin_types: Optional[bool] = None,
    dict_type: Type[MutableMapping[str, Any]] = dict,
) -> Any:
    """Convert an XML tree to a plist structure.

    Args:
        tree: An ``etree`` ``Element``.
        use_builtin_types: If True, binary data is deserialized to
            bytes strings. If False, it is wrapped in :py:class:`Data`
            objects. Defaults to True if not provided. Deprecated.
        dict_type: What type to use for dictionaries.

    Returns: An object (usually a dictionary).
    """
    target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type)
    # Replay the already-parsed tree through the same event-driven target
    # used when parsing raw XML, so both code paths share one
    # implementation.  NOTE: etree.iterwalk is an lxml-style API —
    # presumably provided by the fontTools etree shim; confirm if the
    # shim changes.
    for action, element in etree.iterwalk(tree, events=("start", "end")):
        if action == "start":
            target.start(element.tag, element.attrib)
        elif action == "end":
            # if there are no children, parse the leaf's data
            if not len(element):
                # always pass str, not None
                target.data(element.text or "")
            target.end(element.tag)
    return target.close()
|
||||
|
||||
|
||||
# python3 plistlib API
|
||||
|
||||
|
||||
def load(
    fp: IO[bytes],
    use_builtin_types: Optional[bool] = None,
    dict_type: Type[MutableMapping[str, Any]] = dict,
) -> Any:
    """Load a plist file into an object.

    Args:
        fp: An opened file.
        use_builtin_types: If True, binary data is deserialized to
            bytes strings. If False, it is wrapped in :py:class:`Data`
            objects. Defaults to True if not provided. Deprecated.
        dict_type: What type to use for dictionaries.

    Returns:
        An object (usually a dictionary) representing the top level of
        the plist file.
    """

    if not hasattr(fp, "read"):
        raise AttributeError("'%s' object has no attribute 'read'" % type(fp).__name__)
    target = PlistTarget(use_builtin_types=use_builtin_types, dict_type=dict_type)
    parser = etree.XMLParser(target=target)
    result = etree.parse(fp, parser=parser)
    # lxml returns the target object directly, while ElementTree wraps
    # it as the root of an ElementTree object
    try:
        return result.getroot()
    except AttributeError:
        return result
|
||||
|
||||
|
||||
def loads(
    value: bytes,
    use_builtin_types: Optional[bool] = None,
    dict_type: Type[MutableMapping[str, Any]] = dict,
) -> Any:
    """Load a plist file from a string into an object.

    Args:
        value: A bytes string containing a plist.
        use_builtin_types: If True, binary data is deserialized to
            bytes strings. If False, it is wrapped in :py:class:`Data`
            objects. Defaults to True if not provided. Deprecated.
        dict_type: What type to use for dictionaries.

    Returns:
        An object (usually a dictionary) representing the top level of
        the plist file.
    """
    # Delegate to load() via an in-memory binary stream.
    return load(
        BytesIO(value),
        use_builtin_types=use_builtin_types,
        dict_type=dict_type,
    )
|
||||
|
||||
|
||||
def dump(
    value: PlistEncodable,
    fp: IO[bytes],
    sort_keys: bool = True,
    skipkeys: bool = False,
    use_builtin_types: Optional[bool] = None,
    pretty_print: bool = True,
) -> None:
    """Write a Python object to a plist file.

    Args:
        value: An object to write.
        fp: A file opened for writing.
        sort_keys (bool): Whether keys of dictionaries should be sorted.
        skipkeys (bool): Whether to silently skip non-string dictionary
            keys.
        use_builtin_types (bool): If true, byte strings will be
            encoded in Base-64 and wrapped in a ``data`` tag; if
            false, they will be either stored as ASCII strings or an
            exception raised if they cannot be represented. Defaults
            to ``True`` if not present. Deprecated.
        pretty_print (bool): Whether to indent the output.

    Raises:
        ``TypeError``
            if non-string dictionary keys are serialized
            and ``skipkeys`` is false.
        ``ValueError``
            if non-representable binary data is present
            and `use_builtin_types` is false.
    """

    if not hasattr(fp, "write"):
        raise AttributeError("'%s' object has no attribute 'write'" % type(fp).__name__)
    root = etree.Element("plist", version="1.0")
    el = totree(
        value,
        sort_keys=sort_keys,
        skipkeys=skipkeys,
        use_builtin_types=use_builtin_types,
        pretty_print=pretty_print,
    )
    root.append(el)
    tree = etree.ElementTree(root)
    # we write the doctype ourselves instead of using the 'doctype' argument
    # of 'write' method, because lxml will force adding a '\n' even when
    # pretty_print is False.
    if pretty_print:
        header = b"\n".join((XML_DECLARATION, PLIST_DOCTYPE, b""))
    else:
        header = XML_DECLARATION + PLIST_DOCTYPE
    fp.write(header)
    tree.write(  # type: ignore
        fp,
        encoding="utf-8",
        pretty_print=pretty_print,
        xml_declaration=False,
    )
|
||||
|
||||
|
||||
def dumps(
    value: PlistEncodable,
    sort_keys: bool = True,
    skipkeys: bool = False,
    use_builtin_types: Optional[bool] = None,
    pretty_print: bool = True,
) -> bytes:
    """Write a Python object to a string in plist format.

    Args:
        value: An object to write.
        sort_keys (bool): Whether keys of dictionaries should be sorted.
        skipkeys (bool): Whether to silently skip non-string dictionary
            keys.
        use_builtin_types (bool): If true, byte strings will be
            encoded in Base-64 and wrapped in a ``data`` tag; if
            false, they will be either stored as strings or an
            exception raised if they cannot be represented. Defaults
            to ``True`` if not present. Deprecated.
        pretty_print (bool): Whether to indent the output.

    Returns:
        string: A plist representation of the Python object.

    Raises:
        ``TypeError``
            if non-string dictionary keys are serialized
            and ``skipkeys`` is false.
        ``ValueError``
            if non-representable binary data is present
            and `use_builtin_types` is false.
    """
    # Serialize through dump() into an in-memory buffer.
    fp = BytesIO()
    dump(
        value,
        fp,
        sort_keys=sort_keys,
        skipkeys=skipkeys,
        use_builtin_types=use_builtin_types,
        pretty_print=pretty_print,
    )
    return fp.getvalue()
|
||||
1496
venv/lib/python3.12/site-packages/fontTools/misc/psCharStrings.py
Normal file
1496
venv/lib/python3.12/site-packages/fontTools/misc/psCharStrings.py
Normal file
File diff suppressed because it is too large
Load Diff
398
venv/lib/python3.12/site-packages/fontTools/misc/psLib.py
Normal file
398
venv/lib/python3.12/site-packages/fontTools/misc/psLib.py
Normal file
@ -0,0 +1,398 @@
|
||||
from fontTools.misc.textTools import bytechr, byteord, bytesjoin, tobytes, tostr
|
||||
from fontTools.misc import eexec
|
||||
from .psOperators import (
|
||||
PSOperators,
|
||||
ps_StandardEncoding,
|
||||
ps_array,
|
||||
ps_boolean,
|
||||
ps_dict,
|
||||
ps_integer,
|
||||
ps_literal,
|
||||
ps_mark,
|
||||
ps_name,
|
||||
ps_operator,
|
||||
ps_procedure,
|
||||
ps_procmark,
|
||||
ps_real,
|
||||
ps_string,
|
||||
)
|
||||
import re
|
||||
from collections.abc import Callable
|
||||
from string import whitespace
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)

ps_special = b"()<>[]{}%"  # / is one too, but we take care of that one differently

# All patterns below operate on *byte* strings; bytesjoin converts the
# str `whitespace` constant as needed.
skipwhiteRE = re.compile(bytesjoin([b"[", whitespace, b"]*"]))
endofthingPat = bytesjoin([b"[^][(){}<>/%", whitespace, b"]*"])
endofthingRE = re.compile(endofthingPat)
commentRE = re.compile(b"%[^\n\r]*")

# XXX This not entirely correct as it doesn't allow *nested* embedded parens:
stringPat = rb"""
    \(
        (
            (
                [^()]* \ [()]
            )
            |
            (
                [^()]* \( [^()]* \)
            )
        )*
        [^()]*
    \)
"""
# whitespace inside the verbose pattern above is cosmetic only; strip it
stringPat = b"".join(stringPat.split())
stringRE = re.compile(stringPat)

hexstringRE = re.compile(bytesjoin([b"<[", whitespace, b"0-9A-Fa-f]*>"]))
|
||||
|
||||
|
||||
class PSTokenError(Exception):
    """Raised when the tokenizer encounters malformed PostScript input."""

    pass
|
||||
|
||||
|
||||
class PSError(Exception):
    """Raised for PostScript-level errors during interpretation
    (stack underflow, typecheck failure, name lookup failure, ...)."""

    pass
|
||||
|
||||
|
||||
class PSTokenizer(object):
    """Tokenizer over an in-memory PostScript byte buffer.

    Also mimics a minimal file object (read/close) so the interpreter's
    ``currentfile`` operator can hand it around, and supports switching
    into/out of the eexec-encrypted section of a Type 1 font program.
    """

    def __init__(self, buf=b"", encoding="ascii"):
        # Force self.buf to be a byte string
        buf = tobytes(buf)
        self.buf = buf
        self.len = len(buf)
        self.pos = 0
        self.closed = False
        # used to decode byte tokens back to str in getnexttoken()
        self.encoding = encoding

    def read(self, n=-1):
        """Read at most 'n' bytes from the buffer, or less if the read
        hits EOF before obtaining 'n' bytes.
        If 'n' is negative or omitted, read all data until EOF is reached.
        """
        if self.closed:
            raise ValueError("I/O operation on closed file")
        if n is None or n < 0:
            newpos = self.len
        else:
            newpos = min(self.pos + n, self.len)
        r = self.buf[self.pos : newpos]
        self.pos = newpos
        return r

    def close(self):
        # Release the buffer; any further read() raises ValueError.
        if not self.closed:
            self.closed = True
            del self.buf, self.pos

    def getnexttoken(
        self,
        # localize some stuff, for performance
        len=len,
        ps_special=ps_special,
        stringmatch=stringRE.match,
        hexstringmatch=hexstringRE.match,
        commentmatch=commentRE.match,
        endmatch=endofthingRE.match,
    ):
        """Return the next ``(tokentype, token)`` pair, or ``(None, None)``
        at end of input.

        ``tokentype`` names the interpreter handler method
        ("do_special", "do_comment", "do_string", "do_hexstring",
        "do_literal") or is "" for plain tokens; ``token`` is the token
        text decoded with ``self.encoding``.

        Raises:
            PSTokenError: on malformed strings/hexstrings or stray
                special characters.
        """
        self.skipwhite()
        if self.pos >= self.len:
            return None, None
        pos = self.pos
        buf = self.buf
        char = bytechr(byteord(buf[pos]))
        if char in ps_special:
            if char in b"{}[]":
                tokentype = "do_special"
                token = char
            elif char == b"%":
                tokentype = "do_comment"
                _, nextpos = commentmatch(buf, pos).span()
                token = buf[pos:nextpos]
            elif char == b"(":
                tokentype = "do_string"
                m = stringmatch(buf, pos)
                if m is None:
                    raise PSTokenError("bad string at character %d" % pos)
                _, nextpos = m.span()
                token = buf[pos:nextpos]
            elif char == b"<":
                tokentype = "do_hexstring"
                m = hexstringmatch(buf, pos)
                if m is None:
                    raise PSTokenError("bad hexstring at character %d" % pos)
                _, nextpos = m.span()
                token = buf[pos:nextpos]
            else:
                raise PSTokenError("bad token at character %d" % pos)
        else:
            if char == b"/":
                # literal name: the slash itself is kept in the token
                tokentype = "do_literal"
                m = endmatch(buf, pos + 1)
            else:
                tokentype = ""
                m = endmatch(buf, pos)
            if m is None:
                raise PSTokenError("bad token at character %d" % pos)
            _, nextpos = m.span()
            token = buf[pos:nextpos]
        self.pos = pos + len(token)
        token = tostr(token, encoding=self.encoding)
        return tokentype, token

    def skipwhite(self, whitematch=skipwhiteRE.match):
        # Advance past any run of whitespace; the '*' pattern always matches.
        _, nextpos = whitematch(self.buf, self.pos).span()
        self.pos = nextpos

    def starteexec(self):
        """Switch to reading the eexec-decrypted remainder of the buffer.

        The original (encrypted) tail is stashed in self.dirtybuf so
        stopeexec() can restore it.
        """
        self.pos = self.pos + 1
        self.dirtybuf = self.buf[self.pos :]
        # second return value (the final decryption key) is unused here
        self.buf, R = eexec.decrypt(self.dirtybuf, 55665)
        self.len = len(self.buf)
        # skip the leading 4 bytes of the decrypted stream — presumably
        # the eexec random-salt bytes; confirm against the Type 1 spec
        self.pos = 4

    def stopeexec(self):
        """Restore the original (encrypted) buffer after the eexec section."""
        if not hasattr(self, "dirtybuf"):
            return
        self.buf = self.dirtybuf
        del self.dirtybuf
|
||||
|
||||
|
||||
class PSInterpreter(PSOperators):
    """A minimal PostScript interpreter, sufficient to execute Type 1
    font programs (see suckfont())."""

    def __init__(self, encoding="ascii"):
        systemdict = {}
        userdict = {}
        self.encoding = encoding
        # dictionary stack: lookups go from last (innermost) to first
        self.dictstack = [systemdict, userdict]
        # operand stack
        self.stack = []
        # nesting depth of '{' procedure definitions currently open
        self.proclevel = 0
        self.procmark = ps_procmark()
        self.fillsystemdict()

    def fillsystemdict(self):
        """Populate systemdict with built-in objects and with every
        ``ps_*`` operator method defined on this class hierarchy."""
        systemdict = self.dictstack[0]
        systemdict["["] = systemdict["mark"] = self.mark = ps_mark()
        systemdict["]"] = ps_operator("]", self.do_makearray)
        systemdict["true"] = ps_boolean(1)
        systemdict["false"] = ps_boolean(0)
        systemdict["StandardEncoding"] = ps_array(ps_StandardEncoding)
        systemdict["FontDirectory"] = ps_dict({})
        self.suckoperators(systemdict, self.__class__)

    def suckoperators(self, systemdict, klass):
        # Register every callable 'ps_<name>' attribute as operator
        # '<name>', recursing through all base classes.
        for name in dir(klass):
            attr = getattr(self, name)
            if isinstance(attr, Callable) and name[:3] == "ps_":
                name = name[3:]
                systemdict[name] = ps_operator(name, attr)
        for baseclass in klass.__bases__:
            self.suckoperators(systemdict, baseclass)

    def interpret(self, data, getattr=getattr):
        """Tokenize and execute *data* (bytes or str).

        On any exception, up to 50 bytes of context on either side of
        the error position are logged before re-raising.
        """
        tokenizer = self.tokenizer = PSTokenizer(data, self.encoding)
        # localize hot lookups for the token loop
        getnexttoken = tokenizer.getnexttoken
        do_token = self.do_token
        handle_object = self.handle_object
        try:
            while 1:
                tokentype, token = getnexttoken()
                if not token:
                    break
                if tokentype:
                    handler = getattr(self, tokentype)
                    object = handler(token)
                else:
                    object = do_token(token)
                if object is not None:
                    handle_object(object)
            tokenizer.close()
            self.tokenizer = None
        except:
            if self.tokenizer is not None:
                log.debug(
                    "ps error:\n"
                    "- - - - - - -\n"
                    "%s\n"
                    ">>>\n"
                    "%s\n"
                    "- - - - - - -",
                    self.tokenizer.buf[self.tokenizer.pos - 50 : self.tokenizer.pos],
                    self.tokenizer.buf[self.tokenizer.pos : self.tokenizer.pos + 50],
                )
            raise

    def handle_object(self, object):
        """Push literal objects; execute executable names/operators —
        unless we are inside a procedure definition, in which case
        everything is pushed."""
        if not (self.proclevel or object.literal or object.type == "proceduretype"):
            if object.type != "operatortype":
                object = self.resolve_name(object.value)
            if object.literal:
                self.push(object)
            else:
                if object.type == "proceduretype":
                    self.call_procedure(object)
                else:
                    object.function()
        else:
            self.push(object)

    def call_procedure(self, proc):
        # Execute each element of the procedure body in order.
        handle_object = self.handle_object
        for item in proc.value:
            handle_object(item)

    def resolve_name(self, name):
        """Look *name* up in the dictionary stack, innermost dict first.

        Raises:
            PSError: if the name is not defined in any dictionary.
        """
        dictstack = self.dictstack
        for i in range(len(dictstack) - 1, -1, -1):
            if name in dictstack[i]:
                return dictstack[i][name]
        raise PSError("name error: " + str(name))

    def do_token(
        self,
        token,
        int=int,
        float=float,
        ps_name=ps_name,
        ps_integer=ps_integer,
        ps_real=ps_real,
    ):
        """Convert a plain token to an integer, a real, a radix number
        ('base#digits'), or an executable name — tried in that order."""
        try:
            num = int(token)
        except (ValueError, OverflowError):
            try:
                num = float(token)
            except (ValueError, OverflowError):
                if "#" in token:
                    hashpos = token.find("#")
                    try:
                        base = int(token[:hashpos])
                        num = int(token[hashpos + 1 :], base)
                    except (ValueError, OverflowError):
                        return ps_name(token)
                    else:
                        return ps_integer(num)
                else:
                    return ps_name(token)
            else:
                return ps_real(num)
        else:
            return ps_integer(num)

    def do_comment(self, token):
        # comments are discarded
        pass

    def do_literal(self, token):
        # strip the leading slash
        return ps_literal(token[1:])

    def do_string(self, token):
        # strip the surrounding parentheses
        return ps_string(token[1:-1])

    def do_hexstring(self, token):
        """Decode a <hex> string token, ignoring embedded whitespace and
        zero-padding an odd number of digits."""
        hexStr = "".join(token[1:-1].split())
        if len(hexStr) % 2:
            hexStr = hexStr + "0"
        cleanstr = []
        for i in range(0, len(hexStr), 2):
            cleanstr.append(chr(int(hexStr[i : i + 2], 16)))
        cleanstr = "".join(cleanstr)
        return ps_string(cleanstr)

    def do_special(self, token):
        """Handle the '{', '}', '[' and ']' tokens."""
        if token == "{":
            self.proclevel = self.proclevel + 1
            return self.procmark
        elif token == "}":
            # collect everything down to the matching procmark
            proc = []
            while 1:
                topobject = self.pop()
                if topobject == self.procmark:
                    break
                proc.append(topobject)
            self.proclevel = self.proclevel - 1
            proc.reverse()
            return ps_procedure(proc)
        elif token == "[":
            return self.mark
        elif token == "]":
            # ']' executes as an operator (see fillsystemdict)
            return ps_name("]")
        else:
            raise PSTokenError("huh?")

    def push(self, object):
        self.stack.append(object)

    def pop(self, *types):
        """Pop and return the top object, optionally typechecking it
        against *types* (PSError on mismatch or underflow)."""
        stack = self.stack
        if not stack:
            raise PSError("stack underflow")
        object = stack[-1]
        if types:
            if object.type not in types:
                raise PSError(
                    "typecheck, expected %s, found %s" % (repr(types), object.type)
                )
        del stack[-1]
        return object

    def do_makearray(self):
        # ']' operator: collect objects down to the mark into an array.
        array = []
        while 1:
            topobject = self.pop()
            if topobject == self.mark:
                break
            array.append(topobject)
        array.reverse()
        self.push(ps_array(array))

    def close(self):
        """Remove circular references."""
        del self.stack
        del self.dictstack
|
||||
|
||||
|
||||
def unpack_item(item):
    """Recursively convert a ps_object tree into plain Python values.

    dict values map to dicts, list values to lists — except procedures,
    which become (immutable) tuples; anything else contributes its raw
    ``.value``.
    """
    value = item.value
    if type(value) == dict:
        return {key: unpack_item(sub) for key, sub in value.items()}
    if type(value) == list:
        unpacked = [unpack_item(sub) for sub in value]
        if item.type == "proceduretype":
            return tuple(unpacked)
        return unpacked
    return value
|
||||
|
||||
|
||||
def suckfont(data, encoding="ascii"):
    """Interpret a Type 1 font program and return its font dictionary
    as plain Python objects (see unpack_item()).

    'data' is the font program as bytes; 'encoding' is used when
    decoding tokens to str.
    """
    # Try to learn the font's name up front so we can pick the right
    # entry out of FontDirectory afterwards.
    m = re.search(rb"/FontName\s+/([^ \t\n\r]+)\s+def", data)
    if m:
        fontName = m.group(1)
        fontName = fontName.decode()
    else:
        fontName = None
    interpreter = PSInterpreter(encoding=encoding)
    # Pre-define a dummy Helvetica in FontDirectory before running the
    # program — NOTE(review): presumably so lookups of a standard font
    # by the program don't fail; confirm against real font data.
    interpreter.interpret(
        b"/Helvetica 4 dict dup /Encoding StandardEncoding put definefont pop"
    )
    interpreter.interpret(data)
    fontdir = interpreter.dictstack[0]["FontDirectory"].value
    if fontName in fontdir:
        rawfont = fontdir[fontName]
    else:
        # fall back, in case fontName wasn't found
        fontNames = list(fontdir.keys())
        if len(fontNames) > 1:
            # drop the dummy entry we added above, keep the real font(s)
            fontNames.remove("Helvetica")
        fontNames.sort()
        rawfont = fontdir[fontNames[0]]
    interpreter.close()
    return unpack_item(rawfont)
|
||||
572
venv/lib/python3.12/site-packages/fontTools/misc/psOperators.py
Normal file
572
venv/lib/python3.12/site-packages/fontTools/misc/psOperators.py
Normal file
@ -0,0 +1,572 @@
|
||||
# Maps a ps_object access level to its PostScript access-attribute keyword.
_accessstrings = {0: "", 1: "readonly", 2: "executeonly", 3: "noaccess"}
|
||||
|
||||
|
||||
class ps_object(object):
    """Base class for all wrapped PostScript objects.

    'literal' distinguishes literal from executable objects, 'access' is
    the access level (see _accessstrings), and 'type' is derived from
    the subclass name (e.g. ps_integer -> "integertype").
    """

    literal = 1
    access = 0
    value = None

    def __init__(self, value):
        self.value = value
        self.type = self.__class__.__name__[3:] + "type"

    def __repr__(self):
        return f"<{type(self).__name__[3:]} {self.value!r}>"
|
||||
|
||||
|
||||
class ps_operator(ps_object):
    """An executable operator bound to a Python callable."""

    literal = 0

    def __init__(self, name, function):
        self.name = name
        self.function = function
        self.type = self.__class__.__name__[3:] + "type"

    def __repr__(self):
        return f"<operator {self.name}>"
|
||||
|
||||
|
||||
class ps_procedure(ps_object):
    """An executable procedure: a list of ps_objects between { and }."""

    literal = 0

    def __repr__(self):
        return "<procedure>"

    def __str__(self):
        # Render as "{item item ...}" with single spaces between items.
        return "{" + " ".join(str(item) for item in self.value) + "}"
|
||||
|
||||
|
||||
class ps_name(ps_object):
    """A PostScript name; executable by default (literal = 0)."""

    literal = 0

    def __str__(self):
        # literal names get a leading slash, executable names don't
        prefix = "/" if self.literal else ""
        return prefix + self.value
|
||||
|
||||
|
||||
class ps_literal(ps_object):
    """A literal name, always printed with a leading slash."""

    def __str__(self):
        return "/" + self.value
|
||||
|
||||
|
||||
class ps_array(ps_object):
    """An array of ps_objects."""

    def __str__(self):
        # Each item is followed by its access keyword (when non-empty);
        # items are separated by single spaces.
        parts = []
        for item in self.value:
            access = _accessstrings[item.access]
            if access:
                access = " " + access
            parts.append(str(item) + access)
        return "[" + " ".join(parts) + "]"

    def __repr__(self):
        return "<array>"
|
||||
|
||||
|
||||
# Canonical key order for the cleartext portion of a Type 1 font dict.
_type1_pre_eexec_order = [
    "FontInfo",
    "FontName",
    "Encoding",
    "PaintType",
    "FontType",
    "FontMatrix",
    "FontBBox",
    "UniqueID",
    "Metrics",
    "StrokeWidth",
]

# Canonical key order inside the FontInfo subdictionary.
_type1_fontinfo_order = [
    "version",
    "Notice",
    "FullName",
    "FamilyName",
    "Weight",
    "ItalicAngle",
    "isFixedPitch",
    "UnderlinePosition",
    "UnderlineThickness",
]

# Keys that belong in the eexec-encrypted portion of the font program.
_type1_post_eexec_order = ["Private", "CharStrings", "FID"]
|
||||
|
||||
|
||||
def _type1_item_repr(key, value):
    """Serialize one font-dict entry as PostScript '/key value def' text.

    CharStrings and Encoding entries get dedicated serializers.
    """
    access = _accessstrings[value.access]
    if access:
        access = access + " "
    if key == "CharStrings":
        return "/%s %s def\n" % (key, _type1_CharString_repr(value.value))
    if key == "Encoding":
        return _type1_Encoding_repr(value, access)
    return "/%s %s %sdef\n" % (str(key), str(value), access)
|
||||
|
||||
|
||||
def _type1_Encoding_repr(encoding, access):
|
||||
encoding = encoding.value
|
||||
psstring = "/Encoding 256 array\n0 1 255 {1 index exch /.notdef put} for\n"
|
||||
for i in range(256):
|
||||
name = encoding[i].value
|
||||
if name != ".notdef":
|
||||
psstring = psstring + "dup %d /%s put\n" % (i, name)
|
||||
return psstring + access + "def\n"
|
||||
|
||||
|
||||
def _type1_CharString_repr(charstrings):
|
||||
items = sorted(charstrings.items())
|
||||
return "xxx"
|
||||
|
||||
|
||||
class ps_font(ps_object):
    """A Type 1 font dictionary; __str__ renders a complete font program."""

    def __str__(self):
        # Cleartext portion: well-known keys first, in canonical order...
        psstring = "%d dict dup begin\n" % len(self.value)
        for key in _type1_pre_eexec_order:
            try:
                value = self.value[key]
            except KeyError:
                pass
            else:
                psstring = psstring + _type1_item_repr(key, value)
        # ...then any remaining keys (those not reserved for either
        # section), sorted alphabetically.
        items = sorted(self.value.items())
        for key, value in items:
            if key not in _type1_pre_eexec_order + _type1_post_eexec_order:
                psstring = psstring + _type1_item_repr(key, value)
        # Encrypted portion: switch to eexec, then emit Private,
        # CharStrings and FID (when present) in that order.
        psstring = psstring + "currentdict end\ncurrentfile eexec\ndup "
        for key in _type1_post_eexec_order:
            try:
                value = self.value[key]
            except KeyError:
                pass
            else:
                psstring = psstring + _type1_item_repr(key, value)
        # Trailer: define the font, then the conventional 512 zeros
        # (8 lines of 64) and cleartomark.
        return (
            psstring
            + "dup/FontName get exch definefont pop\nmark currentfile closefile\n"
            + 8 * (64 * "0" + "\n")
            + "cleartomark"
            + "\n"
        )

    def __repr__(self):
        return "<font>"
|
||||
|
||||
|
||||
class ps_file(ps_object):
    # Wraps the tokenizer so the 'currentfile' operator can push it on
    # the operand stack (see PSOperators.ps_currentfile).
    pass
|
||||
|
||||
|
||||
class ps_dict(ps_object):
    """A PostScript dictionary, rendered as 'N dict dup begin ... end '."""

    def __str__(self):
        parts = ["%d dict dup begin\n" % len(self.value)]
        for key, value in sorted(self.value.items()):
            access = _accessstrings[value.access]
            if access:
                access = access + " "
            parts.append("/%s %s %sdef\n" % (str(key), str(value), access))
        return "".join(parts) + "end "

    def __repr__(self):
        return "<dict>"
|
||||
|
||||
|
||||
class ps_mark(ps_object):
    """The array-construction mark sentinel ('[' / 'mark')."""

    def __init__(self):
        self.value = "mark"
        self.type = self.__class__.__name__[3:] + "type"
|
||||
|
||||
|
||||
class ps_procmark(ps_object):
    """The procedure-construction mark sentinel pushed for '{'."""

    def __init__(self):
        self.value = "procmark"
        self.type = self.__class__.__name__[3:] + "type"
|
||||
|
||||
|
||||
class ps_null(ps_object):
    """The null object; its value stays None."""

    def __init__(self):
        self.type = self.__class__.__name__[3:] + "type"
|
||||
|
||||
|
||||
class ps_boolean(ps_object):
    """A boolean; prints as 'true' or 'false'."""

    def __str__(self):
        return "true" if self.value else "false"
|
||||
|
||||
|
||||
class ps_string(ps_object):
    """A (string); printed with repr() escaping minus the quotes."""

    def __str__(self):
        return f"({repr(self.value)[1:-1]})"
|
||||
|
||||
|
||||
class ps_integer(ps_object):
    """An integer; prints via repr() of its value."""

    def __str__(self):
        return repr(self.value)
|
||||
|
||||
|
||||
class ps_real(ps_object):
    """A real number; prints via repr() of its value."""

    def __str__(self):
        return repr(self.value)
|
||||
|
||||
|
||||
class PSOperators(object):
|
||||
def ps_def(self):
|
||||
obj = self.pop()
|
||||
name = self.pop()
|
||||
self.dictstack[-1][name.value] = obj
|
||||
|
||||
def ps_bind(self):
|
||||
proc = self.pop("proceduretype")
|
||||
self.proc_bind(proc)
|
||||
self.push(proc)
|
||||
|
||||
def proc_bind(self, proc):
|
||||
for i in range(len(proc.value)):
|
||||
item = proc.value[i]
|
||||
if item.type == "proceduretype":
|
||||
self.proc_bind(item)
|
||||
else:
|
||||
if not item.literal:
|
||||
try:
|
||||
obj = self.resolve_name(item.value)
|
||||
except:
|
||||
pass
|
||||
else:
|
||||
if obj.type == "operatortype":
|
||||
proc.value[i] = obj
|
||||
|
||||
def ps_exch(self):
|
||||
if len(self.stack) < 2:
|
||||
raise RuntimeError("stack underflow")
|
||||
obj1 = self.pop()
|
||||
obj2 = self.pop()
|
||||
self.push(obj1)
|
||||
self.push(obj2)
|
||||
|
||||
def ps_dup(self):
|
||||
if not self.stack:
|
||||
raise RuntimeError("stack underflow")
|
||||
self.push(self.stack[-1])
|
||||
|
||||
def ps_exec(self):
|
||||
obj = self.pop()
|
||||
if obj.type == "proceduretype":
|
||||
self.call_procedure(obj)
|
||||
else:
|
||||
self.handle_object(obj)
|
||||
|
||||
def ps_count(self):
|
||||
self.push(ps_integer(len(self.stack)))
|
||||
|
||||
def ps_eq(self):
|
||||
any1 = self.pop()
|
||||
any2 = self.pop()
|
||||
self.push(ps_boolean(any1.value == any2.value))
|
||||
|
||||
def ps_ne(self):
|
||||
any1 = self.pop()
|
||||
any2 = self.pop()
|
||||
self.push(ps_boolean(any1.value != any2.value))
|
||||
|
||||
def ps_cvx(self):
|
||||
obj = self.pop()
|
||||
obj.literal = 0
|
||||
self.push(obj)
|
||||
|
||||
def ps_matrix(self):
|
||||
matrix = [
|
||||
ps_real(1.0),
|
||||
ps_integer(0),
|
||||
ps_integer(0),
|
||||
ps_real(1.0),
|
||||
ps_integer(0),
|
||||
ps_integer(0),
|
||||
]
|
||||
self.push(ps_array(matrix))
|
||||
|
||||
def ps_string(self):
|
||||
num = self.pop("integertype").value
|
||||
self.push(ps_string("\0" * num))
|
||||
|
||||
def ps_type(self):
|
||||
obj = self.pop()
|
||||
self.push(ps_string(obj.type))
|
||||
|
||||
def ps_store(self):
|
||||
value = self.pop()
|
||||
key = self.pop()
|
||||
name = key.value
|
||||
for i in range(len(self.dictstack) - 1, -1, -1):
|
||||
if name in self.dictstack[i]:
|
||||
self.dictstack[i][name] = value
|
||||
break
|
||||
self.dictstack[-1][name] = value
|
||||
|
||||
def ps_where(self):
|
||||
name = self.pop()
|
||||
# XXX
|
||||
self.push(ps_boolean(0))
|
||||
|
||||
def ps_systemdict(self):
|
||||
self.push(ps_dict(self.dictstack[0]))
|
||||
|
||||
def ps_userdict(self):
|
||||
self.push(ps_dict(self.dictstack[1]))
|
||||
|
||||
def ps_currentdict(self):
|
||||
self.push(ps_dict(self.dictstack[-1]))
|
||||
|
||||
def ps_currentfile(self):
|
||||
self.push(ps_file(self.tokenizer))
|
||||
|
||||
def ps_eexec(self):
|
||||
f = self.pop("filetype").value
|
||||
f.starteexec()
|
||||
|
||||
def ps_closefile(self):
|
||||
f = self.pop("filetype").value
|
||||
f.skipwhite()
|
||||
f.stopeexec()
|
||||
|
||||
def ps_cleartomark(self):
|
||||
obj = self.pop()
|
||||
while obj != self.mark:
|
||||
obj = self.pop()
|
||||
|
||||
def ps_readstring(self, ps_boolean=ps_boolean, len=len):
|
||||
s = self.pop("stringtype")
|
||||
oldstr = s.value
|
||||
f = self.pop("filetype")
|
||||
# pad = file.value.read(1)
|
||||
# for StringIO, this is faster
|
||||
f.value.pos = f.value.pos + 1
|
||||
newstr = f.value.read(len(oldstr))
|
||||
s.value = newstr
|
||||
self.push(s)
|
||||
self.push(ps_boolean(len(oldstr) == len(newstr)))
|
||||
|
||||
def ps_known(self):
|
||||
key = self.pop()
|
||||
d = self.pop("dicttype", "fonttype")
|
||||
self.push(ps_boolean(key.value in d.value))
|
||||
|
||||
def ps_if(self):
    """Implement ``if``: run the procedure when the condition is true."""
    procedure = self.pop("proceduretype")
    condition = self.pop("booleantype")
    if condition.value:
        self.call_procedure(procedure)
|
||||
|
||||
def ps_ifelse(self):
    """Implement ``ifelse``: pick one of two procedures by a boolean."""
    else_proc = self.pop("proceduretype")
    then_proc = self.pop("proceduretype")
    chosen = then_proc if self.pop("booleantype").value else else_proc
    self.call_procedure(chosen)
|
||||
|
||||
def ps_readonly(self):
    """Raise the top object's access level to read-only (1), never lower it."""
    obj = self.pop()
    obj.access = max(obj.access, 1)
    self.push(obj)
|
||||
|
||||
def ps_executeonly(self):
    """Raise the top object's access level to execute-only (2), never lower it."""
    obj = self.pop()
    obj.access = max(obj.access, 2)
    self.push(obj)
|
||||
|
||||
def ps_noaccess(self):
    """Raise the top object's access level to no-access (3), never lower it."""
    obj = self.pop()
    obj.access = max(obj.access, 3)
    self.push(obj)
|
||||
|
||||
def ps_not(self):
|
||||
obj = self.pop("booleantype", "integertype")
|
||||
if obj.type == "booleantype":
|
||||
self.push(ps_boolean(not obj.value))
|
||||
else:
|
||||
self.push(ps_integer(~obj.value))
|
||||
|
||||
def ps_print(self):
|
||||
str = self.pop("stringtype")
|
||||
print("PS output --->", str.value)
|
||||
|
||||
def ps_anchorsearch(self):
    """Implement ``anchorsearch``: split a string on a leading match.

    On success pushes (remainder, match, true); on failure pushes the
    original string and false.
    """
    seek = self.pop("stringtype")
    haystack = self.pop("stringtype")
    prefix_len = len(seek.value)
    if haystack.value.startswith(seek.value):
        self.push(ps_string(haystack.value[prefix_len:]))
        self.push(seek)
        self.push(ps_boolean(1))
    else:
        self.push(haystack)
        self.push(ps_boolean(0))
|
||||
|
||||
def ps_array(self):
|
||||
num = self.pop("integertype")
|
||||
array = ps_array([None] * num.value)
|
||||
self.push(array)
|
||||
|
||||
def ps_astore(self):
    """Implement ``astore``: fill an array from operands on the stack."""
    array = self.pop("arraytype")
    # Fill back-to-front so the topmost operand lands in the last slot.
    for slot in reversed(range(len(array.value))):
        array.value[slot] = self.pop()
    self.push(array)
|
||||
|
||||
def ps_load(self):
|
||||
name = self.pop()
|
||||
self.push(self.resolve_name(name.value))
|
||||
|
||||
def ps_put(self):
    """Implement ``put``: store a value into an array, procedure, dict
    or string."""
    value = self.pop()
    key = self.pop()
    container = self.pop("arraytype", "dicttype", "stringtype", "proceduretype")
    if container.type == "stringtype":
        # Python strings are immutable: splice a new one around the char.
        pos = key.value
        container.value = (
            container.value[:pos] + chr(value.value) + container.value[pos + 1 :]
        )
    else:
        # arrays, procedures and dicts all take plain item assignment
        container.value[key.value] = value
|
||||
|
||||
def ps_get(self):
    """Implement ``get``: fetch an element from an array, procedure,
    dict, font or string.

    For strings the character code is pushed as an integer; for the
    container types the stored object itself is pushed.  The dead
    ``if obj1.value == "Encoding": pass`` debug hook and the duplicated
    container branches of the original have been removed/merged.
    """
    key = self.pop()
    container = self.pop(
        "arraytype", "dicttype", "stringtype", "proceduretype", "fonttype"
    )
    tp = container.type
    if tp == "stringtype":
        self.push(ps_integer(ord(container.value[key.value])))
    elif tp in ("arraytype", "proceduretype", "dicttype", "fonttype"):
        # all four container types support plain item lookup
        self.push(container.value[key.value])
    else:
        # unreachable: pop() above already restricted the type
        assert False, "shouldn't get here"
|
||||
|
||||
def ps_getinterval(self):
    """Implement ``getinterval``: push a sub-array or sub-string."""
    count = self.pop("integertype")
    start = self.pop("integertype")
    source = self.pop("arraytype", "stringtype")
    chunk = source.value[start.value : start.value + count.value]
    if source.type == "arraytype":
        self.push(ps_array(chunk))
    else:
        self.push(ps_string(chunk))
|
||||
|
||||
def ps_putinterval(self):
    """Implement ``putinterval``: overwrite a slice of an array or string."""
    source = self.pop("arraytype", "stringtype")
    start = self.pop("integertype")
    target = self.pop("arraytype", "stringtype")
    begin = start.value
    end = begin + len(source.value)
    if target.type == "arraytype":
        target.value[begin:end] = source.value
    elif target.type == "stringtype":
        # strings are immutable; splice a new one together
        target.value = target.value[:begin] + source.value + target.value[end:]
|
||||
|
||||
def ps_cvn(self):
|
||||
self.push(ps_name(self.pop("stringtype").value))
|
||||
|
||||
def ps_index(self):
    """Implement ``index``: re-push the n-th element below the stack top."""
    depth = self.pop("integertype").value
    if depth < 0:
        raise RuntimeError("index may not be negative")
    self.push(self.stack[-1 - depth])
|
||||
|
||||
def ps_for(self):
    """Implement ``for``: run a procedure over an arithmetic progression.

    The loop counter is pushed before each invocation — as a real when
    it is a float, as an integer otherwise.  A zero step behaves like
    the original: it loops forever once the counter reaches the limit.
    """
    procedure = self.pop("proceduretype")
    limit = self.pop("integertype", "realtype").value
    step = self.pop("integertype", "realtype").value
    counter = self.pop("integertype", "realtype").value
    while True:
        if step > 0:
            if counter > limit:
                break
        elif counter < limit:
            break
        wrapper = ps_real if isinstance(counter, float) else ps_integer
        self.push(wrapper(counter))
        self.call_procedure(procedure)
        counter = counter + step
|
||||
|
||||
def ps_forall(self):
    """Implement ``forall``: apply a procedure to every element of an
    array, a string (as character codes) or a dict (as key/value pairs)."""
    procedure = self.pop("proceduretype")
    container = self.pop("arraytype", "stringtype", "dicttype")
    if container.type == "arraytype":
        for element in container.value:
            self.push(element)
            self.call_procedure(procedure)
    elif container.type == "stringtype":
        for char in container.value:
            self.push(ps_integer(ord(char)))
            self.call_procedure(procedure)
    elif container.type == "dicttype":
        for key, value in container.value.items():
            self.push(ps_name(key))
            self.push(value)
            self.call_procedure(procedure)
|
||||
|
||||
def ps_definefont(self):
|
||||
font = self.pop("dicttype")
|
||||
name = self.pop()
|
||||
font = ps_font(font.value)
|
||||
self.dictstack[0]["FontDirectory"].value[name.value] = font
|
||||
self.push(font)
|
||||
|
||||
def ps_findfont(self):
|
||||
name = self.pop()
|
||||
font = self.dictstack[0]["FontDirectory"].value[name.value]
|
||||
self.push(font)
|
||||
|
||||
def ps_pop(self):
|
||||
self.pop()
|
||||
|
||||
def ps_dict(self):
|
||||
self.pop("integertype")
|
||||
self.push(ps_dict({}))
|
||||
|
||||
def ps_begin(self):
|
||||
self.dictstack.append(self.pop("dicttype").value)
|
||||
|
||||
def ps_end(self):
    """Implement ``end``: pop the current dict, guarding the two base dicts
    (systemdict and userdict) against removal."""
    if len(self.dictstack) <= 2:
        raise RuntimeError("dictstack underflow")
    del self.dictstack[-1]
|
||||
|
||||
|
||||
notdef = ".notdef"
|
||||
from fontTools.encodings.StandardEncoding import StandardEncoding
|
||||
|
||||
ps_StandardEncoding = list(map(ps_name, StandardEncoding))
|
||||
# ==== new file (96 lines): venv/lib/python3.12/site-packages/fontTools/misc/py23.py ====
|
||||
"""Python 2/3 compat layer leftovers."""
|
||||
|
||||
import decimal as _decimal
|
||||
import math as _math
|
||||
import warnings
|
||||
from contextlib import redirect_stderr, redirect_stdout
|
||||
from io import BytesIO
|
||||
from io import StringIO as UnicodeIO
|
||||
from types import SimpleNamespace
|
||||
|
||||
from .textTools import Tag, bytechr, byteord, bytesjoin, strjoin, tobytes, tostr
|
||||
|
||||
warnings.warn(
|
||||
"The py23 module has been deprecated and will be removed in a future release. "
|
||||
"Please update your code.",
|
||||
DeprecationWarning,
|
||||
)
|
||||
|
||||
__all__ = [
|
||||
"basestring",
|
||||
"bytechr",
|
||||
"byteord",
|
||||
"BytesIO",
|
||||
"bytesjoin",
|
||||
"open",
|
||||
"Py23Error",
|
||||
"range",
|
||||
"RecursionError",
|
||||
"round",
|
||||
"SimpleNamespace",
|
||||
"StringIO",
|
||||
"strjoin",
|
||||
"Tag",
|
||||
"tobytes",
|
||||
"tostr",
|
||||
"tounicode",
|
||||
"unichr",
|
||||
"unicode",
|
||||
"UnicodeIO",
|
||||
"xrange",
|
||||
"zip",
|
||||
]
|
||||
|
||||
|
||||
class Py23Error(NotImplementedError):
    """Raised for Python 2 idioms that have no Python 3 equivalent."""

    pass


# Aliases kept so legacy ``from py23 import ...`` clients keep working;
# most names are identical to the Python 3 builtins.
RecursionError = RecursionError
StringIO = UnicodeIO

basestring = str
isclose = _math.isclose
isfinite = _math.isfinite
open = open
range = range
round = round3 = round
unichr = chr
unicode = str
zip = zip

tounicode = tostr


def xrange(*args, **kwargs):
    """Removed Python 2 builtin; direct users to range() instead."""
    raise Py23Error("'xrange' is not defined. Use 'range' instead.")
|
||||
|
||||
|
||||
def round2(number, ndigits=None):
    """
    Implementation of Python 2 built-in round() function.

    Rounds a number to a given precision in decimal digits (default
    0 digits). The result is a floating point number. Values are rounded
    to the closest multiple of 10 to the power minus ndigits; if two
    multiples are equally close, rounding is done away from 0.
    ndigits may be negative.

    See Python 2 documentation:
    https://docs.python.org/2/library/functions.html?highlight=round#round
    """
    ndigits = 0 if ndigits is None else ndigits

    if ndigits >= 0:
        # Decimal quantization with ROUND_HALF_UP reproduces Python 2's
        # round-half-away-from-zero behaviour exactly.
        quantum = _decimal.Decimal("10") ** (-ndigits)
        quantized = _decimal.Decimal.from_float(number).quantize(
            quantum, rounding=_decimal.ROUND_HALF_UP
        )
        return float(quantized)

    # Negative ndigits: round to a multiple of 10 ** -ndigits.
    scale = 10 ** (-ndigits)
    quotient, remainder = divmod(number, scale)
    if remainder >= scale // 2 and number >= 0:
        quotient += 1
    return float(quotient * scale)
|
||||
# ==== new file (110 lines): venv/lib/python3.12/site-packages/fontTools/misc/roundTools.py ====
|
||||
"""
|
||||
Various round-to-integer helpers.
|
||||
"""
|
||||
|
||||
import math
|
||||
import functools
|
||||
import logging
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
__all__ = [
|
||||
"noRound",
|
||||
"otRound",
|
||||
"maybeRound",
|
||||
"roundFunc",
|
||||
"nearestMultipleShortestRepr",
|
||||
]
|
||||
|
||||
|
||||
def noRound(value):
    """Identity rounding function: return *value* untouched."""
    return value


def otRound(value):
    """Round float value to nearest integer towards ``+Infinity``.

    The OpenType spec (in the section on `"normalization" of OpenType Font Variations <https://docs.microsoft.com/en-us/typography/opentype/spec/otvaroverview#coordinate-scales-and-normalization>`_)
    defines the required method for converting floating point values to
    fixed-point. In particular it specifies the following rounding strategy:

        for fractional values of 0.5 and higher, take the next higher integer;
        for other fractional values, truncate.

    This function rounds the floating-point value according to this strategy
    in preparation for conversion to fixed-point.

    Args:
        value (float): The input floating-point value.

    Returns:
        int: The rounded value.
    """
    # See this thread for how we ended up with this implementation:
    # https://github.com/fonttools/fonttools/issues/1248#issuecomment-383198166
    return int(math.floor(value + 0.5))


def maybeRound(v, tolerance, round=otRound):
    """Round *v* only when the rounded value stays within *tolerance* of it."""
    candidate = round(v)
    return candidate if abs(candidate - v) <= tolerance else v


def roundFunc(tolerance, round=otRound):
    """Return a rounding callable appropriate for *tolerance*.

    Zero tolerance means never round; a tolerance of 0.5 or more means
    always round; anything in between rounds only when close enough.

    Raises:
        ValueError: if *tolerance* is negative.
    """
    if tolerance < 0:
        raise ValueError("Rounding tolerance must be positive")

    if tolerance == 0:
        return noRound

    if tolerance >= 0.5:
        return round

    return functools.partial(maybeRound, tolerance=tolerance, round=round)


def nearestMultipleShortestRepr(value: float, factor: float) -> str:
    """Round to nearest multiple of factor and return shortest decimal representation.

    This chooses the float that is closer to a multiple of the given factor while
    having the shortest decimal representation (the least number of fractional decimal
    digits).

    For example, given the following:

    >>> nearestMultipleShortestRepr(-0.61883544921875, 1.0/(1<<14))
    '-0.61884'

    Useful when you need to serialize or print a fixed-point number (or multiples
    thereof, such as F2Dot14 fractions of 180 degrees in COLRv1 PaintRotate) in
    a human-readable form.

    Args:
        value (float): The value to be rounded and serialized.
        factor (float): The value which the result is a close multiple of.

    Returns:
        str: A compact string representation of the value.
    """
    if not value:
        return "0.0"

    value = otRound(value / factor) * factor
    eps = 0.5 * factor
    lo = value - eps
    hi = value + eps
    # If the range of valid choices spans an integer, return the integer.
    if int(lo) != int(hi):
        return str(float(round(value)))

    fmt = "%.8f"
    lo = fmt % lo
    hi = fmt % hi
    assert len(lo) == len(hi) and lo != hi
    # Find the first decimal digit where the two bounds diverge; that is
    # the precision needed to pin the value inside the interval.
    for i in range(len(lo)):
        if lo[i] != hi[i]:
            break
    period = lo.find(".")
    assert period < i
    fmt = "%%.%df" % (i - period)
    return fmt % value
|
||||
# ==== new file (231 lines): venv/lib/python3.12/site-packages/fontTools/misc/sstruct.py ====
|
||||
"""sstruct.py -- SuperStruct
|
||||
|
||||
Higher level layer on top of the struct module, enabling to
|
||||
bind names to struct elements. The interface is similar to
|
||||
struct, except the objects passed and returned are not tuples
|
||||
(or argument lists), but dictionaries or instances.
|
||||
|
||||
Just like struct, we use fmt strings to describe a data
|
||||
structure, except we use one line per element. Lines are
|
||||
separated by newlines or semi-colons. Each line contains
|
||||
either one of the special struct characters ('@', '=', '<',
|
||||
'>' or '!') or a 'name:formatchar' combo (eg. 'myFloat:f').
|
||||
Repetitions, like the struct module offers them are not useful
|
||||
in this context, except for fixed length strings (eg. 'myInt:5h'
|
||||
is not allowed but 'myString:5s' is). The 'x' fmt character
|
||||
(pad byte) is treated as 'special', since it is by definition
|
||||
anonymous. Extra whitespace is allowed everywhere.
|
||||
|
||||
The sstruct module offers one feature that the "normal" struct
|
||||
module doesn't: support for fixed point numbers. These are spelled
|
||||
as "n.mF", where n is the number of bits before the point, and m
|
||||
the number of bits after the point. Fixed point numbers get
|
||||
converted to floats.
|
||||
|
||||
pack(fmt, object):
|
||||
'object' is either a dictionary or an instance (or actually
|
||||
anything that has a __dict__ attribute). If it is a dictionary,
|
||||
its keys are used for names. If it is an instance, it's
|
||||
attributes are used to grab struct elements from. Returns
|
||||
a string containing the data.
|
||||
|
||||
unpack(fmt, data, object=None)
|
||||
If 'object' is omitted (or None), a new dictionary will be
|
||||
returned. If 'object' is a dictionary, it will be used to add
|
||||
struct elements to. If it is an instance (or in fact anything
|
||||
that has a __dict__ attribute), an attribute will be added for
|
||||
each struct element. In the latter two cases, 'object' itself
|
||||
is returned.
|
||||
|
||||
unpack2(fmt, data, object=None)
|
||||
Convenience function. Same as unpack, except data may be longer
|
||||
than needed. The returned value is a tuple: (object, leftoverdata).
|
||||
|
||||
calcsize(fmt)
|
||||
like struct.calcsize(), but uses our own fmt strings:
|
||||
it returns the size of the data in bytes.
|
||||
"""
|
||||
|
||||
from fontTools.misc.fixedTools import fixedToFloat as fi2fl, floatToFixed as fl2fi
|
||||
from fontTools.misc.textTools import tobytes, tostr
|
||||
import struct
|
||||
import re
|
||||
|
||||
__version__ = "1.2"
|
||||
__copyright__ = "Copyright 1998, Just van Rossum <just@letterror.com>"
|
||||
|
||||
|
||||
class Error(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def pack(fmt, obj):
    """Pack *obj* (a dict or an instance) into bytes per the sstruct *fmt*.

    Fixed-point fields are converted from float; str values are encoded
    to bytes.  The dead ``string_index`` computation and the unused
    ``ix`` loop counter of the original have been removed.

    Raises:
        ValueError: when a value does not fit its struct format.
    """
    formatstring, names, fixes = getformat(fmt, keep_pad_byte=True)
    elements = []
    if not isinstance(obj, dict):
        obj = obj.__dict__
    for name in names.keys():
        value = obj[name]
        if name in fixes:
            # fixed point conversion
            value = fl2fi(value, fixes[name])
        elif isinstance(value, str):
            value = tobytes(value)
        elements.append(value)
        # Check it fits
        try:
            struct.pack(names[name], value)
        except Exception as e:
            raise ValueError(
                "Value %s does not fit in format %s for %s" % (value, names[name], name)
            ) from e
    data = struct.pack(*(formatstring,) + tuple(elements))
    return data
|
||||
|
||||
|
||||
def unpack(fmt, data, obj=None):
    """Unpack *data* into *obj* (dict or instance; a new dict when None).

    Fixed-point fields are converted to float; bytes values are decoded
    to str when possible.  Returns *obj*.
    """
    if obj is None:
        obj = {}
    data = tobytes(data)
    formatstring, names, fixes = getformat(fmt)
    if isinstance(obj, dict):
        d = obj
    else:
        d = obj.__dict__
    elements = struct.unpack(formatstring, data)
    # Iterate names and unpacked values in lockstep instead of rebuilding
    # list(names.keys()) on every iteration (the original was O(n^2)).
    for name, value in zip(names.keys(), elements):
        if name in fixes:
            # fixed point conversion
            value = fi2fl(value, fixes[name])
        elif isinstance(value, bytes):
            try:
                value = tostr(value)
            except UnicodeDecodeError:
                pass
        d[name] = value
    return obj
|
||||
|
||||
|
||||
def unpack2(fmt, data, obj=None):
    """Like unpack(), except *data* may be longer than needed.

    Returns a tuple ``(object, leftoverdata)``.
    """
    needed = calcsize(fmt)
    return unpack(fmt, data[:needed], obj), data[needed:]


def calcsize(fmt):
    """Return the size in bytes of the data described by an sstruct *fmt*."""
    formatstring, _, _ = getformat(fmt)
    return struct.calcsize(formatstring)
|
||||
|
||||
|
||||
# matches "name:formatchar" (whitespace is allowed)
|
||||
_elementRE = re.compile(
|
||||
r"\s*" # whitespace
|
||||
r"([A-Za-z_][A-Za-z_0-9]*)" # name (python identifier)
|
||||
r"\s*:\s*" # whitespace : whitespace
|
||||
r"([xcbB?hHiIlLqQfd]|" # formatchar...
|
||||
r"[0-9]+[ps]|" # ...formatchar...
|
||||
r"([0-9]+)\.([0-9]+)(F))" # ...formatchar
|
||||
r"\s*" # whitespace
|
||||
r"(#.*)?$" # [comment] + end of string
|
||||
)
|
||||
|
||||
# matches the special struct fmt chars and 'x' (pad byte)
|
||||
_extraRE = re.compile(r"\s*([x@=<>!])\s*(#.*)?$")
|
||||
|
||||
# matches an "empty" string, possibly containing whitespace and/or a comment
|
||||
_emptyRE = re.compile(r"\s*(#.*)?$")
|
||||
|
||||
_fixedpointmappings = {8: "b", 16: "h", 32: "l"}
|
||||
|
||||
_formatcache = {}
|
||||
|
||||
|
||||
def getformat(fmt, keep_pad_byte=False):
    """Parse an sstruct format string.

    Returns ``(formatstring, names, fixes)``: a plain struct format
    string, an insertion-ordered mapping of field names to their format
    chars, and a mapping of fixed-point field names to their number of
    fractional bits.  Results are memoized in ``_formatcache``.

    Raises:
        Error: on a syntax error or an unsupported fixed-point size.
    """
    fmt = tostr(fmt, encoding="ascii")
    try:
        return _formatcache[fmt]
    except KeyError:
        pass
    formatstring = ""
    names = {}
    fixes = {}
    for line in re.split("[\n;]", fmt):
        if _emptyRE.match(line):
            continue
        m = _extraRE.match(line)
        if m:
            formatchar = m.group(1)
            if formatchar != "x" and formatstring:
                raise Error("a special fmt char must be first")
        else:
            m = _elementRE.match(line)
            if not m:
                raise Error("syntax error in fmt: '%s'" % line)
            name = m.group(1)
            formatchar = m.group(2)
            if keep_pad_byte or formatchar != "x":
                names[name] = formatchar
            if m.group(3):
                # fixed point field spelled "n.mF"
                before = int(m.group(3))
                after = int(m.group(4))
                bits = before + after
                if bits not in [8, 16, 32]:
                    raise Error("fixed point must be 8, 16 or 32 bits long")
                formatchar = _fixedpointmappings[bits]
                names[name] = formatchar
                assert m.group(5) == "F"
                fixes[name] = after
        formatstring += formatchar
    _formatcache[fmt] = formatstring, names, fixes
    return formatstring, names, fixes
|
||||
|
||||
|
||||
def _test():
|
||||
fmt = """
|
||||
# comments are allowed
|
||||
> # big endian (see documentation for struct)
|
||||
# empty lines are allowed:
|
||||
|
||||
ashort: h
|
||||
along: l
|
||||
abyte: b # a byte
|
||||
achar: c
|
||||
astr: 5s
|
||||
afloat: f; adouble: d # multiple "statements" are allowed
|
||||
afixed: 16.16F
|
||||
abool: ?
|
||||
apad: x
|
||||
"""
|
||||
|
||||
print("size:", calcsize(fmt))
|
||||
|
||||
class foo(object):
|
||||
pass
|
||||
|
||||
i = foo()
|
||||
|
||||
i.ashort = 0x7FFF
|
||||
i.along = 0x7FFFFFFF
|
||||
i.abyte = 0x7F
|
||||
i.achar = "a"
|
||||
i.astr = "12345"
|
||||
i.afloat = 0.5
|
||||
i.adouble = 0.5
|
||||
i.afixed = 1.5
|
||||
i.abool = True
|
||||
|
||||
data = pack(fmt, i)
|
||||
print("data:", repr(data))
|
||||
print(unpack(fmt, data))
|
||||
i2 = foo()
|
||||
unpack(fmt, data, i2)
|
||||
print(vars(i2))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
_test()
|
||||
# ==== new file (247 lines): venv/lib/python3.12/site-packages/fontTools/misc/symfont.py ====
|
||||
from fontTools.pens.basePen import BasePen
|
||||
from functools import partial
|
||||
from itertools import count
|
||||
import sympy as sp
|
||||
import sys
|
||||
|
||||
n = 3 # Max Bezier degree; 3 for cubic, 2 for quadratic
|
||||
|
||||
t, x, y = sp.symbols("t x y", real=True)
|
||||
c = sp.symbols("c", real=False) # Complex representation instead of x/y
|
||||
|
||||
X = tuple(sp.symbols("x:%d" % (n + 1), real=True))
|
||||
Y = tuple(sp.symbols("y:%d" % (n + 1), real=True))
|
||||
P = tuple(zip(*(sp.symbols("p:%d[%s]" % (n + 1, w), real=True) for w in "01")))
|
||||
C = tuple(sp.symbols("c:%d" % (n + 1), real=False))
|
||||
|
||||
# Cubic Bernstein basis functions
|
||||
BinomialCoefficient = [(1, 0)]
|
||||
for i in range(1, n + 1):
|
||||
last = BinomialCoefficient[-1]
|
||||
this = tuple(last[j - 1] + last[j] for j in range(len(last))) + (0,)
|
||||
BinomialCoefficient.append(this)
|
||||
BinomialCoefficient = tuple(tuple(item[:-1]) for item in BinomialCoefficient)
|
||||
del last, this
|
||||
|
||||
BernsteinPolynomial = tuple(
|
||||
tuple(c * t**i * (1 - t) ** (n - i) for i, c in enumerate(coeffs))
|
||||
for n, coeffs in enumerate(BinomialCoefficient)
|
||||
)
|
||||
|
||||
BezierCurve = tuple(
|
||||
tuple(
|
||||
sum(P[i][j] * bernstein for i, bernstein in enumerate(bernsteins))
|
||||
for j in range(2)
|
||||
)
|
||||
for n, bernsteins in enumerate(BernsteinPolynomial)
|
||||
)
|
||||
BezierCurveC = tuple(
|
||||
sum(C[i] * bernstein for i, bernstein in enumerate(bernsteins))
|
||||
for n, bernsteins in enumerate(BernsteinPolynomial)
|
||||
)
|
||||
|
||||
|
||||
def green(f, curveXY):
|
||||
f = -sp.integrate(sp.sympify(f), y)
|
||||
f = f.subs({x: curveXY[0], y: curveXY[1]})
|
||||
f = sp.integrate(f * sp.diff(curveXY[0], t), (t, 0, 1))
|
||||
return f
|
||||
|
||||
|
||||
class _BezierFuncsLazy(dict):
|
||||
def __init__(self, symfunc):
|
||||
self._symfunc = symfunc
|
||||
self._bezfuncs = {}
|
||||
|
||||
def __missing__(self, i):
|
||||
args = ["p%d" % d for d in range(i + 1)]
|
||||
f = green(self._symfunc, BezierCurve[i])
|
||||
f = sp.gcd_terms(f.collect(sum(P, ()))) # Optimize
|
||||
return sp.lambdify(args, f)
|
||||
|
||||
|
||||
class GreenPen(BasePen):
|
||||
_BezierFuncs = {}
|
||||
|
||||
@classmethod
|
||||
def _getGreenBezierFuncs(celf, func):
|
||||
funcstr = str(func)
|
||||
if not funcstr in celf._BezierFuncs:
|
||||
celf._BezierFuncs[funcstr] = _BezierFuncsLazy(func)
|
||||
return celf._BezierFuncs[funcstr]
|
||||
|
||||
def __init__(self, func, glyphset=None):
|
||||
BasePen.__init__(self, glyphset)
|
||||
self._funcs = self._getGreenBezierFuncs(func)
|
||||
self.value = 0
|
||||
|
||||
def _moveTo(self, p0):
|
||||
self._startPoint = p0
|
||||
|
||||
def _closePath(self):
|
||||
p0 = self._getCurrentPoint()
|
||||
if p0 != self._startPoint:
|
||||
self._lineTo(self._startPoint)
|
||||
|
||||
def _endPath(self):
|
||||
p0 = self._getCurrentPoint()
|
||||
if p0 != self._startPoint:
|
||||
# Green theorem is not defined on open contours.
|
||||
raise NotImplementedError
|
||||
|
||||
def _lineTo(self, p1):
|
||||
p0 = self._getCurrentPoint()
|
||||
self.value += self._funcs[1](p0, p1)
|
||||
|
||||
def _qCurveToOne(self, p1, p2):
|
||||
p0 = self._getCurrentPoint()
|
||||
self.value += self._funcs[2](p0, p1, p2)
|
||||
|
||||
def _curveToOne(self, p1, p2, p3):
|
||||
p0 = self._getCurrentPoint()
|
||||
self.value += self._funcs[3](p0, p1, p2, p3)
|
||||
|
||||
|
||||
# Sample pens.
|
||||
# Do not use this in real code.
|
||||
# Use fontTools.pens.momentsPen.MomentsPen instead.
|
||||
AreaPen = partial(GreenPen, func=1)
|
||||
MomentXPen = partial(GreenPen, func=x)
|
||||
MomentYPen = partial(GreenPen, func=y)
|
||||
MomentXXPen = partial(GreenPen, func=x * x)
|
||||
MomentYYPen = partial(GreenPen, func=y * y)
|
||||
MomentXYPen = partial(GreenPen, func=x * y)
|
||||
|
||||
|
||||
def printGreenPen(penName, funcs, file=sys.stdout, docstring=None):
|
||||
if docstring is not None:
|
||||
print('"""%s"""' % docstring)
|
||||
|
||||
print(
|
||||
"""from fontTools.pens.basePen import BasePen, OpenContourError
|
||||
try:
|
||||
import cython
|
||||
|
||||
COMPILED = cython.compiled
|
||||
except (AttributeError, ImportError):
|
||||
# if cython not installed, use mock module with no-op decorators and types
|
||||
from fontTools.misc import cython
|
||||
|
||||
COMPILED = False
|
||||
|
||||
|
||||
__all__ = ["%s"]
|
||||
|
||||
class %s(BasePen):
|
||||
|
||||
def __init__(self, glyphset=None):
|
||||
BasePen.__init__(self, glyphset)
|
||||
"""
|
||||
% (penName, penName),
|
||||
file=file,
|
||||
)
|
||||
for name, f in funcs:
|
||||
print(" self.%s = 0" % name, file=file)
|
||||
print(
|
||||
"""
|
||||
def _moveTo(self, p0):
|
||||
self._startPoint = p0
|
||||
|
||||
def _closePath(self):
|
||||
p0 = self._getCurrentPoint()
|
||||
if p0 != self._startPoint:
|
||||
self._lineTo(self._startPoint)
|
||||
|
||||
def _endPath(self):
|
||||
p0 = self._getCurrentPoint()
|
||||
if p0 != self._startPoint:
|
||||
raise OpenContourError(
|
||||
"Glyph statistics is not defined on open contours."
|
||||
)
|
||||
""",
|
||||
end="",
|
||||
file=file,
|
||||
)
|
||||
|
||||
for n in (1, 2, 3):
|
||||
subs = {P[i][j]: [X, Y][j][i] for i in range(n + 1) for j in range(2)}
|
||||
greens = [green(f, BezierCurve[n]) for name, f in funcs]
|
||||
greens = [sp.gcd_terms(f.collect(sum(P, ()))) for f in greens] # Optimize
|
||||
greens = [f.subs(subs) for f in greens] # Convert to p to x/y
|
||||
defs, exprs = sp.cse(
|
||||
greens,
|
||||
optimizations="basic",
|
||||
symbols=(sp.Symbol("r%d" % i) for i in count()),
|
||||
)
|
||||
|
||||
print()
|
||||
for name, value in defs:
|
||||
print(" @cython.locals(%s=cython.double)" % name, file=file)
|
||||
if n == 1:
|
||||
print(
|
||||
"""\
|
||||
@cython.locals(x0=cython.double, y0=cython.double)
|
||||
@cython.locals(x1=cython.double, y1=cython.double)
|
||||
def _lineTo(self, p1):
|
||||
x0,y0 = self._getCurrentPoint()
|
||||
x1,y1 = p1
|
||||
""",
|
||||
file=file,
|
||||
)
|
||||
elif n == 2:
|
||||
print(
|
||||
"""\
|
||||
@cython.locals(x0=cython.double, y0=cython.double)
|
||||
@cython.locals(x1=cython.double, y1=cython.double)
|
||||
@cython.locals(x2=cython.double, y2=cython.double)
|
||||
def _qCurveToOne(self, p1, p2):
|
||||
x0,y0 = self._getCurrentPoint()
|
||||
x1,y1 = p1
|
||||
x2,y2 = p2
|
||||
""",
|
||||
file=file,
|
||||
)
|
||||
elif n == 3:
|
||||
print(
|
||||
"""\
|
||||
@cython.locals(x0=cython.double, y0=cython.double)
|
||||
@cython.locals(x1=cython.double, y1=cython.double)
|
||||
@cython.locals(x2=cython.double, y2=cython.double)
|
||||
@cython.locals(x3=cython.double, y3=cython.double)
|
||||
def _curveToOne(self, p1, p2, p3):
|
||||
x0,y0 = self._getCurrentPoint()
|
||||
x1,y1 = p1
|
||||
x2,y2 = p2
|
||||
x3,y3 = p3
|
||||
""",
|
||||
file=file,
|
||||
)
|
||||
for name, value in defs:
|
||||
print(" %s = %s" % (name, value), file=file)
|
||||
|
||||
print(file=file)
|
||||
for name, value in zip([f[0] for f in funcs], exprs):
|
||||
print(" self.%s += %s" % (name, value), file=file)
|
||||
|
||||
print(
|
||||
"""
|
||||
if __name__ == '__main__':
|
||||
from fontTools.misc.symfont import x, y, printGreenPen
|
||||
printGreenPen('%s', ["""
|
||||
% penName,
|
||||
file=file,
|
||||
)
|
||||
for name, f in funcs:
|
||||
print(" ('%s', %s)," % (name, str(f)), file=file)
|
||||
print(" ])", file=file)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
pen = AreaPen()
|
||||
pen.moveTo((100, 100))
|
||||
pen.lineTo((100, 200))
|
||||
pen.lineTo((200, 200))
|
||||
pen.curveTo((200, 250), (300, 300), (250, 350))
|
||||
pen.lineTo((200, 100))
|
||||
pen.closePath()
|
||||
print(pen.value)
|
||||
# ==== new file (229 lines): venv/lib/python3.12/site-packages/fontTools/misc/testTools.py ====
|
||||
"""Helpers for writing unit tests."""
|
||||
|
||||
from collections.abc import Iterable
|
||||
from io import BytesIO
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
import tempfile
|
||||
from unittest import TestCase as _TestCase
|
||||
from fontTools.config import Config
|
||||
from fontTools.misc.textTools import tobytes
|
||||
from fontTools.misc.xmlWriter import XMLWriter
|
||||
|
||||
|
||||
def parseXML(xmlSnippet):
    """Parses a snippet of XML.

    Input can be either a single string (unicode or UTF-8 bytes), or a
    a sequence of strings.

    The result is in the same format that would be returned by
    XMLReader, but the parser imposes no constraints on the root
    element so it can be called on small snippets of TTX files.
    """
    # To support snippets with multiple elements, we add a fake root.
    reader = TestXMLReader_()
    if isinstance(xmlSnippet, bytes):
        body = xmlSnippet
    elif isinstance(xmlSnippet, str):
        body = tobytes(xmlSnippet, "utf-8")
    elif isinstance(xmlSnippet, Iterable):
        body = b"".join(tobytes(s, "utf-8") for s in xmlSnippet)
    else:
        raise TypeError(
            "expected string or sequence of strings; found %r"
            % type(xmlSnippet).__name__
        )
    reader.parser.Parse(b"<root>" + body + b"</root>", 1)
    return reader.root[2]
|
||||
|
||||
|
||||
def parseXmlInto(font, parseInto, xmlSnippet):
    """Parse *xmlSnippet* and feed each XML element into parseInto.fromXML()."""
    elements = [
        item for item in parseXML(xmlSnippet.strip()) if not isinstance(item, str)
    ]
    for name, attrs, content in elements:
        parseInto.fromXML(name, attrs, content, font)
    parseInto.populateDefaults()
    return parseInto
|
||||
|
||||
|
||||
class FakeFont:
    """Minimal TTFont stand-in for unit tests: a fixed glyph order plus a
    tag -> table mapping."""

    def __init__(self, glyphs):
        self.glyphOrder_ = glyphs
        self.reverseGlyphOrderDict_ = {g: i for i, g in enumerate(glyphs)}
        self.lazy = False
        self.tables = {}
        self.cfg = Config()

    def __getitem__(self, tag):
        return self.tables[tag]

    def __setitem__(self, tag, table):
        self.tables[tag] = table

    def get(self, tag, default=None):
        return self.tables.get(tag, default)

    def getGlyphID(self, name):
        return self.reverseGlyphOrderDict_[name]

    def getGlyphIDMany(self, lst):
        return [self.getGlyphID(g) for g in lst]

    def getGlyphName(self, glyphID):
        # IDs beyond the glyph order get a synthetic "glyphNNNNN" name.
        if glyphID < len(self.glyphOrder_):
            return self.glyphOrder_[glyphID]
        return "glyph%.5d" % glyphID

    def getGlyphNameMany(self, lst):
        return [self.getGlyphName(g) for g in lst]

    def getGlyphOrder(self):
        return self.glyphOrder_

    def getReverseGlyphMap(self):
        return self.reverseGlyphOrderDict_

    def getGlyphNames(self):
        return sorted(self.getGlyphOrder())
|
||||
|
||||
|
||||
class TestXMLReader_(object):
    """Tiny expat-based reader building nested (name, attrs, content) tuples."""

    def __init__(self):
        from xml.parsers.expat import ParserCreate

        self.parser = ParserCreate()
        self.parser.StartElementHandler = self.startElement_
        self.parser.EndElementHandler = self.endElement_
        self.parser.CharacterDataHandler = self.addCharacterData_
        self.root = None
        self.stack = []

    def startElement_(self, name, attrs):
        element = (name, attrs, [])
        if not self.stack:
            # first element seen becomes the root
            self.root = element
        else:
            self.stack[-1][2].append(element)
        self.stack.append(element)

    def endElement_(self, name):
        self.stack.pop()

    def addCharacterData_(self, data):
        self.stack[-1][2].append(data)
|
||||
|
||||
|
||||
def makeXMLWriter(newlinestr="\n"):
    """Return an XMLWriter over an in-memory buffer, with the XML
    declaration removed so tests compare only element output."""
    # don't write OS-specific new lines
    w = XMLWriter(BytesIO(), newlinestr=newlinestr)
    # rewind and drop the declaration the constructor already wrote
    buf = w.file
    buf.seek(0)
    buf.truncate()
    return w
|
||||
|
||||
|
||||
def getXML(func, ttFont=None):
    """Call the passed toXML function and return the written content as a
    list of lines (unicode strings).

    Result is stripped of XML declaration and OS-specific newline characters.
    """
    writer = makeXMLWriter()
    func(writer, ttFont)
    output = writer.file.getvalue().decode("utf-8")
    # toXML methods must always end with a writer.newline()
    assert output.endswith("\n")
    return output.splitlines()
|
||||
|
||||
|
||||
def stripVariableItemsFromTTX(
    string: str,
    ttLibVersion: bool = True,
    checkSumAdjustment: bool = True,
    modified: bool = True,
    created: bool = True,
    sfntVersion: bool = False,  # opt-in only
) -> str:
    """Strip stuff like ttLibVersion, checksums, timestamps, etc. from TTX dumps."""
    substitutions = (
        # ttlib changes with the fontTools version
        (ttLibVersion, ' ttLibVersion="[^"]+"'),
        # sometimes (e.g. some subsetter tests) we don't care whether it's OTF or TTF
        (sfntVersion, ' sfntVersion="[^"]+"'),
        # head table checksum and creation and mod date changes with each save.
        (checkSumAdjustment, '<checkSumAdjustment value="[^"]+"/>'),
        (modified, '<modified value="[^"]+"/>'),
        (created, '<created value="[^"]+"/>'),
    )
    for enabled, pattern in substitutions:
        if enabled:
            string = re.sub(pattern, "", string)
    return string
|
||||
|
||||
|
||||
class MockFont(object):
    """A font-like object that automatically adds any looked up glyphname
    to its glyphOrder."""

    def __init__(self):
        self._glyphOrder = [".notdef"]

        class _AutoAllocatingDict(dict):
            # NB: 'mapping' is the dict's own 'self'; the enclosing MockFont
            # is reached through the closure over 'self'.
            def __missing__(mapping, key):
                self._glyphOrder.append(key)
                newGid = len(mapping)
                mapping[key] = newGid
                return newGid

        self._reverseGlyphOrder = _AutoAllocatingDict({".notdef": 0})
        self.lazy = False

    def getGlyphID(self, glyph):
        # Allocates a new glyph ID on first lookup via __missing__.
        return self._reverseGlyphOrder[glyph]

    def getReverseGlyphMap(self):
        return self._reverseGlyphOrder

    def getGlyphName(self, gid):
        return self._glyphOrder[gid]

    def getGlyphOrder(self):
        return self._glyphOrder
|
||||
|
||||
|
||||
class TestCase(_TestCase):
    """unittest.TestCase shim that guarantees assertRaisesRegex exists
    under its modern name on all supported Python versions."""

    def __init__(self, methodName):
        _TestCase.__init__(self, methodName)
        # Python 3 renamed assertRaisesRegexp to assertRaisesRegex,
        # and fires deprecation warnings if a program uses the old name.
        if not hasattr(self, "assertRaisesRegex"):
            self.assertRaisesRegex = self.assertRaisesRegexp
|
||||
|
||||
|
||||
class DataFilesHandler(TestCase):
    """TestCase mixin that resolves test data paths and manages a lazily
    created temporary directory (removed on tearDown)."""

    def setUp(self):
        self.tempdir = None
        self.num_tempfiles = 0

    def tearDown(self):
        if self.tempdir:
            shutil.rmtree(self.tempdir)

    def getpath(self, testfile):
        """Return the path of *testfile* inside the module's data folder."""
        base = os.path.dirname(sys.modules[self.__module__].__file__)
        return os.path.join(base, "data", testfile)

    def temp_dir(self):
        """Create the temp dir on first use; later calls are no-ops."""
        if not self.tempdir:
            self.tempdir = tempfile.mkdtemp()

    def temp_font(self, font_path, file_name):
        """Copy *font_path* into the temp dir as *file_name*; return the copy's path."""
        self.temp_dir()
        destination = os.path.join(self.tempdir, file_name)
        shutil.copy2(font_path, destination)
        return destination
|
||||
154
venv/lib/python3.12/site-packages/fontTools/misc/textTools.py
Normal file
154
venv/lib/python3.12/site-packages/fontTools/misc/textTools.py
Normal file
@ -0,0 +1,154 @@
|
||||
"""fontTools.misc.textTools.py -- miscellaneous routines."""
|
||||
|
||||
import ast
|
||||
import string
|
||||
|
||||
|
||||
# alias kept for backward compatibility
|
||||
safeEval = ast.literal_eval
|
||||
|
||||
|
||||
class Tag(str):
    """A table tag: a str subclass that compares equal to both str and
    bytes spellings of the same tag."""

    @staticmethod
    def transcode(blob):
        # bytes tags are decoded as latin-1 so every byte value round-trips
        if isinstance(blob, bytes):
            return blob.decode("latin-1")
        return blob

    def __new__(self, content):
        return str.__new__(self, self.transcode(content))

    def __eq__(self, other):
        return str.__eq__(self, self.transcode(other))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __hash__(self):
        return str.__hash__(self)

    def tobytes(self):
        return self.encode("latin-1")
|
||||
|
||||
|
||||
def readHex(content):
    """Convert a list of hex strings to binary data.

    Non-string chunks (e.g. parsed XML sub-elements) are skipped.
    """
    text = "".join(chunk for chunk in content if isinstance(chunk, str))
    return deHexStr(text)
|
||||
|
||||
|
||||
def deHexStr(hexdata):
    """Convert a hex string to binary data.

    Whitespace in *hexdata* is ignored, and an odd number of hex digits is
    padded with a trailing "0" (preserving the historic behavior).
    Raises ValueError for non-hex characters.
    """
    hexdata = "".join(hexdata.split())
    if len(hexdata) % 2:
        hexdata = hexdata + "0"
    # bytes.fromhex converts in a single C-level pass instead of a Python
    # loop over two-character slices; it is also stricter than int(..., 16)
    # (no stray '+'/'-' signs inside pairs), which only rejects garbage.
    return bytes.fromhex(hexdata)
|
||||
|
||||
|
||||
def hexStr(data):
    """Convert binary data to a lowercase hex string.

    Accepts bytes (fast C-level path) or any iterable of ints /
    single-character strings, matching the historic behavior.
    """
    if isinstance(data, bytes):
        # bytes.hex() emits lowercase digits, same as the old table lookup.
        return data.hex()
    return "".join("%02x" % (c if isinstance(c, int) else ord(c)) for c in data)
|
||||
|
||||
|
||||
def num2binary(l, bits=32):
    """Render integer *l* as a binary string of *bits* digits, grouped
    into space-separated 8-bit chunks (most significant group first)."""
    groups = []
    current = ""
    for position in range(bits):
        current = ("1" if l & 0x1 else "0") + current
        l = l >> 1
        if (position + 1) % 8 == 0:
            groups.append(current)
            current = ""
    if current:
        groups.append(current)
    # After consuming 'bits' bits, only 0 (non-negative) or -1 (sign
    # extension of a negative number) may remain.
    assert l in (0, -1), "number doesn't fit in number of bits"
    return " ".join(reversed(groups))
|
||||
|
||||
|
||||
def binary2num(bin):
    """Parse a binary string (whitespace ignored); any digit other than
    '0' counts as a one bit."""
    bin = strjoin(bin.split())
    value = 0
    for digit in bin:
        value = (value << 1) | (digit != "0")
    return value
|
||||
|
||||
|
||||
def caselessSort(alist):
    """Return a sorted copy of a list. If there are only strings
    in the list, it will not consider case.
    """
    try:
        # Secondary key keeps a deterministic order among case variants.
        return sorted(alist, key=lambda item: (item.lower(), item))
    except TypeError:
        # Mixed/uncomparable key types: fall back to plain sorting.
        return sorted(alist)
|
||||
|
||||
|
||||
def pad(data, size):
|
||||
r"""Pad byte string 'data' with null bytes until its length is a
|
||||
multiple of 'size'.
|
||||
|
||||
>>> len(pad(b'abcd', 4))
|
||||
4
|
||||
>>> len(pad(b'abcde', 2))
|
||||
6
|
||||
>>> len(pad(b'abcde', 4))
|
||||
8
|
||||
>>> pad(b'abcdef', 4) == b'abcdef\x00\x00'
|
||||
True
|
||||
"""
|
||||
data = tobytes(data)
|
||||
if size > 1:
|
||||
remainder = len(data) % size
|
||||
if remainder:
|
||||
data += b"\0" * (size - remainder)
|
||||
return data
|
||||
|
||||
|
||||
def tostr(s, encoding="ascii", errors="strict"):
    """Return *s* as str, decoding bytes-like input with *encoding*."""
    return s if isinstance(s, str) else s.decode(encoding, errors)
|
||||
|
||||
|
||||
def tobytes(s, encoding="ascii", errors="strict"):
    """Return *s* as bytes, encoding str input with *encoding*."""
    if isinstance(s, str):
        return s.encode(encoding, errors)
    return bytes(s)
|
||||
|
||||
|
||||
def bytechr(n):
    """Return a single byte holding the integer value *n* (0-255)."""
    return bytes((n,))
|
||||
|
||||
|
||||
def byteord(c):
    """Return the ordinal of a one-character string; ints pass through."""
    if isinstance(c, int):
        return c
    return ord(c)
|
||||
|
||||
|
||||
def strjoin(iterable, joiner=""):
    """Join string items with *joiner* (bytes joiners are ASCII-decoded)."""
    if not isinstance(joiner, str):  # inlined tostr()
        joiner = joiner.decode("ascii")
    return joiner.join(iterable)
|
||||
|
||||
|
||||
def bytesjoin(iterable, joiner=b""):
    """Join items as bytes; str items and joiner are ASCII-encoded."""

    def _as_bytes(value):  # inlined tobytes()
        return value.encode("ascii") if isinstance(value, str) else bytes(value)

    return _as_bytes(joiner).join(_as_bytes(item) for item in iterable)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import doctest, sys
|
||||
|
||||
sys.exit(doctest.testmod().failed)
|
||||
@ -0,0 +1,88 @@
|
||||
"""fontTools.misc.timeTools.py -- tools for working with OpenType timestamps.
|
||||
"""
|
||||
|
||||
import os
|
||||
import time
|
||||
from datetime import datetime, timezone
|
||||
import calendar
|
||||
|
||||
|
||||
epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
|
||||
|
||||
DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
|
||||
MONTHNAMES = [
|
||||
None,
|
||||
"Jan",
|
||||
"Feb",
|
||||
"Mar",
|
||||
"Apr",
|
||||
"May",
|
||||
"Jun",
|
||||
"Jul",
|
||||
"Aug",
|
||||
"Sep",
|
||||
"Oct",
|
||||
"Nov",
|
||||
"Dec",
|
||||
]
|
||||
|
||||
|
||||
def asctime(t=None):
    """
    Convert a tuple or struct_time representing a time as returned by gmtime()
    or localtime() to a 24-character string of the following form:

    >>> asctime(time.gmtime(0))
    'Thu Jan  1 00:00:00 1970'

    If t is not provided, the current time as returned by localtime() is used.
    Locale information is not used by asctime().

    This is meant to normalise the output of the built-in time.asctime() across
    different platforms and Python versions.
    In Python 3.x, the day of the month is right-justified, whereas on Windows
    Python 2.7 it is padded with zeros.

    See https://github.com/fonttools/fonttools/issues/455
    """
    if t is None:
        t = time.localtime()
    day = DAYNAMES[t.tm_wday]
    month = MONTHNAMES[t.tm_mon]
    clock_and_year = time.strftime("%H:%M:%S %Y", t)
    # %2s right-justifies the day of month, matching C asctime().
    return "%s %s %2s %s" % (day, month, t.tm_mday, clock_and_year)
|
||||
|
||||
|
||||
def timestampToString(value):
    """Render an OpenType timestamp (seconds since 1904-01-01 UTC) as
    asctime-style text; values before the epoch are clamped to it."""
    unix_time = max(0, value + epoch_diff)
    return asctime(time.gmtime(unix_time))
|
||||
|
||||
|
||||
def timestampFromString(value):
    """Parse a string as produced by timestampToString() back into an
    OpenType timestamp (seconds since the 1904 epoch).

    Fails via assert if the weekday name is inconsistent with the date.
    """
    # First 7 chars are "Wkd Mon"; strptime parses the locale-independent rest.
    wkday, mnth = value[:7].split()
    t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
    # Month name is mapped by hand (MONTHNAMES is 1-based) to stay
    # locale-independent; the result is pinned to UTC.
    t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
    wkday_idx = DAYNAMES.index(wkday)
    assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
    return int(t.timestamp()) - epoch_diff
|
||||
|
||||
|
||||
def timestampNow():
    """Return the current time as an OpenType timestamp, honouring
    SOURCE_DATE_EPOCH for reproducible builds."""
    # https://reproducible-builds.org/specs/source-date-epoch/
    source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
    if source_date_epoch is None:
        return int(time.time() - epoch_diff)
    return int(source_date_epoch) - epoch_diff
|
||||
|
||||
|
||||
def timestampSinceEpoch(value):
    """Convert a Unix timestamp to an OpenType (1904-based) timestamp."""
    since_1904 = value - epoch_diff
    return int(since_1904)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
import doctest
|
||||
|
||||
sys.exit(doctest.testmod().failed)
|
||||
507
venv/lib/python3.12/site-packages/fontTools/misc/transform.py
Normal file
507
venv/lib/python3.12/site-packages/fontTools/misc/transform.py
Normal file
@ -0,0 +1,507 @@
|
||||
"""Affine 2D transformation matrix class.
|
||||
|
||||
The Transform class implements various transformation matrix operations,
|
||||
both on the matrix itself, as well as on 2D coordinates.
|
||||
|
||||
Transform instances are effectively immutable: all methods that operate on the
|
||||
transformation itself always return a new instance. This has as the
|
||||
interesting side effect that Transform instances are hashable, ie. they can be
|
||||
used as dictionary keys.
|
||||
|
||||
This module exports the following symbols:
|
||||
|
||||
Transform
|
||||
this is the main class
|
||||
Identity
|
||||
Transform instance set to the identity transformation
|
||||
Offset
|
||||
Convenience function that returns a translating transformation
|
||||
Scale
|
||||
Convenience function that returns a scaling transformation
|
||||
|
||||
The DecomposedTransform class implements a transformation with separate
|
||||
translate, rotation, scale, skew, and transformation-center components.
|
||||
|
||||
:Example:
|
||||
|
||||
>>> t = Transform(2, 0, 0, 3, 0, 0)
|
||||
>>> t.transformPoint((100, 100))
|
||||
(200, 300)
|
||||
>>> t = Scale(2, 3)
|
||||
>>> t.transformPoint((100, 100))
|
||||
(200, 300)
|
||||
>>> t.transformPoint((0, 0))
|
||||
(0, 0)
|
||||
>>> t = Offset(2, 3)
|
||||
>>> t.transformPoint((100, 100))
|
||||
(102, 103)
|
||||
>>> t.transformPoint((0, 0))
|
||||
(2, 3)
|
||||
>>> t2 = t.scale(0.5)
|
||||
>>> t2.transformPoint((100, 100))
|
||||
(52.0, 53.0)
|
||||
>>> import math
|
||||
>>> t3 = t2.rotate(math.pi / 2)
|
||||
>>> t3.transformPoint((0, 0))
|
||||
(2.0, 3.0)
|
||||
>>> t3.transformPoint((100, 100))
|
||||
(-48.0, 53.0)
|
||||
>>> t = Identity.scale(0.5).translate(100, 200).skew(0.1, 0.2)
|
||||
>>> t.transformPoints([(0, 0), (1, 1), (100, 100)])
|
||||
[(50.0, 100.0), (50.550167336042726, 100.60135501775433), (105.01673360427253, 160.13550177543362)]
|
||||
>>>
|
||||
"""
|
||||
|
||||
import math
|
||||
from typing import NamedTuple
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
__all__ = ["Transform", "Identity", "Offset", "Scale", "DecomposedTransform"]
|
||||
|
||||
|
||||
_EPSILON = 1e-15
|
||||
_ONE_EPSILON = 1 - _EPSILON
|
||||
_MINUS_ONE_EPSILON = -1 + _EPSILON
|
||||
|
||||
|
||||
def _normSinCos(v):
    """Snap near-zero and near-±1 sine/cosine values to exactly 0, 1 or -1,
    so right-angle rotations produce clean matrices."""
    if abs(v) < _EPSILON:
        return 0
    if v > _ONE_EPSILON:
        return 1
    if v < _MINUS_ONE_EPSILON:
        return -1
    return v
|
||||
|
||||
|
||||
class Transform(NamedTuple):
    """2x2 transformation matrix plus offset, a.k.a. Affine transform.
    Transform instances are immutable: all transforming methods, eg.
    rotate(), return a new Transform instance.

    :Example:

        >>> t = Transform()
        >>> t
        <Transform [1 0 0 1 0 0]>
        >>> t.scale(2)
        <Transform [2 0 0 2 0 0]>
        >>> t.scale(2.5, 5.5)
        <Transform [2.5 0 0 5.5 0 0]>
        >>>
        >>> t.scale(2, 3).transformPoint((100, 100))
        (200, 300)

    Transform's constructor takes six arguments, all of which are
    optional, and can be used as keyword arguments::

        >>> Transform(12)
        <Transform [12 0 0 1 0 0]>
        >>> Transform(dx=12)
        <Transform [1 0 0 1 12 0]>
        >>> Transform(yx=12)
        <Transform [1 0 12 1 0 0]>

    Transform instances also behave like sequences of length 6::

        >>> len(Identity)
        6
        >>> list(Identity)
        [1, 0, 0, 1, 0, 0]
        >>> tuple(Identity)
        (1, 0, 0, 1, 0, 0)

    Transform instances are comparable::

        >>> t1 = Identity.scale(2, 3).translate(4, 6)
        >>> t2 = Identity.translate(8, 18).scale(2, 3)
        >>> t1 == t2
        1

    But beware of floating point rounding errors::

        >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
        >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
        >>> t1
        <Transform [0.2 0 0 0.3 0.08 0.18]>
        >>> t2
        <Transform [0.2 0 0 0.3 0.08 0.18]>
        >>> t1 == t2
        0

    Transform instances are hashable, meaning you can use them as
    keys in dictionaries::

        >>> d = {Scale(12, 13): None}
        >>> d
        {<Transform [12 0 0 13 0 0]>: None}

    But again, beware of floating point rounding errors::

        >>> t1 = Identity.scale(0.2, 0.3).translate(0.4, 0.6)
        >>> t2 = Identity.translate(0.08, 0.18).scale(0.2, 0.3)
        >>> t1
        <Transform [0.2 0 0 0.3 0.08 0.18]>
        >>> t2
        <Transform [0.2 0 0 0.3 0.08 0.18]>
        >>> d = {t1: None}
        >>> d
        {<Transform [0.2 0 0 0.3 0.08 0.18]>: None}
        >>> d[t2]
        Traceback (most recent call last):
          File "<stdin>", line 1, in ?
        KeyError: <Transform [0.2 0 0 0.3 0.08 0.18]>
    """

    # Matrix layout maps (x, y) -> (xx*x + yx*y + dx, xy*x + yy*y + dy).
    xx: float = 1
    xy: float = 0
    yx: float = 0
    yy: float = 1
    dx: float = 0
    dy: float = 0

    def transformPoint(self, p):
        """Transform a point.

        :Example:

            >>> t = Transform()
            >>> t = t.scale(2.5, 5.5)
            >>> t.transformPoint((100, 100))
            (250.0, 550.0)
        """
        (x, y) = p
        xx, xy, yx, yy, dx, dy = self
        return (xx * x + yx * y + dx, xy * x + yy * y + dy)

    def transformPoints(self, points):
        """Transform a list of points.

        :Example:

            >>> t = Scale(2, 3)
            >>> t.transformPoints([(0, 0), (0, 100), (100, 100), (100, 0)])
            [(0, 0), (0, 300), (200, 300), (200, 0)]
            >>>
        """
        xx, xy, yx, yy, dx, dy = self
        return [(xx * x + yx * y + dx, xy * x + yy * y + dy) for x, y in points]

    def transformVector(self, v):
        """Transform an (dx, dy) vector, treating translation as zero.

        :Example:

            >>> t = Transform(2, 0, 0, 2, 10, 20)
            >>> t.transformVector((3, -4))
            (6, -8)
            >>>
        """
        (dx, dy) = v
        xx, xy, yx, yy = self[:4]
        return (xx * dx + yx * dy, xy * dx + yy * dy)

    def transformVectors(self, vectors):
        """Transform a list of (dx, dy) vector, treating translation as zero.

        :Example:
            >>> t = Transform(2, 0, 0, 2, 10, 20)
            >>> t.transformVectors([(3, -4), (5, -6)])
            [(6, -8), (10, -12)]
            >>>
        """
        xx, xy, yx, yy = self[:4]
        return [(xx * dx + yx * dy, xy * dx + yy * dy) for dx, dy in vectors]

    def translate(self, x=0, y=0):
        """Return a new transformation, translated (offset) by x, y.

        :Example:
            >>> t = Transform()
            >>> t.translate(20, 30)
            <Transform [1 0 0 1 20 30]>
            >>>
        """
        return self.transform((1, 0, 0, 1, x, y))

    def scale(self, x=1, y=None):
        """Return a new transformation, scaled by x, y. The 'y' argument
        may be None, which implies to use the x value for y as well.

        :Example:
            >>> t = Transform()
            >>> t.scale(5)
            <Transform [5 0 0 5 0 0]>
            >>> t.scale(5, 6)
            <Transform [5 0 0 6 0 0]>
            >>>
        """
        if y is None:
            y = x
        return self.transform((x, 0, 0, y, 0, 0))

    def rotate(self, angle):
        """Return a new transformation, rotated by 'angle' (radians).

        :Example:
            >>> import math
            >>> t = Transform()
            >>> t.rotate(math.pi / 2)
            <Transform [0 1 -1 0 0 0]>
            >>>
        """
        import math

        # _normSinCos snaps near-0/±1 values so right angles stay exact.
        c = _normSinCos(math.cos(angle))
        s = _normSinCos(math.sin(angle))
        return self.transform((c, s, -s, c, 0, 0))

    def skew(self, x=0, y=0):
        """Return a new transformation, skewed by x and y (radians).

        :Example:
            >>> import math
            >>> t = Transform()
            >>> t.skew(math.pi / 4)
            <Transform [1 0 1 1 0 0]>
            >>>
        """
        import math

        return self.transform((1, math.tan(y), math.tan(x), 1, 0, 0))

    def transform(self, other):
        """Return a new transformation, transformed by another
        transformation (i.e. 'other' applied first, then self).

        :Example:
            >>> t = Transform(2, 0, 0, 3, 1, 6)
            >>> t.transform((4, 3, 2, 1, 5, 6))
            <Transform [8 9 4 3 11 24]>
            >>>
        """
        xx1, xy1, yx1, yy1, dx1, dy1 = other
        xx2, xy2, yx2, yy2, dx2, dy2 = self
        # Standard 2x3 affine matrix product: result = other * self.
        return self.__class__(
            xx1 * xx2 + xy1 * yx2,
            xx1 * xy2 + xy1 * yy2,
            yx1 * xx2 + yy1 * yx2,
            yx1 * xy2 + yy1 * yy2,
            xx2 * dx1 + yx2 * dy1 + dx2,
            xy2 * dx1 + yy2 * dy1 + dy2,
        )

    def reverseTransform(self, other):
        """Return a new transformation, which is the other transformation
        transformed by self. self.reverseTransform(other) is equivalent to
        other.transform(self).

        :Example:
            >>> t = Transform(2, 0, 0, 3, 1, 6)
            >>> t.reverseTransform((4, 3, 2, 1, 5, 6))
            <Transform [8 6 6 3 21 15]>
            >>> Transform(4, 3, 2, 1, 5, 6).transform((2, 0, 0, 3, 1, 6))
            <Transform [8 6 6 3 21 15]>
            >>>
        """
        # Same product as transform(), with the roles of the operands swapped.
        xx1, xy1, yx1, yy1, dx1, dy1 = self
        xx2, xy2, yx2, yy2, dx2, dy2 = other
        return self.__class__(
            xx1 * xx2 + xy1 * yx2,
            xx1 * xy2 + xy1 * yy2,
            yx1 * xx2 + yy1 * yx2,
            yx1 * xy2 + yy1 * yy2,
            xx2 * dx1 + yx2 * dy1 + dx2,
            xy2 * dx1 + yy2 * dy1 + dy2,
        )

    def inverse(self):
        """Return the inverse transformation.

        Division by the determinant raises ZeroDivisionError for a
        degenerate (non-invertible) matrix.

        :Example:
            >>> t = Identity.translate(2, 3).scale(4, 5)
            >>> t.transformPoint((10, 20))
            (42, 103)
            >>> it = t.inverse()
            >>> it.transformPoint((42, 103))
            (10.0, 20.0)
            >>>
        """
        if self == Identity:
            return self
        xx, xy, yx, yy, dx, dy = self
        det = xx * yy - yx * xy
        # Adjugate divided by the determinant inverts the 2x2 part...
        xx, xy, yx, yy = yy / det, -xy / det, -yx / det, xx / det
        # ...and the offset is the inverted matrix applied to -offset.
        dx, dy = -xx * dx - yx * dy, -xy * dx - yy * dy
        return self.__class__(xx, xy, yx, yy, dx, dy)

    def toPS(self):
        """Return a PostScript representation

        :Example:

            >>> t = Identity.scale(2, 3).translate(4, 5)
            >>> t.toPS()
            '[2 0 0 3 8 15]'
            >>>
        """
        return "[%s %s %s %s %s %s]" % self

    def toDecomposed(self) -> "DecomposedTransform":
        """Decompose into a DecomposedTransform."""
        return DecomposedTransform.fromTransform(self)

    def __bool__(self):
        """Returns True if transform is not identity, False otherwise.

        :Example:

            >>> bool(Identity)
            False
            >>> bool(Transform())
            False
            >>> bool(Scale(1.))
            False
            >>> bool(Scale(2))
            True
            >>> bool(Offset())
            False
            >>> bool(Offset(0))
            False
            >>> bool(Offset(2))
            True
        """
        return self != Identity

    def __repr__(self):
        return "<%s [%g %g %g %g %g %g]>" % ((self.__class__.__name__,) + self)
|
||||
|
||||
|
||||
Identity = Transform()
|
||||
|
||||
|
||||
def Offset(x=0, y=0):
    """Return the identity transformation offset by x, y.

    :Example:
        >>> Offset(2, 3)
        <Transform [1 0 0 1 2 3]>
        >>>
    """
    return Transform(dx=x, dy=y)
|
||||
|
||||
|
||||
def Scale(x, y=None):
    """Return the identity transformation scaled by x, y. The 'y' argument
    may be None, which implies to use the x value for y as well.

    :Example:
        >>> Scale(2, 3)
        <Transform [2 0 0 3 0 0]>
        >>>
    """
    return Transform(xx=x, yy=x if y is None else y)
|
||||
|
||||
|
||||
@dataclass
class DecomposedTransform:
    """The DecomposedTransform class implements a transformation with separate
    translate, rotation, scale, skew, and transformation-center components.
    """

    translateX: float = 0
    translateY: float = 0
    rotation: float = 0  # in degrees, counter-clockwise
    scaleX: float = 1
    scaleY: float = 1
    skewX: float = 0  # in degrees, clockwise
    skewY: float = 0  # in degrees, counter-clockwise
    tCenterX: float = 0
    tCenterY: float = 0

    def __bool__(self):
        # True iff any component differs from the identity decomposition.
        return (
            self.translateX != 0
            or self.translateY != 0
            or self.rotation != 0
            or self.scaleX != 1
            or self.scaleY != 1
            or self.skewX != 0
            or self.skewY != 0
            or self.tCenterX != 0
            or self.tCenterY != 0
        )

    @classmethod
    def fromTransform(self, transform):
        """Decompose a 2x3 affine transform into translate/rotate/scale/skew
        components (transformation center is always reported as 0, 0)."""
        # Adapted from an answer on
        # https://math.stackexchange.com/questions/13150/extracting-rotation-scale-values-from-2d-transformation-matrix
        a, b, c, d, x, y = transform

        # Factor out a negative x-scale up front so acos() gets a usable value;
        # it is re-applied to scaleX and skewX at the end.
        sx = math.copysign(1, a)
        if sx < 0:
            a *= sx
            b *= sx

        delta = a * d - b * c  # determinant of the 2x2 part

        rotation = 0
        scaleX = scaleY = 0
        skewX = skewY = 0

        # Apply the QR-like decomposition.
        if a != 0 or b != 0:
            r = math.sqrt(a * a + b * b)
            rotation = math.acos(a / r) if b >= 0 else -math.acos(a / r)
            scaleX, scaleY = (r, delta / r)
            skewX, skewY = (math.atan((a * c + b * d) / (r * r)), 0)
        elif c != 0 or d != 0:
            s = math.sqrt(c * c + d * d)
            rotation = math.pi / 2 - (
                math.acos(-c / s) if d >= 0 else -math.acos(c / s)
            )
            scaleX, scaleY = (delta / s, s)
            skewX, skewY = (0, math.atan((a * c + b * d) / (s * s)))
        else:
            # a = b = c = d = 0
            pass

        return DecomposedTransform(
            x,
            y,
            math.degrees(rotation),
            scaleX * sx,
            scaleY,
            math.degrees(skewX) * sx,
            math.degrees(skewY),
            0,
            0,
        )

    def toTransform(self):
        """Return the Transform() equivalent of this transformation.

        :Example:
            >>> DecomposedTransform(scaleX=2, scaleY=2).toTransform()
            <Transform [2 0 0 2 0 0]>
            >>>
        """
        # Order matters: translate to center, rotate, scale, skew, then
        # translate back from the transformation center.
        t = Transform()
        t = t.translate(
            self.translateX + self.tCenterX, self.translateY + self.tCenterY
        )
        t = t.rotate(math.radians(self.rotation))
        t = t.scale(self.scaleX, self.scaleY)
        t = t.skew(math.radians(self.skewX), math.radians(self.skewY))
        t = t.translate(-self.tCenterX, -self.tCenterY)
        return t
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
import sys
|
||||
import doctest
|
||||
|
||||
sys.exit(doctest.testmod().failed)
|
||||
@ -0,0 +1,45 @@
|
||||
"""Generic tools for working with trees."""
|
||||
|
||||
from math import ceil, log
|
||||
|
||||
|
||||
def build_n_ary_tree(leaves, n):
    """Build N-ary tree from sequence of leaf nodes.

    Return a list of lists where each non-leaf node is a list containing
    max n nodes.
    """
    if not leaves:
        return []

    assert n > 1

    # Minimum depth needed to hold len(leaves) items with branching factor n.
    depth = ceil(log(len(leaves), n))

    if depth <= 1:
        return list(leaves)

    # Fully populate complete subtrees of root until we have enough leaves left
    root = []
    unassigned = None
    full_step = n ** (depth - 1)  # leaf capacity of one complete subtree
    for i in range(0, len(leaves), full_step):
        subtree = leaves[i : i + full_step]
        if len(subtree) < full_step:
            unassigned = subtree
            break
        # Bottom-up grouping: repeatedly chunk into n-sized nodes until the
        # subtree itself has at most n children.
        while len(subtree) > n:
            subtree = [subtree[k : k + n] for k in range(0, len(subtree), n)]
        root.append(subtree)

    if unassigned:
        # Recurse to fill the last subtree, which is the only partially populated one
        subtree = build_n_ary_tree(unassigned, n)
        if len(subtree) <= n - len(root):
            # replace last subtree with its children if they can still fit
            root.extend(subtree)
        else:
            root.append(subtree)
        assert len(root) <= n

    return root
|
||||
147
venv/lib/python3.12/site-packages/fontTools/misc/vector.py
Normal file
147
venv/lib/python3.12/site-packages/fontTools/misc/vector.py
Normal file
@ -0,0 +1,147 @@
|
||||
from numbers import Number
|
||||
import math
|
||||
import operator
|
||||
import warnings
|
||||
|
||||
|
||||
__all__ = ["Vector"]
|
||||
|
||||
|
||||
class Vector(tuple):
    """A math-like vector.

    Represents an n-dimensional numeric vector. ``Vector`` objects support
    vector addition and subtraction, scalar multiplication and division,
    negation, rounding, and comparison tests.
    """

    __slots__ = ()

    def __new__(cls, values, keep=False):
        if keep is not False:
            warnings.warn(
                "the 'keep' argument has been deprecated",
                DeprecationWarning,
            )
        # Vectors are immutable, so an existing instance is returned as-is.
        return values if type(values) == Vector else super().__new__(cls, values)

    def __repr__(self):
        return f"{self.__class__.__name__}({super().__repr__()})"

    def _vectorOp(self, other, op):
        # Component-wise against another Vector, broadcast against a scalar.
        if isinstance(other, Vector):
            assert len(self) == len(other)
            return self.__class__(map(op, self, other))
        if isinstance(other, Number):
            return self.__class__(op(component, other) for component in self)
        raise NotImplementedError()

    def _scalarOp(self, other, op):
        if not isinstance(other, Number):
            raise NotImplementedError()
        return self.__class__(op(component, other) for component in self)

    def _unaryOp(self, op):
        return self.__class__(map(op, self))

    def __add__(self, other):
        return self._vectorOp(other, operator.add)

    __radd__ = __add__

    def __sub__(self, other):
        return self._vectorOp(other, operator.sub)

    def __rsub__(self, other):
        return self._vectorOp(other, _operator_rsub)

    def __mul__(self, other):
        return self._scalarOp(other, operator.mul)

    __rmul__ = __mul__

    def __truediv__(self, other):
        return self._scalarOp(other, operator.truediv)

    def __rtruediv__(self, other):
        return self._scalarOp(other, _operator_rtruediv)

    def __pos__(self):
        return self._unaryOp(operator.pos)

    def __neg__(self):
        return self._unaryOp(operator.neg)

    def __round__(self, *, round=round):
        return self._unaryOp(round)

    def __eq__(self, other):
        if isinstance(other, list):
            # bw compat Vector([1, 2, 3]) == [1, 2, 3]
            other = tuple(other)
        return super().__eq__(other)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __bool__(self):
        return any(self)

    __nonzero__ = __bool__

    def __abs__(self):
        return math.sqrt(sum(component * component for component in self))

    def length(self):
        """Return the length of the vector. Equivalent to abs(vector)."""
        return abs(self)

    def normalized(self):
        """Return the normalized vector of the vector."""
        return self / abs(self)

    def dot(self, other):
        """Performs vector dot product, returning the sum of
        ``a[0] * b[0], a[1] * b[1], ...``"""
        assert len(self) == len(other)
        return sum(map(operator.mul, self, other))

    # Deprecated methods/properties

    def toInt(self):
        warnings.warn(
            "the 'toInt' method has been deprecated, use round(vector) instead",
            DeprecationWarning,
        )
        return self.__round__()

    @property
    def values(self):
        warnings.warn(
            "the 'values' attribute has been deprecated, use "
            "the vector object itself instead",
            DeprecationWarning,
        )
        return list(self)

    @values.setter
    def values(self, values):
        raise AttributeError(
            "can't set attribute, the 'values' attribute has been deprecated",
        )

    def isclose(self, other: "Vector", **kwargs) -> bool:
        """Return True if the vector is close to another Vector."""
        assert len(self) == len(other)
        return all(math.isclose(a, b, **kwargs) for a, b in zip(self, other))
|
||||
|
||||
|
||||
def _operator_rsub(a, b):
|
||||
return operator.sub(b, a)
|
||||
|
||||
|
||||
def _operator_rtruediv(a, b):
|
||||
return operator.truediv(b, a)
|
||||
142
venv/lib/python3.12/site-packages/fontTools/misc/visitor.py
Normal file
142
venv/lib/python3.12/site-packages/fontTools/misc/visitor.py
Normal file
@ -0,0 +1,142 @@
|
||||
"""Generic visitor pattern implementation for Python objects."""
|
||||
|
||||
import enum
|
||||
|
||||
|
||||
class Visitor(object):
|
||||
defaultStop = False
|
||||
|
||||
@classmethod
|
||||
def _register(celf, clazzes_attrs):
|
||||
assert celf != Visitor, "Subclass Visitor instead."
|
||||
if "_visitors" not in celf.__dict__:
|
||||
celf._visitors = {}
|
||||
|
||||
def wrapper(method):
|
||||
assert method.__name__ == "visit"
|
||||
for clazzes, attrs in clazzes_attrs:
|
||||
if type(clazzes) != tuple:
|
||||
clazzes = (clazzes,)
|
||||
if type(attrs) == str:
|
||||
attrs = (attrs,)
|
||||
for clazz in clazzes:
|
||||
_visitors = celf._visitors.setdefault(clazz, {})
|
||||
for attr in attrs:
|
||||
assert attr not in _visitors, (
|
||||
"Oops, class '%s' has visitor function for '%s' defined already."
|
||||
% (clazz.__name__, attr)
|
||||
)
|
||||
_visitors[attr] = method
|
||||
return None
|
||||
|
||||
return wrapper
|
||||
|
||||
@classmethod
|
||||
def register(celf, clazzes):
|
||||
if type(clazzes) != tuple:
|
||||
clazzes = (clazzes,)
|
||||
return celf._register([(clazzes, (None,))])
|
||||
|
||||
@classmethod
|
||||
def register_attr(celf, clazzes, attrs):
|
||||
clazzes_attrs = []
|
||||
if type(clazzes) != tuple:
|
||||
clazzes = (clazzes,)
|
||||
if type(attrs) == str:
|
||||
attrs = (attrs,)
|
||||
for clazz in clazzes:
|
||||
clazzes_attrs.append((clazz, attrs))
|
||||
return celf._register(clazzes_attrs)
|
||||
|
||||
@classmethod
|
||||
def register_attrs(celf, clazzes_attrs):
|
||||
return celf._register(clazzes_attrs)
|
||||
|
||||
@classmethod
|
||||
def _visitorsFor(celf, thing, _default={}):
|
||||
typ = type(thing)
|
||||
|
||||
for celf in celf.mro():
|
||||
_visitors = getattr(celf, "_visitors", None)
|
||||
if _visitors is None:
|
||||
break
|
||||
|
||||
for base in typ.mro():
|
||||
m = celf._visitors.get(base, None)
|
||||
if m is not None:
|
||||
return m
|
||||
|
||||
return _default
|
||||
|
||||
def visitObject(self, obj, *args, **kwargs):
|
||||
"""Called to visit an object. This function loops over all non-private
|
||||
attributes of the objects and calls any user-registered (via
|
||||
@register_attr() or @register_attrs()) visit() functions.
|
||||
|
||||
If there is no user-registered visit function, of if there is and it
|
||||
returns True, or it returns None (or doesn't return anything) and
|
||||
visitor.defaultStop is False (default), then the visitor will proceed
|
||||
to call self.visitAttr()"""
|
||||
|
||||
keys = sorted(vars(obj).keys())
|
||||
_visitors = self._visitorsFor(obj)
|
||||
defaultVisitor = _visitors.get("*", None)
|
||||
for key in keys:
|
||||
if key[0] == "_":
|
||||
continue
|
||||
value = getattr(obj, key)
|
||||
visitorFunc = _visitors.get(key, defaultVisitor)
|
||||
if visitorFunc is not None:
|
||||
ret = visitorFunc(self, obj, key, value, *args, **kwargs)
|
||||
if ret == False or (ret is None and self.defaultStop):
|
||||
continue
|
||||
self.visitAttr(obj, key, value, *args, **kwargs)
|
||||
|
||||
def visitAttr(self, obj, attr, value, *args, **kwargs):
|
||||
"""Called to visit an attribute of an object."""
|
||||
self.visit(value, *args, **kwargs)
|
||||
|
||||
def visitList(self, obj, *args, **kwargs):
|
||||
"""Called to visit any value that is a list."""
|
||||
for value in obj:
|
||||
self.visit(value, *args, **kwargs)
|
||||
|
||||
def visitDict(self, obj, *args, **kwargs):
|
||||
"""Called to visit any value that is a dictionary."""
|
||||
for value in obj.values():
|
||||
self.visit(value, *args, **kwargs)
|
||||
|
||||
def visitLeaf(self, obj, *args, **kwargs):
|
||||
"""Called to visit any value that is not an object, list,
|
||||
or dictionary."""
|
||||
pass
|
||||
|
||||
def visit(self, obj, *args, **kwargs):
|
||||
"""This is the main entry to the visitor. The visitor will visit object
|
||||
obj.
|
||||
|
||||
The visitor will first determine if there is a registered (via
|
||||
@register()) visit function for the type of object. If there is, it
|
||||
will be called, and (visitor, obj, *args, **kwargs) will be passed to
|
||||
the user visit function.
|
||||
|
||||
If there is no user-registered visit function, of if there is and it
|
||||
returns True, or it returns None (or doesn't return anything) and
|
||||
visitor.defaultStop is False (default), then the visitor will proceed
|
||||
to dispatch to one of self.visitObject(), self.visitList(),
|
||||
self.visitDict(), or self.visitLeaf() (any of which can be overriden in
|
||||
a subclass)."""
|
||||
|
||||
visitorFunc = self._visitorsFor(obj).get(None, None)
|
||||
if visitorFunc is not None:
|
||||
ret = visitorFunc(self, obj, *args, **kwargs)
|
||||
if ret == False or (ret is None and self.defaultStop):
|
||||
return
|
||||
if hasattr(obj, "__dict__") and not isinstance(obj, enum.Enum):
|
||||
self.visitObject(obj, *args, **kwargs)
|
||||
elif isinstance(obj, list):
|
||||
self.visitList(obj, *args, **kwargs)
|
||||
elif isinstance(obj, dict):
|
||||
self.visitDict(obj, *args, **kwargs)
|
||||
else:
|
||||
self.visitLeaf(obj, *args, **kwargs)
|
||||
188
venv/lib/python3.12/site-packages/fontTools/misc/xmlReader.py
Normal file
188
venv/lib/python3.12/site-packages/fontTools/misc/xmlReader.py
Normal file
@ -0,0 +1,188 @@
|
||||
from fontTools import ttLib
|
||||
from fontTools.misc.textTools import safeEval
|
||||
from fontTools.ttLib.tables.DefaultTable import DefaultTable
|
||||
import sys
|
||||
import os
|
||||
import logging
|
||||
|
||||
|
||||
log = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class TTXParseError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
BUFSIZE = 0x4000
|
||||
|
||||
|
||||
class XMLReader(object):
|
||||
def __init__(
|
||||
self, fileOrPath, ttFont, progress=None, quiet=None, contentOnly=False
|
||||
):
|
||||
if fileOrPath == "-":
|
||||
fileOrPath = sys.stdin
|
||||
if not hasattr(fileOrPath, "read"):
|
||||
self.file = open(fileOrPath, "rb")
|
||||
self._closeStream = True
|
||||
else:
|
||||
# assume readable file object
|
||||
self.file = fileOrPath
|
||||
self._closeStream = False
|
||||
self.ttFont = ttFont
|
||||
self.progress = progress
|
||||
if quiet is not None:
|
||||
from fontTools.misc.loggingTools import deprecateArgument
|
||||
|
||||
deprecateArgument("quiet", "configure logging instead")
|
||||
self.quiet = quiet
|
||||
self.root = None
|
||||
self.contentStack = []
|
||||
self.contentOnly = contentOnly
|
||||
self.stackSize = 0
|
||||
|
||||
def read(self, rootless=False):
|
||||
if rootless:
|
||||
self.stackSize += 1
|
||||
if self.progress:
|
||||
self.file.seek(0, 2)
|
||||
fileSize = self.file.tell()
|
||||
self.progress.set(0, fileSize // 100 or 1)
|
||||
self.file.seek(0)
|
||||
self._parseFile(self.file)
|
||||
if self._closeStream:
|
||||
self.close()
|
||||
if rootless:
|
||||
self.stackSize -= 1
|
||||
|
||||
def close(self):
|
||||
self.file.close()
|
||||
|
||||
def _parseFile(self, file):
|
||||
from xml.parsers.expat import ParserCreate
|
||||
|
||||
parser = ParserCreate()
|
||||
parser.StartElementHandler = self._startElementHandler
|
||||
parser.EndElementHandler = self._endElementHandler
|
||||
parser.CharacterDataHandler = self._characterDataHandler
|
||||
|
||||
pos = 0
|
||||
while True:
|
||||
chunk = file.read(BUFSIZE)
|
||||
if not chunk:
|
||||
parser.Parse(chunk, 1)
|
||||
break
|
||||
pos = pos + len(chunk)
|
||||
if self.progress:
|
||||
self.progress.set(pos // 100)
|
||||
parser.Parse(chunk, 0)
|
||||
|
||||
def _startElementHandler(self, name, attrs):
|
||||
if self.stackSize == 1 and self.contentOnly:
|
||||
# We already know the table we're parsing, skip
|
||||
# parsing the table tag and continue to
|
||||
# stack '2' which begins parsing content
|
||||
self.contentStack.append([])
|
||||
self.stackSize = 2
|
||||
return
|
||||
stackSize = self.stackSize
|
||||
self.stackSize = stackSize + 1
|
||||
subFile = attrs.get("src")
|
||||
if subFile is not None:
|
||||
if hasattr(self.file, "name"):
|
||||
# if file has a name, get its parent directory
|
||||
dirname = os.path.dirname(self.file.name)
|
||||
else:
|
||||
# else fall back to using the current working directory
|
||||
dirname = os.getcwd()
|
||||
subFile = os.path.join(dirname, subFile)
|
||||
if not stackSize:
|
||||
if name != "ttFont":
|
||||
raise TTXParseError("illegal root tag: %s" % name)
|
||||
if self.ttFont.reader is None and not self.ttFont.tables:
|
||||
sfntVersion = attrs.get("sfntVersion")
|
||||
if sfntVersion is not None:
|
||||
if len(sfntVersion) != 4:
|
||||
sfntVersion = safeEval('"' + sfntVersion + '"')
|
||||
self.ttFont.sfntVersion = sfntVersion
|
||||
self.contentStack.append([])
|
||||
elif stackSize == 1:
|
||||
if subFile is not None:
|
||||
subReader = XMLReader(subFile, self.ttFont, self.progress)
|
||||
subReader.read()
|
||||
self.contentStack.append([])
|
||||
return
|
||||
tag = ttLib.xmlToTag(name)
|
||||
msg = "Parsing '%s' table..." % tag
|
||||
if self.progress:
|
||||
self.progress.setLabel(msg)
|
||||
log.info(msg)
|
||||
if tag == "GlyphOrder":
|
||||
tableClass = ttLib.GlyphOrder
|
||||
elif "ERROR" in attrs or ("raw" in attrs and safeEval(attrs["raw"])):
|
||||
tableClass = DefaultTable
|
||||
else:
|
||||
tableClass = ttLib.getTableClass(tag)
|
||||
if tableClass is None:
|
||||
tableClass = DefaultTable
|
||||
if tag == "loca" and tag in self.ttFont:
|
||||
# Special-case the 'loca' table as we need the
|
||||
# original if the 'glyf' table isn't recompiled.
|
||||
self.currentTable = self.ttFont[tag]
|
||||
else:
|
||||
self.currentTable = tableClass(tag)
|
||||
self.ttFont[tag] = self.currentTable
|
||||
self.contentStack.append([])
|
||||
elif stackSize == 2 and subFile is not None:
|
||||
subReader = XMLReader(subFile, self.ttFont, self.progress, contentOnly=True)
|
||||
subReader.read()
|
||||
self.contentStack.append([])
|
||||
self.root = subReader.root
|
||||
elif stackSize == 2:
|
||||
self.contentStack.append([])
|
||||
self.root = (name, attrs, self.contentStack[-1])
|
||||
else:
|
||||
l = []
|
||||
self.contentStack[-1].append((name, attrs, l))
|
||||
self.contentStack.append(l)
|
||||
|
||||
def _characterDataHandler(self, data):
|
||||
if self.stackSize > 1:
|
||||
# parser parses in chunks, so we may get multiple calls
|
||||
# for the same text node; thus we need to append the data
|
||||
# to the last item in the content stack:
|
||||
# https://github.com/fonttools/fonttools/issues/2614
|
||||
if (
|
||||
data != "\n"
|
||||
and self.contentStack[-1]
|
||||
and isinstance(self.contentStack[-1][-1], str)
|
||||
and self.contentStack[-1][-1] != "\n"
|
||||
):
|
||||
self.contentStack[-1][-1] += data
|
||||
else:
|
||||
self.contentStack[-1].append(data)
|
||||
|
||||
def _endElementHandler(self, name):
|
||||
self.stackSize = self.stackSize - 1
|
||||
del self.contentStack[-1]
|
||||
if not self.contentOnly:
|
||||
if self.stackSize == 1:
|
||||
self.root = None
|
||||
elif self.stackSize == 2:
|
||||
name, attrs, content = self.root
|
||||
self.currentTable.fromXML(name, attrs, content, self.ttFont)
|
||||
self.root = None
|
||||
|
||||
|
||||
class ProgressPrinter(object):
|
||||
def __init__(self, title, maxval=100):
|
||||
print(title)
|
||||
|
||||
def set(self, val, maxval=None):
|
||||
pass
|
||||
|
||||
def increment(self, val=1):
|
||||
pass
|
||||
|
||||
def setLabel(self, text):
|
||||
print(text)
|
||||
204
venv/lib/python3.12/site-packages/fontTools/misc/xmlWriter.py
Normal file
204
venv/lib/python3.12/site-packages/fontTools/misc/xmlWriter.py
Normal file
@ -0,0 +1,204 @@
|
||||
"""xmlWriter.py -- Simple XML authoring class"""
|
||||
|
||||
from fontTools.misc.textTools import byteord, strjoin, tobytes, tostr
|
||||
import sys
|
||||
import os
|
||||
import string
|
||||
|
||||
INDENT = " "
|
||||
|
||||
|
||||
class XMLWriter(object):
|
||||
def __init__(
|
||||
self,
|
||||
fileOrPath,
|
||||
indentwhite=INDENT,
|
||||
idlefunc=None,
|
||||
encoding="utf_8",
|
||||
newlinestr="\n",
|
||||
):
|
||||
if encoding.lower().replace("-", "").replace("_", "") != "utf8":
|
||||
raise Exception("Only UTF-8 encoding is supported.")
|
||||
if fileOrPath == "-":
|
||||
fileOrPath = sys.stdout
|
||||
if not hasattr(fileOrPath, "write"):
|
||||
self.filename = fileOrPath
|
||||
self.file = open(fileOrPath, "wb")
|
||||
self._closeStream = True
|
||||
else:
|
||||
self.filename = None
|
||||
# assume writable file object
|
||||
self.file = fileOrPath
|
||||
self._closeStream = False
|
||||
|
||||
# Figure out if writer expects bytes or unicodes
|
||||
try:
|
||||
# The bytes check should be first. See:
|
||||
# https://github.com/fonttools/fonttools/pull/233
|
||||
self.file.write(b"")
|
||||
self.totype = tobytes
|
||||
except TypeError:
|
||||
# This better not fail.
|
||||
self.file.write("")
|
||||
self.totype = tostr
|
||||
self.indentwhite = self.totype(indentwhite)
|
||||
if newlinestr is None:
|
||||
self.newlinestr = self.totype(os.linesep)
|
||||
else:
|
||||
self.newlinestr = self.totype(newlinestr)
|
||||
self.indentlevel = 0
|
||||
self.stack = []
|
||||
self.needindent = 1
|
||||
self.idlefunc = idlefunc
|
||||
self.idlecounter = 0
|
||||
self._writeraw('<?xml version="1.0" encoding="UTF-8"?>')
|
||||
self.newline()
|
||||
|
||||
def __enter__(self):
|
||||
return self
|
||||
|
||||
def __exit__(self, exception_type, exception_value, traceback):
|
||||
self.close()
|
||||
|
||||
def close(self):
|
||||
if self._closeStream:
|
||||
self.file.close()
|
||||
|
||||
def write(self, string, indent=True):
|
||||
"""Writes text."""
|
||||
self._writeraw(escape(string), indent=indent)
|
||||
|
||||
def writecdata(self, string):
|
||||
"""Writes text in a CDATA section."""
|
||||
self._writeraw("<![CDATA[" + string + "]]>")
|
||||
|
||||
def write8bit(self, data, strip=False):
|
||||
"""Writes a bytes() sequence into the XML, escaping
|
||||
non-ASCII bytes. When this is read in xmlReader,
|
||||
the original bytes can be recovered by encoding to
|
||||
'latin-1'."""
|
||||
self._writeraw(escape8bit(data), strip=strip)
|
||||
|
||||
def write_noindent(self, string):
|
||||
"""Writes text without indentation."""
|
||||
self._writeraw(escape(string), indent=False)
|
||||
|
||||
def _writeraw(self, data, indent=True, strip=False):
|
||||
"""Writes bytes, possibly indented."""
|
||||
if indent and self.needindent:
|
||||
self.file.write(self.indentlevel * self.indentwhite)
|
||||
self.needindent = 0
|
||||
s = self.totype(data, encoding="utf_8")
|
||||
if strip:
|
||||
s = s.strip()
|
||||
self.file.write(s)
|
||||
|
||||
def newline(self):
|
||||
self.file.write(self.newlinestr)
|
||||
self.needindent = 1
|
||||
idlecounter = self.idlecounter
|
||||
if not idlecounter % 100 and self.idlefunc is not None:
|
||||
self.idlefunc()
|
||||
self.idlecounter = idlecounter + 1
|
||||
|
||||
def comment(self, data):
|
||||
data = escape(data)
|
||||
lines = data.split("\n")
|
||||
self._writeraw("<!-- " + lines[0])
|
||||
for line in lines[1:]:
|
||||
self.newline()
|
||||
self._writeraw(" " + line)
|
||||
self._writeraw(" -->")
|
||||
|
||||
def simpletag(self, _TAG_, *args, **kwargs):
|
||||
attrdata = self.stringifyattrs(*args, **kwargs)
|
||||
data = "<%s%s/>" % (_TAG_, attrdata)
|
||||
self._writeraw(data)
|
||||
|
||||
def begintag(self, _TAG_, *args, **kwargs):
|
||||
attrdata = self.stringifyattrs(*args, **kwargs)
|
||||
data = "<%s%s>" % (_TAG_, attrdata)
|
||||
self._writeraw(data)
|
||||
self.stack.append(_TAG_)
|
||||
self.indent()
|
||||
|
||||
def endtag(self, _TAG_):
|
||||
assert self.stack and self.stack[-1] == _TAG_, "nonmatching endtag"
|
||||
del self.stack[-1]
|
||||
self.dedent()
|
||||
data = "</%s>" % _TAG_
|
||||
self._writeraw(data)
|
||||
|
||||
def dumphex(self, data):
|
||||
linelength = 16
|
||||
hexlinelength = linelength * 2
|
||||
chunksize = 8
|
||||
for i in range(0, len(data), linelength):
|
||||
hexline = hexStr(data[i : i + linelength])
|
||||
line = ""
|
||||
white = ""
|
||||
for j in range(0, hexlinelength, chunksize):
|
||||
line = line + white + hexline[j : j + chunksize]
|
||||
white = " "
|
||||
self._writeraw(line)
|
||||
self.newline()
|
||||
|
||||
def indent(self):
|
||||
self.indentlevel = self.indentlevel + 1
|
||||
|
||||
def dedent(self):
|
||||
assert self.indentlevel > 0
|
||||
self.indentlevel = self.indentlevel - 1
|
||||
|
||||
def stringifyattrs(self, *args, **kwargs):
|
||||
if kwargs:
|
||||
assert not args
|
||||
attributes = sorted(kwargs.items())
|
||||
elif args:
|
||||
assert len(args) == 1
|
||||
attributes = args[0]
|
||||
else:
|
||||
return ""
|
||||
data = ""
|
||||
for attr, value in attributes:
|
||||
if not isinstance(value, (bytes, str)):
|
||||
value = str(value)
|
||||
data = data + ' %s="%s"' % (attr, escapeattr(value))
|
||||
return data
|
||||
|
||||
|
||||
def escape(data):
|
||||
data = tostr(data, "utf_8")
|
||||
data = data.replace("&", "&")
|
||||
data = data.replace("<", "<")
|
||||
data = data.replace(">", ">")
|
||||
data = data.replace("\r", " ")
|
||||
return data
|
||||
|
||||
|
||||
def escapeattr(data):
|
||||
data = escape(data)
|
||||
data = data.replace('"', """)
|
||||
return data
|
||||
|
||||
|
||||
def escape8bit(data):
|
||||
"""Input is Unicode string."""
|
||||
|
||||
def escapechar(c):
|
||||
n = ord(c)
|
||||
if 32 <= n <= 127 and c not in "<&>":
|
||||
return c
|
||||
else:
|
||||
return "&#" + repr(n) + ";"
|
||||
|
||||
return strjoin(map(escapechar, data.decode("latin-1")))
|
||||
|
||||
|
||||
def hexStr(s):
|
||||
h = string.hexdigits
|
||||
r = ""
|
||||
for c in s:
|
||||
i = byteord(c)
|
||||
r = r + h[(i >> 4) & 0xF] + h[i & 0xF]
|
||||
return r
|
||||
1402
venv/lib/python3.12/site-packages/fontTools/mtiLib/__init__.py
Normal file
1402
venv/lib/python3.12/site-packages/fontTools/mtiLib/__init__.py
Normal file
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,5 @@
|
||||
import sys
|
||||
from fontTools.mtiLib import main
|
||||
|
||||
if __name__ == "__main__":
|
||||
sys.exit(main())
|
||||
@ -0,0 +1 @@
|
||||
"""OpenType Layout-related functionality."""
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user