Bug 1554306 - Update HarfBuzz to version 2.5.1. r=jfkthame

Differential Revision: https://phabricator.services.mozilla.com/D32776
This commit is contained in:
Ryan VanderMeulen
2019-06-03 15:11:49 +00:00
parent 2b35384c93
commit 3d23ed6510
169 changed files with 14710 additions and 6371 deletions

View File

@@ -1,11 +1,14 @@
Behdad Esfahbod Behdad Esfahbod
David Corbett
David Turner David Turner
Ebrahim Byagowi Ebrahim Byagowi
Garret Rieger
Jonathan Kew Jonathan Kew
Khaled Hosny Khaled Hosny
Lars Knoll Lars Knoll
Martin Hosken Martin Hosken
Owen Taylor Owen Taylor
Roderick Sheeter
Roozbeh Pournader Roozbeh Pournader
Simon Hausmann Simon Hausmann
Werner Lemberg Werner Lemberg

View File

@@ -2,7 +2,8 @@ HarfBuzz is licensed under the so-called "Old MIT" license. Details follow.
For parts of HarfBuzz that are licensed under different licenses see individual For parts of HarfBuzz that are licensed under different licenses see individual
files names COPYING in subdirectories where applicable. files names COPYING in subdirectories where applicable.
Copyright © 2010,2011,2012 Google, Inc. Copyright © 2010,2011,2012,2013,2014,2015,2016,2017,2018,2019 Google, Inc.
Copyright © 2019 Facebook, Inc.
Copyright © 2012 Mozilla Foundation Copyright © 2012 Mozilla Foundation
Copyright © 2011 Codethink Limited Copyright © 2011 Codethink Limited
Copyright © 2008,2010 Nokia Corporation and/or its subsidiary(-ies) Copyright © 2008,2010 Nokia Corporation and/or its subsidiary(-ies)

View File

@@ -9,12 +9,18 @@ SUBDIRS = src util test docs
EXTRA_DIST = \ EXTRA_DIST = \
autogen.sh \ autogen.sh \
harfbuzz.doap \ harfbuzz.doap \
README.md \
README.mingw.md \
README.python.md \ README.python.md \
README.wine.md \
BUILD.md \ BUILD.md \
RELEASING.md \ RELEASING.md \
TESTING.md \
CMakeLists.txt \ CMakeLists.txt \
replace-enum-strings.cmake \ replace-enum-strings.cmake \
mingw-configure.sh \
mingw-ldd.py \
mingw32.sh \
mingw64.sh \
$(NULL) $(NULL)
MAINTAINERCLEANFILES = \ MAINTAINERCLEANFILES = \
@@ -60,8 +66,6 @@ DISTCHECK_CONFIGURE_FLAGS = \
--enable-introspection \ --enable-introspection \
$(NULL) $(NULL)
# TODO: Copy infrastructure from cairo
# TAR_OPTIONS is not set as env var for 'make dist'. How to fix that? # TAR_OPTIONS is not set as env var for 'make dist'. How to fix that?
TAR_OPTIONS = --owner=0 --group=0 TAR_OPTIONS = --owner=0 --group=0
@@ -70,8 +74,7 @@ dist-hook: dist-clear-sticky-bits
dist-clear-sticky-bits: dist-clear-sticky-bits:
chmod -R a-s $(distdir) chmod -R a-s $(distdir)
tar_file = $(PACKAGE_TARNAME)-$(VERSION).tar.xz
tar_file = $(PACKAGE_TARNAME)-$(VERSION).tar.bz2
sha256_file = $(tar_file).sha256 sha256_file = $(tar_file).sha256
gpg_file = $(sha256_file).asc gpg_file = $(sha256_file).asc
$(sha256_file): $(tar_file) $(sha256_file): $(tar_file)
@@ -82,5 +85,18 @@ $(gpg_file): $(sha256_file)
release-files: $(tar_file) $(sha256_file) $(gpg_file) release-files: $(tar_file) $(sha256_file) $(gpg_file)
dist-win:
@case $(host_triplet) in *-w64-mingw32) ;; *) echo "Error: Requires mingw build. See README.mingw.md.">&2; exit 1 ;; esac
@DIR=$(PACKAGE_TARNAME)-$(VERSION)-win`case $(host_triplet) in i686-*) echo 32 ;; x86_64-*) echo 64 ;; esac`; \
$(RM) -r $$DIR; $(MKDIR_P) $$DIR || exit 1; \
cp util/.libs/hb-{shape,view,subset}.exe $$DIR && \
$(top_srcdir)/mingw-ldd.py $$DIR/hb-view.exe | grep -v 'not found' | cut -d '>' -f 2 | xargs cp -t $$DIR && \
cp src/.libs/libharfbuzz{,-subset}-0.dll $$DIR && \
chmod a+x $$DIR/*.{exe,dll} && \
$(STRIP) $$DIR/*.{exe,dll} && \
zip -r $$DIR.zip $$DIR && \
$(RM) -r $$DIR && \
echo "$$DIR.zip is ready."
-include $(top_srcdir)/git.mk -include $(top_srcdir)/git.mk

View File

@@ -1,3 +1,28 @@
Overview of changes leading to 2.5.1
Friday, May 31, 2019
====================================
- Fix build with various versions of Visual Studio.
- Improved documentation, thanks to Nathan Willis.
- Bugfix in subsetting glyf table.
- Improved scripts for cross-compiling for Windows using mingw.
- Rename HB_MATH_GLYPH_PART_FLAG_EXTENDER to HB_OT_MATH_GLYPH_PART_FLAG_EXTENDER.
A deprecated macro is added for backwards-compatibility.
Overview of changes leading to 2.5.0
Friday, May 24, 2019
====================================
- This release does not include many functional changes, but includes major internal
code-base changes. We now require C++11. Support for gcc 4.8 and earlier has been
dropped.
- New hb-config.hh facility for compiling smaller library for embedded and web usecases.
- New Unicode Character Database implementation that is half the size of previously-used
UCDN.
- Subsetter improvements.
- Improved documentation, thanks to Nathan Willis.
- Misc shaping fixes.
Overview of changes leading to 2.4.0 Overview of changes leading to 2.4.0
Monday, March 25, 2019 Monday, March 25, 2019
==================================== ====================================

View File

@@ -1,7 +1,7 @@
This directory contains the HarfBuzz source from the upstream repo: This directory contains the HarfBuzz source from the upstream repo:
https://github.com/harfbuzz/harfbuzz https://github.com/harfbuzz/harfbuzz
Current version: 2.4.0 [commit d6fc1d49aa099104a889c96bc9087c21d8fc0960] Current version: 2.5.1 [commit 93c455567fe3d92a7efe65bf0e9ac2af794e2c4f]
UPDATING: UPDATING:

View File

@@ -13,6 +13,10 @@ For bug reports, mailing list, and other information please visit:
http://harfbuzz.org/ http://harfbuzz.org/
For license information, see the file COPYING. For license information, see [COPYING](COPYING).
For build information, see [BUILD.md](BUILD.md).
For test execution, see [TESTING.md](TESTING.md).
Documentation: https://harfbuzz.github.io Documentation: https://harfbuzz.github.io

View File

@@ -1,6 +1,6 @@
Bradley Grainger Bradley Grainger
Khaled Hosny
Kenichi Ishibashi Kenichi Ishibashi
Ivan Kuckir <https://photopea.com/>
Ryan Lortie Ryan Lortie
Jeff Muizelaar Jeff Muizelaar
suzuki toshiya suzuki toshiya

View File

@@ -1,6 +1,6 @@
AC_PREREQ([2.64]) AC_PREREQ([2.64])
AC_INIT([HarfBuzz], AC_INIT([HarfBuzz],
[2.4.0], [2.5.1],
[https://github.com/harfbuzz/harfbuzz/issues/new], [https://github.com/harfbuzz/harfbuzz/issues/new],
[harfbuzz], [harfbuzz],
[http://harfbuzz.org/]) [http://harfbuzz.org/])
@@ -9,7 +9,7 @@ AC_CONFIG_MACRO_DIR([m4])
AC_CONFIG_SRCDIR([src/harfbuzz.pc.in]) AC_CONFIG_SRCDIR([src/harfbuzz.pc.in])
AC_CONFIG_HEADERS([config.h]) AC_CONFIG_HEADERS([config.h])
AM_INIT_AUTOMAKE([1.13.0 gnits tar-ustar dist-bzip2 no-dist-gzip -Wall no-define color-tests -Wno-portability]) AM_INIT_AUTOMAKE([1.13.0 gnits tar-ustar dist-xz no-dist-gzip -Wall no-define color-tests -Wno-portability])
AM_SILENT_RULES([yes]) AM_SILENT_RULES([yes])
AX_CODE_COVERAGE AX_CODE_COVERAGE
@@ -23,7 +23,7 @@ AC_PROG_CC
AC_PROG_CC_C99 AC_PROG_CC_C99
AM_PROG_CC_C_O AM_PROG_CC_C_O
AC_PROG_CXX AC_PROG_CXX
dnl AX_CXX_COMPILE_STDCXX(11, noext, optional) AX_CXX_COMPILE_STDCXX(11,, optional)
AC_SYS_LARGEFILE AC_SYS_LARGEFILE
PKG_PROG_PKG_CONFIG([0.20]) PKG_PROG_PKG_CONFIG([0.20])
AM_MISSING_PROG([RAGEL], [ragel]) AM_MISSING_PROG([RAGEL], [ragel])
@@ -134,9 +134,7 @@ AC_MSG_RESULT([$hb_os_win32])
AM_CONDITIONAL(OS_WIN32, test "$hb_os_win32" = "yes") AM_CONDITIONAL(OS_WIN32, test "$hb_os_win32" = "yes")
have_pthread=false have_pthread=false
if test "$hb_os_win32" = no; then
AX_PTHREAD([have_pthread=true]) AX_PTHREAD([have_pthread=true])
fi
if $have_pthread; then if $have_pthread; then
AC_DEFINE(HAVE_PTHREAD, 1, [Have POSIX threads]) AC_DEFINE(HAVE_PTHREAD, 1, [Have POSIX threads])
fi fi
@@ -300,21 +298,6 @@ AM_CONDITIONAL(HAVE_ICU_BUILTIN, $have_icu && test "x$with_icu" = "xbuiltin")
dnl =========================================================================== dnl ===========================================================================
AC_ARG_WITH(ucdn,
[AS_HELP_STRING([--with-ucdn=@<:@yes/no@:>@],
[Use builtin UCDN library @<:@default=yes@:>@])],,
[with_ucdn=yes])
have_ucdn=false
if test "x$with_ucdn" = "xyes"; then
have_ucdn=true
fi
if $have_ucdn; then
AC_DEFINE(HAVE_UCDN, 1, [Have UCDN Unicode functions])
fi
AM_CONDITIONAL(HAVE_UCDN, $have_ucdn)
dnl ==========================================================================
AC_ARG_WITH(graphite2, AC_ARG_WITH(graphite2,
[AS_HELP_STRING([--with-graphite2=@<:@yes/no/auto@:>@], [AS_HELP_STRING([--with-graphite2=@<:@yes/no/auto@:>@],
[Use the graphite2 library @<:@default=no@:>@])],, [Use the graphite2 library @<:@default=no@:>@])],,
@@ -497,7 +480,6 @@ AC_CONFIG_FILES([
Makefile Makefile
src/Makefile src/Makefile
src/harfbuzz-config.cmake src/harfbuzz-config.cmake
src/hb-ucdn/Makefile
util/Makefile util/Makefile
test/Makefile test/Makefile
test/api/Makefile test/api/Makefile
@@ -525,7 +507,7 @@ AC_MSG_NOTICE([
Build configuration: Build configuration:
Unicode callbacks (you want at least one): Unicode callbacks (you want at least one):
Builtin (UCDN): ${have_ucdn} Builtin true
Glib: ${have_glib} Glib: ${have_glib}
ICU: ${have_icu} ICU: ${have_icu}

View File

@@ -87,17 +87,6 @@ HBSOURCES += $(HB_CORETEXT_sources)
HBHEADERS += $(HB_CORETEXT_headers) HBHEADERS += $(HB_CORETEXT_headers)
endif endif
if HAVE_UCDN
SUBDIRS += hb-ucdn
HBCFLAGS += -I$(srcdir)/hb-ucdn
HBLIBS += hb-ucdn/libhb-ucdn.la
HBSOURCES += $(HB_UCDN_sources)
hb-ucdn/libhb-ucdn.la: ucdn
ucdn:
@$(MAKE) $(AM_MAKEFLAGS) -C hb-ucdn
endif
DIST_SUBDIRS += hb-ucdn
BUILT_SOURCES += \ BUILT_SOURCES += \
hb-version.h hb-version.h
@@ -258,6 +247,7 @@ GENERATORS = \
gen-indic-table.py \ gen-indic-table.py \
gen-os2-unicode-ranges.py \ gen-os2-unicode-ranges.py \
gen-tag-table.py \ gen-tag-table.py \
gen-ucd-table.py \
gen-use-table.py \ gen-use-table.py \
gen-vowel-constraints.py \ gen-vowel-constraints.py \
$(NULL) $(NULL)
@@ -310,9 +300,9 @@ noinst_PROGRAMS = \
main \ main \
test \ test \
test-buffer-serialize \ test-buffer-serialize \
test-name-table \ test-ot-name \
test-size-params \ test-gpos-size-params \
test-would-substitute \ test-gsub-would-substitute \
$(NULL) $(NULL)
bin_PROGRAMS = bin_PROGRAMS =
@@ -328,17 +318,17 @@ test_buffer_serialize_SOURCES = test-buffer-serialize.cc
test_buffer_serialize_CPPFLAGS = $(HBCFLAGS) test_buffer_serialize_CPPFLAGS = $(HBCFLAGS)
test_buffer_serialize_LDADD = libharfbuzz.la $(HBLIBS) test_buffer_serialize_LDADD = libharfbuzz.la $(HBLIBS)
test_name_table_SOURCES = test-name-table.cc test_ot_name_SOURCES = test-ot-name.cc
test_name_table_CPPFLAGS = $(HBCFLAGS) test_ot_name_CPPFLAGS = $(HBCFLAGS)
test_name_table_LDADD = libharfbuzz.la $(HBLIBS) test_ot_name_LDADD = libharfbuzz.la $(HBLIBS)
test_size_params_SOURCES = test-size-params.cc test_gpos_size_params_SOURCES = test-gpos-size-params.cc
test_size_params_CPPFLAGS = $(HBCFLAGS) test_gpos_size_params_CPPFLAGS = $(HBCFLAGS)
test_size_params_LDADD = libharfbuzz.la $(HBLIBS) test_gpos_size_params_LDADD = libharfbuzz.la $(HBLIBS)
test_would_substitute_SOURCES = test-would-substitute.cc test_gsub_would_substitute_SOURCES = test-gsub-would-substitute.cc
test_would_substitute_CPPFLAGS = $(HBCFLAGS) $(FREETYPE_CFLAGS) test_gsub_would_substitute_CPPFLAGS = $(HBCFLAGS) $(FREETYPE_CFLAGS)
test_would_substitute_LDADD = libharfbuzz.la $(HBLIBS) $(FREETYPE_LIBS) test_gsub_would_substitute_LDADD = libharfbuzz.la $(HBLIBS) $(FREETYPE_LIBS)
if HAVE_FREETYPE if HAVE_FREETYPE
if HAVE_CAIRO_FT if HAVE_CAIRO_FT
@@ -384,16 +374,24 @@ dump_use_data_SOURCES = dump-use-data.cc hb-ot-shape-complex-use-table.cc
dump_use_data_CPPFLAGS = $(HBCFLAGS) dump_use_data_CPPFLAGS = $(HBCFLAGS)
dump_use_data_LDADD = libharfbuzz.la $(HBLIBS) dump_use_data_LDADD = libharfbuzz.la $(HBLIBS)
COMPILED_TESTS = test-iter test-ot-tag test-unicode-ranges COMPILED_TESTS = test-algs test-iter test-meta test-ot-tag test-unicode-ranges
COMPILED_TESTS_CPPFLAGS = $(HBCFLAGS) -DMAIN -UNDEBUG COMPILED_TESTS_CPPFLAGS = $(HBCFLAGS) -DMAIN -UNDEBUG
COMPILED_TESTS_LDADD = libharfbuzz.la $(HBLIBS) COMPILED_TESTS_LDADD = libharfbuzz.la $(HBLIBS)
check_PROGRAMS += $(COMPILED_TESTS) check_PROGRAMS += $(COMPILED_TESTS)
TESTS += $(COMPILED_TESTS) TESTS += $(COMPILED_TESTS)
test_algs_SOURCES = test-algs.cc hb-static.cc
test_algs_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS)
test_algs_LDADD = $(COMPILED_TESTS_LDADD)
test_iter_SOURCES = test-iter.cc hb-static.cc test_iter_SOURCES = test-iter.cc hb-static.cc
test_iter_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS) test_iter_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS)
test_iter_LDADD = $(COMPILED_TESTS_LDADD) test_iter_LDADD = $(COMPILED_TESTS_LDADD)
test_meta_SOURCES = test-meta.cc hb-static.cc
test_meta_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS)
test_meta_LDADD = $(COMPILED_TESTS_LDADD)
test_ot_tag_SOURCES = hb-ot-tag.cc test_ot_tag_SOURCES = hb-ot-tag.cc
test_ot_tag_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS) test_ot_tag_CPPFLAGS = $(COMPILED_TESTS_CPPFLAGS)
test_ot_tag_LDADD = $(COMPILED_TESTS_LDADD) test_ot_tag_LDADD = $(COMPILED_TESTS_LDADD)

View File

@@ -16,6 +16,7 @@ HB_BASE_sources = \
hb-aat-ltag-table.hh \ hb-aat-ltag-table.hh \
hb-aat-map.cc \ hb-aat-map.cc \
hb-aat-map.hh \ hb-aat-map.hh \
hb-algs.hh \
hb-array.hh \ hb-array.hh \
hb-atomic.hh \ hb-atomic.hh \
hb-blob.cc \ hb-blob.cc \
@@ -30,8 +31,9 @@ HB_BASE_sources = \
hb-cff1-interp-cs.hh \ hb-cff1-interp-cs.hh \
hb-cff2-interp-cs.hh \ hb-cff2-interp-cs.hh \
hb-common.cc \ hb-common.cc \
hb-config.hh \
hb-debug.hh \ hb-debug.hh \
hb-dsalgs.hh \ hb-dispatch.hh \
hb-face.cc \ hb-face.cc \
hb-face.hh \ hb-face.hh \
hb-font.cc \ hb-font.cc \
@@ -41,6 +43,7 @@ HB_BASE_sources = \
hb-machinery.hh \ hb-machinery.hh \
hb-map.cc \ hb-map.cc \
hb-map.hh \ hb-map.hh \
hb-meta.hh \
hb-mutex.hh \ hb-mutex.hh \
hb-null.hh \ hb-null.hh \
hb-object.hh \ hb-object.hh \
@@ -82,7 +85,7 @@ HB_BASE_sources = \
hb-ot-math-table.hh \ hb-ot-math-table.hh \
hb-ot-math.cc \ hb-ot-math.cc \
hb-ot-maxp-table.hh \ hb-ot-maxp-table.hh \
hb-ot-name-language.cc \ hb-ot-name-language-static.hh \
hb-ot-name-language.hh \ hb-ot-name-language.hh \
hb-ot-name-table.hh \ hb-ot-name-table.hh \
hb-ot-name.cc \ hb-ot-name.cc \
@@ -127,6 +130,9 @@ HB_BASE_sources = \
hb-ot-var-mvar-table.hh \ hb-ot-var-mvar-table.hh \
hb-ot-var.cc \ hb-ot-var.cc \
hb-ot-vorg-table.hh \ hb-ot-vorg-table.hh \
hb-pool.hh \
hb-sanitize.hh \
hb-serialize.hh \
hb-set-digest.hh \ hb-set-digest.hh \
hb-set.cc \ hb-set.cc \
hb-set.hh \ hb-set.hh \
@@ -139,6 +145,8 @@ HB_BASE_sources = \
hb-shaper.hh \ hb-shaper.hh \
hb-static.cc \ hb-static.cc \
hb-string-array.hh \ hb-string-array.hh \
hb-ucd-table.hh \
hb-ucd.cc \
hb-unicode-emoji-table.hh \ hb-unicode-emoji-table.hh \
hb-unicode.cc \ hb-unicode.cc \
hb-unicode.hh \ hb-unicode.hh \
@@ -218,9 +226,6 @@ HB_DIRECTWRITE_headers = hb-directwrite.h
HB_UNISCRIBE_sources = hb-uniscribe.cc HB_UNISCRIBE_sources = hb-uniscribe.cc
HB_UNISCRIBE_headers = hb-uniscribe.h HB_UNISCRIBE_headers = hb-uniscribe.h
# Additional supplemental sources
HB_UCDN_sources = hb-ucdn.cc
# Sources for libharfbuzz-gobject and libharfbuzz-icu # Sources for libharfbuzz-gobject and libharfbuzz-icu
HB_ICU_sources = hb-icu.cc HB_ICU_sources = hb-icu.cc
HB_ICU_headers = hb-icu.h HB_ICU_headers = hb-icu.h
@@ -236,9 +241,6 @@ HB_SUBSET_sources = \
hb-subset-cff1.hh \ hb-subset-cff1.hh \
hb-subset-cff2.cc \ hb-subset-cff2.cc \
hb-subset-cff2.hh \ hb-subset-cff2.hh \
hb-subset-glyf.cc \
hb-subset-glyf.hh \
hb-subset-glyf.hh \
hb-subset-input.cc \ hb-subset-input.cc \
hb-subset-input.hh \ hb-subset-input.hh \
hb-subset-plan.cc \ hb-subset-plan.cc \

View File

@@ -7,7 +7,7 @@ test -z "$srcdir" && srcdir=.
test -z "$libs" && libs=.libs test -z "$libs" && libs=.libs
stat=0 stat=0
IGNORED_SYMBOLS='_fini\|_init\|_fdata\|_ftext\|_fbss\|__bss_start\|__bss_start__\|__bss_end__\|_edata\|_end\|_bss_end__\|__end__\|__gcov_flush\|llvm_.*' IGNORED_SYMBOLS='_fini\|_init\|_fdata\|_ftext\|_fbss\|__bss_start\|__bss_start__\|__bss_end__\|_edata\|_end\|_bss_end__\|__end__\|__gcov_.*\|llvm_.*'
if which nm 2>/dev/null >/dev/null; then if which nm 2>/dev/null >/dev/null; then
: :

4
gfx/harfbuzz/src/gen-os2-unicode-ranges.py Normal file → Executable file
View File

@@ -1,8 +1,10 @@
#!/usr/bin/python
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
# Generates the code for a sorted unicode range array as used in hb-ot-os2-unicode-ranges.hh # Generates the code for a sorted unicode range array as used in hb-ot-os2-unicode-ranges.hh
# Input is a tab seperated list of unicode ranges from the otspec # Input is a tab seperated list of unicode ranges from the otspec
# (https://docs.microsoft.com/en-us/typography/opentype/spec/os2#ulunicoderange1). # (https://docs.microsoft.com/en-us/typography/opentype/spec/os2#ur).
from __future__ import print_function, division, absolute_import from __future__ import print_function, division, absolute_import

View File

@@ -895,20 +895,18 @@ def language_name_intersection (a, b):
def get_matching_language_name (intersection, candidates): def get_matching_language_name (intersection, candidates):
return next (iter (c for c in candidates if not intersection.isdisjoint (get_variant_set (c)))) return next (iter (c for c in candidates if not intersection.isdisjoint (get_variant_set (c))))
maximum_tags = 0 def same_tag (bcp_47_tag, ot_tags):
return len (bcp_47_tag) == 3 and len (ot_tags) == 1 and bcp_47_tag == ot_tags[0].lower ()
for language, tags in sorted (ot.from_bcp_47.items ()): for language, tags in sorted (ot.from_bcp_47.items ()):
if language == '' or '-' in language: if language == '' or '-' in language:
continue continue
print (' {\"%s\",\t{' % language, end='') commented_out = same_tag (language, tags)
maximum_tags = max (maximum_tags, len (tags))
tag_count = len (tags)
for i, tag in enumerate (tags, start=1): for i, tag in enumerate (tags, start=1):
if i > 1: print ('%s{\"%s\",\t%s},' % ('/*' if commented_out else ' ', language, hb_tag (tag)), end='')
print ('\t\t ', end='') if commented_out:
print (hb_tag (tag), end='') print ('*/', end='')
if i == tag_count: print ('\t/* ', end='')
print ('}}', end='')
print (',\t/* ', end='')
bcp_47_name = bcp_47.names.get (language, '') bcp_47_name = bcp_47.names.get (language, '')
bcp_47_name_candidates = bcp_47_name.split ('\n') bcp_47_name_candidates = bcp_47_name.split ('\n')
intersection = language_name_intersection (bcp_47_name, ot.names[tag]) intersection = language_name_intersection (bcp_47_name, ot.names[tag])
@@ -923,8 +921,6 @@ for language, tags in sorted (ot.from_bcp_47.items ()):
print ('};') print ('};')
print () print ()
print ('static_assert (HB_OT_MAX_TAGS_PER_LANGUAGE == %iu, "");' % maximum_tags)
print ()
print ('/**') print ('/**')
print (' * hb_ot_tags_from_complex_language:') print (' * hb_ot_tags_from_complex_language:')
@@ -1051,7 +1047,8 @@ print (' * @tag: A language tag.')
print (' *') print (' *')
print (' * Converts @tag to a BCP 47 language tag if it is ambiguous (it corresponds to') print (' * Converts @tag to a BCP 47 language tag if it is ambiguous (it corresponds to')
print (' * many language tags) and the best tag is not the alphabetically first, or if') print (' * many language tags) and the best tag is not the alphabetically first, or if')
print (' * the best tag consists of multiple subtags.') print (' * the best tag consists of multiple subtags, or if the best tag does not appear')
print (' * in #ot_languages.')
print (' *') print (' *')
print (' * Return value: The #hb_language_t corresponding to the BCP 47 language tag,') print (' * Return value: The #hb_language_t corresponding to the BCP 47 language tag,')
print (' * or #HB_LANGUAGE_INVALID if @tag is not ambiguous.') print (' * or #HB_LANGUAGE_INVALID if @tag is not ambiguous.')
@@ -1102,7 +1099,8 @@ def verify_disambiguation_dict ():
'%s is not a valid disambiguation for %s' % (disambiguation[ot_tag], ot_tag)) '%s is not a valid disambiguation for %s' % (disambiguation[ot_tag], ot_tag))
elif ot_tag not in disambiguation: elif ot_tag not in disambiguation:
disambiguation[ot_tag] = macrolanguages[0] disambiguation[ot_tag] = macrolanguages[0]
if disambiguation[ot_tag] == sorted (primary_tags)[0] and '-' not in disambiguation[ot_tag]: different_primary_tags = sorted (t for t in primary_tags if not same_tag (t, ot.from_bcp_47.get (t)))
if different_primary_tags and disambiguation[ot_tag] == different_primary_tags[0] and '-' not in disambiguation[ot_tag]:
del disambiguation[ot_tag] del disambiguation[ot_tag]
for ot_tag in disambiguation.keys (): for ot_tag in disambiguation.keys ():
expect (ot_tag in ot.to_bcp_47, 'unknown OT tag: %s' % ot_tag) expect (ot_tag in ot.to_bcp_47, 'unknown OT tag: %s' % ot_tag)

123
gfx/harfbuzz/src/gen-ucd-table.py Executable file
View File

@@ -0,0 +1,123 @@
#!/usr/bin/env python
from __future__ import print_function, division, absolute_import
import io, os.path, sys, re
if len (sys.argv) != 2:
print("usage: ./gen-ucd-table ucd.nonunihan.grouped.xml", file=sys.stderr)
sys.exit(1)
# https://github.com/harfbuzz/packtab
import packTab
import packTab.ucdxml
ucdxml = packTab.ucdxml.load_ucdxml(sys.argv[1])
ucd = packTab.ucdxml.ucdxml_get_repertoire(ucdxml)
gc = [u['gc'] for u in ucd]
ccc = [int(u['ccc']) for u in ucd]
bmg = [int(v, 16) - int(u) if v else 0 for u,v in enumerate(u['bmg'] for u in ucd)]
#gc_ccc_non0 = set((cat,klass) for cat,klass in zip(gc,ccc) if klass)
#gc_bmg_non0 = set((cat,mirr) for cat,mirr in zip(gc, bmg) if mirr)
sc = [u['sc'] for u in ucd]
dm = {i:tuple(int(v, 16) for v in u['dm'].split()) for i,u in enumerate(ucd)
if u['dm'] != '#' and u['dt'] == 'can' and not (0xAC00 <= i < 0xAC00+11172)}
ce = {i for i,u in enumerate(ucd) if u['Comp_Ex'] == 'Y'}
assert not any(v for v in dm.values() if len(v) not in (1,2))
dm1 = sorted(set(v for v in dm.values() if len(v) == 1))
dm1_array = ['0x%04Xu' % v for v in dm1]
dm1_order = {v:i+1 for i,v in enumerate(dm1)}
dm2 = sorted((v, i) for i,v in dm.items() if len(v) == 2)
dm2 = [("HB_CODEPOINT_ENCODE3 (0x%04Xu, 0x%04Xu, 0x%04Xu)" %
(v+(i if i not in ce and not ccc[i] else 0,)), v)
for v,i in dm2]
dm2_array = [s for s,v in dm2]
l = 1 + len(dm1_array)
dm2_order = {v[1]:i+l for i,v in enumerate(dm2)}
dm_order = {None: 0}
dm_order.update(dm1_order)
dm_order.update(dm2_order)
gc_order = packTab.AutoMapping()
for _ in ('Cc', 'Cf', 'Cn', 'Co', 'Cs', 'Ll', 'Lm', 'Lo', 'Lt', 'Lu',
'Mc', 'Me', 'Mn', 'Nd', 'Nl', 'No', 'Pc', 'Pd', 'Pe', 'Pf',
'Pi', 'Po', 'Ps', 'Sc', 'Sk', 'Sm', 'So', 'Zl', 'Zp', 'Zs',):
gc_order[_]
sc_order = packTab.AutoMapping()
sc_array = []
sc_re = re.compile(" (HB_SCRIPT_[_A-Z]*).*HB_TAG [(]'(.)','(.)','(.)','(.)'[)]")
for line in open('hb-common.h'):
m = sc_re.search (line)
if not m: continue
name = m.group(1)
tag = ''.join(m.group(i) for i in range(2, 6))
i = sc_order[tag]
assert i == len(sc_array)
sc_array.append(name)
# TODO Currently if gc_order or sc_order do not capture all values, we get in
# trouble because they silently add new values. We should be able to "freeze"
# them, or just do the mapping ourselves.
DEFAULT = 1
COMPACT = 3
print("/* == Start of generated table == */")
print("/*")
print(" * The following table is generated by running:")
print(" *")
print(" * ./gen-ucd-table.py ucd.nonunihan.grouped.xml")
print(" *")
print(" * on file with this description:", ucdxml.description)
print(" */")
print()
print("#ifndef HB_UCD_TABLE_HH")
print("#define HB_UCD_TABLE_HH")
print()
print()
print('#include "hb.hh"')
print()
code = packTab.Code('_hb_ucd')
sc_array, _ = code.addArray('hb_script_t', 'sc_map', sc_array)
dm1_array, _ = code.addArray('hb_codepoint_t', 'dm1_map', dm1_array)
dm2_array, _ = code.addArray('uint64_t', 'dm2_map', dm2_array)
code.print_c(linkage='static inline')
for compression in (DEFAULT, COMPACT):
print()
if compression == DEFAULT:
print('#ifndef HB_OPTIMIZE_SIZE')
else:
print('#else')
print()
code = packTab.Code('_hb_ucd')
packTab.pack_table(gc, 'Cn', mapping=gc_order, compression=compression).genCode(code, 'gc')
packTab.pack_table(ccc, 0, compression=compression).genCode(code, 'ccc')
packTab.pack_table(bmg, 0, compression=compression).genCode(code, 'bmg')
packTab.pack_table(sc, 'Zzzz', mapping=sc_order, compression=compression).genCode(code, 'sc')
packTab.pack_table(dm, None, mapping=dm_order, compression=compression).genCode(code, 'dm')
code.print_c(linkage='static inline')
if compression != DEFAULT:
print()
print('#endif')
print()
print()
print("#endif /* HB_UCD_TABLE_HH */")
print()
print("/* == End of generated table == */")

View File

@@ -47,8 +47,22 @@ defaults = ('Other', 'Not_Applicable', 'Cn', 'No_Block')
# TODO Characters that are not in Unicode Indic files, but used in USE # TODO Characters that are not in Unicode Indic files, but used in USE
data[0][0x034F] = defaults[0] data[0][0x034F] = defaults[0]
data[0][0x1B61] = defaults[0]
data[0][0x1B63] = defaults[0]
data[0][0x1B64] = defaults[0]
data[0][0x1B65] = defaults[0]
data[0][0x1B66] = defaults[0]
data[0][0x1B67] = defaults[0]
data[0][0x1B69] = defaults[0]
data[0][0x1B6A] = defaults[0]
data[0][0x2060] = defaults[0] data[0][0x2060] = defaults[0]
# TODO https://github.com/roozbehp/unicode-data/issues/9 # TODO https://github.com/harfbuzz/harfbuzz/pull/1685
data[0][0x1B5B] = 'Consonant_Placeholder'
data[0][0x1B5C] = 'Consonant_Placeholder'
data[0][0x1B5F] = 'Consonant_Placeholder'
data[0][0x1B62] = 'Consonant_Placeholder'
data[0][0x1B68] = 'Consonant_Placeholder'
# TODO https://github.com/harfbuzz/harfbuzz/issues/1035
data[0][0x11C44] = 'Consonant_Placeholder' data[0][0x11C44] = 'Consonant_Placeholder'
data[0][0x11C45] = 'Consonant_Placeholder' data[0][0x11C45] = 'Consonant_Placeholder'
# TODO https://github.com/harfbuzz/harfbuzz/pull/1399 # TODO https://github.com/harfbuzz/harfbuzz/pull/1399
@@ -171,7 +185,7 @@ def is_BASE(U, UISC, UGC):
def is_BASE_IND(U, UISC, UGC): def is_BASE_IND(U, UISC, UGC):
#SPEC-DRAFT return (UISC in [Consonant_Dead, Modifying_Letter] or UGC == Po) #SPEC-DRAFT return (UISC in [Consonant_Dead, Modifying_Letter] or UGC == Po)
return (UISC in [Consonant_Dead, Modifying_Letter] or return (UISC in [Consonant_Dead, Modifying_Letter] or
(UGC == Po and not U in [0x104B, 0x104E, 0x2022, 0x111C8, 0x11A3F, 0x11A45, 0x11C44, 0x11C45]) or (UGC == Po and not U in [0x104B, 0x104E, 0x1B5B, 0x1B5C, 0x1B5F, 0x2022, 0x111C8, 0x11A3F, 0x11A45, 0x11C44, 0x11C45]) or
False # SPEC-DRAFT-OUTDATED! U == 0x002D False # SPEC-DRAFT-OUTDATED! U == 0x002D
) )
def is_BASE_NUM(U, UISC, UGC): def is_BASE_NUM(U, UISC, UGC):
@@ -183,15 +197,15 @@ def is_BASE_OTHER(U, UISC, UGC):
def is_CGJ(U, UISC, UGC): def is_CGJ(U, UISC, UGC):
return U == 0x034F return U == 0x034F
def is_CONS_FINAL(U, UISC, UGC): def is_CONS_FINAL(U, UISC, UGC):
# Consonant_Initial_Postfixed is new in Unicode 11; not in the spec.
return ((UISC == Consonant_Final and UGC != Lo) or return ((UISC == Consonant_Final and UGC != Lo) or
UISC == Consonant_Initial_Postfixed or
UISC == Consonant_Succeeding_Repha) UISC == Consonant_Succeeding_Repha)
def is_CONS_FINAL_MOD(U, UISC, UGC): def is_CONS_FINAL_MOD(U, UISC, UGC):
#SPEC-DRAFT return UISC in [Consonant_Final_Modifier, Syllable_Modifier] #SPEC-DRAFT return UISC in [Consonant_Final_Modifier, Syllable_Modifier]
return UISC == Syllable_Modifier return UISC == Syllable_Modifier
def is_CONS_MED(U, UISC, UGC): def is_CONS_MED(U, UISC, UGC):
return UISC == Consonant_Medial and UGC != Lo # Consonant_Initial_Postfixed is new in Unicode 11; not in the spec.
return (UISC == Consonant_Medial and UGC != Lo or
UISC == Consonant_Initial_Postfixed)
def is_CONS_MOD(U, UISC, UGC): def is_CONS_MOD(U, UISC, UGC):
return UISC in [Nukta, Gemination_Mark, Consonant_Killer] return UISC in [Nukta, Gemination_Mark, Consonant_Killer]
def is_CONS_SUB(U, UISC, UGC): def is_CONS_SUB(U, UISC, UGC):
@@ -200,7 +214,9 @@ def is_CONS_SUB(U, UISC, UGC):
def is_CONS_WITH_STACKER(U, UISC, UGC): def is_CONS_WITH_STACKER(U, UISC, UGC):
return UISC == Consonant_With_Stacker return UISC == Consonant_With_Stacker
def is_HALANT(U, UISC, UGC): def is_HALANT(U, UISC, UGC):
return UISC in [Virama, Invisible_Stacker] and not is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC) return (UISC in [Virama, Invisible_Stacker]
and not is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC)
and not is_SAKOT(U, UISC, UGC))
def is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC): def is_HALANT_OR_VOWEL_MODIFIER(U, UISC, UGC):
# https://github.com/harfbuzz/harfbuzz/issues/1102 # https://github.com/harfbuzz/harfbuzz/issues/1102
# https://github.com/harfbuzz/harfbuzz/issues/1379 # https://github.com/harfbuzz/harfbuzz/issues/1379
@@ -216,6 +232,7 @@ def is_Word_Joiner(U, UISC, UGC):
def is_OTHER(U, UISC, UGC): def is_OTHER(U, UISC, UGC):
#SPEC-OUTDATED return UGC == Zs # or any other SCRIPT_COMMON characters #SPEC-OUTDATED return UGC == Zs # or any other SCRIPT_COMMON characters
return (UISC == Other return (UISC == Other
and not is_SYM(U, UISC, UGC)
and not is_SYM_MOD(U, UISC, UGC) and not is_SYM_MOD(U, UISC, UGC)
and not is_CGJ(U, UISC, UGC) and not is_CGJ(U, UISC, UGC)
and not is_Word_Joiner(U, UISC, UGC) and not is_Word_Joiner(U, UISC, UGC)
@@ -225,20 +242,22 @@ def is_Reserved(U, UISC, UGC):
return UGC == 'Cn' return UGC == 'Cn'
def is_REPHA(U, UISC, UGC): def is_REPHA(U, UISC, UGC):
return UISC in [Consonant_Preceding_Repha, Consonant_Prefixed] return UISC in [Consonant_Preceding_Repha, Consonant_Prefixed]
def is_SAKOT(U, UISC, UGC):
return U == 0x1A60
def is_SYM(U, UISC, UGC): def is_SYM(U, UISC, UGC):
if U == 0x25CC: return False #SPEC-DRAFT if U == 0x25CC: return False #SPEC-DRAFT
#SPEC-DRAFT return UGC in [So, Sc] or UISC == Symbol_Letter #SPEC-DRAFT return UGC in [So, Sc] or UISC == Symbol_Letter
return UGC in [So, Sc] return UGC in [So, Sc] and U not in [0x1B62, 0x1B68]
def is_SYM_MOD(U, UISC, UGC): def is_SYM_MOD(U, UISC, UGC):
return U in [0x1B6B, 0x1B6C, 0x1B6D, 0x1B6E, 0x1B6F, 0x1B70, 0x1B71, 0x1B72, 0x1B73] return U in [0x1B6B, 0x1B6C, 0x1B6D, 0x1B6E, 0x1B6F, 0x1B70, 0x1B71, 0x1B72, 0x1B73]
def is_VARIATION_SELECTOR(U, UISC, UGC): def is_VARIATION_SELECTOR(U, UISC, UGC):
return 0xFE00 <= U <= 0xFE0F return 0xFE00 <= U <= 0xFE0F
def is_VOWEL(U, UISC, UGC): def is_VOWEL(U, UISC, UGC):
# https://github.com/roozbehp/unicode-data/issues/6 # https://github.com/harfbuzz/harfbuzz/issues/376
return (UISC == Pure_Killer or return (UISC == Pure_Killer or
(UGC != Lo and UISC in [Vowel, Vowel_Dependent] and U not in [0xAA29])) (UGC != Lo and UISC in [Vowel, Vowel_Dependent] and U not in [0xAA29]))
def is_VOWEL_MOD(U, UISC, UGC): def is_VOWEL_MOD(U, UISC, UGC):
# https://github.com/roozbehp/unicode-data/issues/6 # https://github.com/harfbuzz/harfbuzz/issues/376
return (UISC in [Tone_Mark, Cantillation_Mark, Register_Shifter, Visarga] or return (UISC in [Tone_Mark, Cantillation_Mark, Register_Shifter, Visarga] or
(UGC != Lo and (UISC == Bindu or U in [0xAA29]))) (UGC != Lo and (UISC == Bindu or U in [0xAA29])))
@@ -264,6 +283,7 @@ use_mapping = {
'Rsv': is_Reserved, 'Rsv': is_Reserved,
'R': is_REPHA, 'R': is_REPHA,
'S': is_SYM, 'S': is_SYM,
'Sk': is_SAKOT,
'SM': is_SYM_MOD, 'SM': is_SYM_MOD,
'VS': is_VARIATION_SELECTOR, 'VS': is_VARIATION_SELECTOR,
'V': is_VOWEL, 'V': is_VOWEL,
@@ -305,7 +325,11 @@ use_positions = {
'H': None, 'H': None,
'HVM': None, 'HVM': None,
'B': None, 'B': None,
'FM': None, 'FM': {
'Abv': [Top],
'Blw': [Bottom],
'Pst': [Not_Applicable],
},
'SUB': None, 'SUB': None,
} }
@@ -344,15 +368,9 @@ def map_to_use(data):
# the nasalization marks, maybe only for U+1CE9..U+1CF1. # the nasalization marks, maybe only for U+1CE9..U+1CF1.
if U == 0x1CED: UISC = Tone_Mark if U == 0x1CED: UISC = Tone_Mark
# TODO: https://github.com/harfbuzz/harfbuzz/issues/525
if U == 0x1A7F: UISC = Consonant_Final
# TODO: https://github.com/harfbuzz/harfbuzz/issues/1105 # TODO: https://github.com/harfbuzz/harfbuzz/issues/1105
if U == 0x11134: UISC = Gemination_Mark if U == 0x11134: UISC = Gemination_Mark
# TODO: https://github.com/harfbuzz/harfbuzz/pull/1399
if U == 0x111C9: UISC = Consonant_Final
values = [k for k,v in items if v(U,UISC,UGC)] values = [k for k,v in items if v(U,UISC,UGC)]
assert len(values) == 1, "%s %s %s %s" % (hex(U), UISC, UGC, values) assert len(values) == 1, "%s %s %s %s" % (hex(U), UISC, UGC, values)
USE = values[0] USE = values[0]

View File

@@ -180,6 +180,9 @@ print ('_hb_preprocess_text_vowel_constraints (const hb_ot_shape_plan_t *plan HB
print ('\t\t\t\t hb_buffer_t *buffer,') print ('\t\t\t\t hb_buffer_t *buffer,')
print ('\t\t\t\t hb_font_t *font HB_UNUSED)') print ('\t\t\t\t hb_font_t *font HB_UNUSED)')
print ('{') print ('{')
print ('#if defined(HB_NO_OT_SHAPE_COMPLEX_VOWEL_CONSTRAINTS)')
print (' return;')
print ('#endif')
print (' if (buffer->flags & HB_BUFFER_FLAG_DO_NOT_INSERT_DOTTED_CIRCLE)') print (' if (buffer->flags & HB_BUFFER_FLAG_DO_NOT_INSERT_DOTTED_CIRCLE)')
print (' return;') print (' return;')
print () print ()

View File

@@ -153,13 +153,13 @@ struct LookupSegmentArray
first <= last && first <= last &&
valuesZ.sanitize (c, base, last - first + 1)); valuesZ.sanitize (c, base, last - first + 1));
} }
template <typename T2> template <typename ...Ts>
bool sanitize (hb_sanitize_context_t *c, const void *base, T2 user_data) const bool sanitize (hb_sanitize_context_t *c, const void *base, Ts&&... ds) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
return_trace (c->check_struct (this) && return_trace (c->check_struct (this) &&
first <= last && first <= last &&
valuesZ.sanitize (c, base, last - first + 1, user_data)); valuesZ.sanitize (c, base, last - first + 1, hb_forward<Ts> (ds)...));
} }
GlyphID last; /* Last GlyphID in this segment */ GlyphID last; /* Last GlyphID in this segment */
@@ -576,7 +576,7 @@ struct StateTable
if (unlikely (stop > states)) if (unlikely (stop > states))
return_trace (false); return_trace (false);
for (const HBUSHORT *p = states; stop < p; p--) for (const HBUSHORT *p = states; stop < p; p--)
num_entries = MAX<unsigned int> (num_entries, *(p - 1) + 1); num_entries = hb_max (num_entries, *(p - 1) + 1);
state_neg = min_state; state_neg = min_state;
} }
} }
@@ -597,7 +597,7 @@ struct StateTable
if (unlikely (stop < states)) if (unlikely (stop < states))
return_trace (false); return_trace (false);
for (const HBUSHORT *p = &states[state_pos * num_classes]; p < stop; p++) for (const HBUSHORT *p = &states[state_pos * num_classes]; p < stop; p++)
num_entries = MAX<unsigned int> (num_entries, *p + 1); num_entries = hb_max (num_entries, *p + 1);
state_pos = max_state + 1; state_pos = max_state + 1;
} }
} }
@@ -611,8 +611,8 @@ struct StateTable
for (const Entry<Extra> *p = &entries[entry]; p < stop; p++) for (const Entry<Extra> *p = &entries[entry]; p < stop; p++)
{ {
int newState = new_state (p->newState); int newState = new_state (p->newState);
min_state = MIN (min_state, newState); min_state = hb_min (min_state, newState);
max_state = MAX (max_state, newState); max_state = hb_max (max_state, newState);
} }
entry = num_entries; entry = num_entries;
} }

View File

@@ -165,7 +165,7 @@ struct feat
unsigned int feature_count = featureNameCount; unsigned int feature_count = featureNameCount;
if (count && *count) if (count && *count)
{ {
unsigned int len = MIN (feature_count - start_offset, *count); unsigned int len = hb_min (feature_count - start_offset, *count);
for (unsigned int i = 0; i < len; i++) for (unsigned int i = 0; i < len; i++)
features[i] = namesZ[i + start_offset].get_feature_type (); features[i] = namesZ[i + start_offset].get_feature_type ();
*count = len; *count = len;

View File

@@ -251,7 +251,7 @@ struct KerxSubTableFormat1
if (Format1EntryT::performAction (entry) && depth) if (Format1EntryT::performAction (entry) && depth)
{ {
unsigned int tuple_count = MAX (1u, table->header.tuple_count ()); unsigned int tuple_count = hb_max (1u, table->header.tuple_count ());
unsigned int kern_idx = Format1EntryT::kernActionIndex (entry); unsigned int kern_idx = Format1EntryT::kernActionIndex (entry);
kern_idx = Types::byteOffsetToIndex (kern_idx, &table->machine, kernAction.arrayZ); kern_idx = Types::byteOffsetToIndex (kern_idx, &table->machine, kernAction.arrayZ);
@@ -771,17 +771,17 @@ struct KerxSubTable
unsigned int get_size () const { return u.header.length; } unsigned int get_size () const { return u.header.length; }
unsigned int get_type () const { return u.header.coverage & u.header.SubtableType; } unsigned int get_type () const { return u.header.coverage & u.header.SubtableType; }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
unsigned int subtable_type = get_type (); unsigned int subtable_type = get_type ();
TRACE_DISPATCH (this, subtable_type); TRACE_DISPATCH (this, subtable_type);
switch (subtable_type) { switch (subtable_type) {
case 0: return_trace (c->dispatch (u.format0)); case 0: return_trace (c->dispatch (u.format0, hb_forward<Ts> (ds)...));
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2)); case 2: return_trace (c->dispatch (u.format2, hb_forward<Ts> (ds)...));
case 4: return_trace (c->dispatch (u.format4)); case 4: return_trace (c->dispatch (u.format4, hb_forward<Ts> (ds)...));
case 6: return_trace (c->dispatch (u.format6)); case 6: return_trace (c->dispatch (u.format6, hb_forward<Ts> (ds)...));
default: return_trace (c->default_return_value ()); default: return_trace (c->default_return_value ());
} }
} }

View File

@@ -88,7 +88,7 @@ struct RearrangementSubtable
start = buffer->idx; start = buffer->idx;
if (flags & MarkLast) if (flags & MarkLast)
end = MIN (buffer->idx + 1, buffer->len); end = hb_min (buffer->idx + 1, buffer->len);
if ((flags & Verb) && start < end) if ((flags & Verb) && start < end)
{ {
@@ -117,14 +117,14 @@ struct RearrangementSubtable
}; };
unsigned int m = map[flags & Verb]; unsigned int m = map[flags & Verb];
unsigned int l = MIN<unsigned int> (2, m >> 4); unsigned int l = hb_min (2u, m >> 4);
unsigned int r = MIN<unsigned int> (2, m & 0x0F); unsigned int r = hb_min (2u, m & 0x0F);
bool reverse_l = 3 == (m >> 4); bool reverse_l = 3 == (m >> 4);
bool reverse_r = 3 == (m & 0x0F); bool reverse_r = 3 == (m & 0x0F);
if (end - start >= l + r) if (end - start >= l + r)
{ {
buffer->merge_clusters (start, MIN (buffer->idx + 1, buffer->len)); buffer->merge_clusters (start, hb_min (buffer->idx + 1, buffer->len));
buffer->merge_clusters (start, end); buffer->merge_clusters (start, end);
hb_glyph_info_t *info = buffer->info; hb_glyph_info_t *info = buffer->info;
@@ -261,13 +261,13 @@ struct ContextualSubtable
} }
if (replacement) if (replacement)
{ {
buffer->unsafe_to_break (mark, MIN (buffer->idx + 1, buffer->len)); buffer->unsafe_to_break (mark, hb_min (buffer->idx + 1, buffer->len));
buffer->info[mark].codepoint = *replacement; buffer->info[mark].codepoint = *replacement;
ret = true; ret = true;
} }
replacement = nullptr; replacement = nullptr;
unsigned int idx = MIN (buffer->idx, buffer->len - 1); unsigned int idx = hb_min (buffer->idx, buffer->len - 1);
if (Types::extended) if (Types::extended)
{ {
if (entry.data.currentIndex != 0xFFFF) if (entry.data.currentIndex != 0xFFFF)
@@ -337,9 +337,9 @@ struct ContextualSubtable
const EntryData &data = entries[i].data; const EntryData &data = entries[i].data;
if (data.markIndex != 0xFFFF) if (data.markIndex != 0xFFFF)
num_lookups = MAX<unsigned int> (num_lookups, 1 + data.markIndex); num_lookups = hb_max (num_lookups, 1 + data.markIndex);
if (data.currentIndex != 0xFFFF) if (data.currentIndex != 0xFFFF)
num_lookups = MAX<unsigned int> (num_lookups, 1 + data.currentIndex); num_lookups = hb_max (num_lookups, 1 + data.currentIndex);
} }
return_trace (substitutionTables.sanitize (c, this, num_lookups)); return_trace (substitutionTables.sanitize (c, this, num_lookups));
@@ -744,7 +744,7 @@ struct InsertionSubtable
buffer->move_to (end + count); buffer->move_to (end + count);
buffer->unsafe_to_break_from_outbuffer (mark, MIN (buffer->idx + 1, buffer->len)); buffer->unsafe_to_break_from_outbuffer (mark, hb_min (buffer->idx + 1, buffer->len));
} }
if (flags & SetMark) if (flags & SetMark)
@@ -883,17 +883,17 @@ struct ChainSubtable
Insertion = 5 Insertion = 5
}; };
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
unsigned int subtable_type = get_type (); unsigned int subtable_type = get_type ();
TRACE_DISPATCH (this, subtable_type); TRACE_DISPATCH (this, subtable_type);
switch (subtable_type) { switch (subtable_type) {
case Rearrangement: return_trace (c->dispatch (u.rearrangement)); case Rearrangement: return_trace (c->dispatch (u.rearrangement, hb_forward<Ts> (ds)...));
case Contextual: return_trace (c->dispatch (u.contextual)); case Contextual: return_trace (c->dispatch (u.contextual, hb_forward<Ts> (ds)...));
case Ligature: return_trace (c->dispatch (u.ligature)); case Ligature: return_trace (c->dispatch (u.ligature, hb_forward<Ts> (ds)...));
case Noncontextual: return_trace (c->dispatch (u.noncontextual)); case Noncontextual: return_trace (c->dispatch (u.noncontextual, hb_forward<Ts> (ds)...));
case Insertion: return_trace (c->dispatch (u.insertion)); case Insertion: return_trace (c->dispatch (u.insertion, hb_forward<Ts> (ds)...));
default: return_trace (c->default_return_value ()); default: return_trace (c->default_return_value ());
} }
} }

View File

@@ -133,7 +133,7 @@ struct TrackData
if (size_table[size_index].to_float () >= csspx) if (size_table[size_index].to_float () >= csspx)
break; break;
return round (interpolate_at (size_index ? size_index - 1 : 0, csspx, return roundf (interpolate_at (size_index ? size_index - 1 : 0, csspx,
*trackTableEntry, base)); *trackTableEntry, base));
} }

View File

@@ -135,6 +135,10 @@ static const hb_aat_feature_mapping_t feature_mappings[] =
const hb_aat_feature_mapping_t * const hb_aat_feature_mapping_t *
hb_aat_layout_find_feature_mapping (hb_tag_t tag) hb_aat_layout_find_feature_mapping (hb_tag_t tag)
{ {
#ifdef HB_NO_SHAPE_AAT
return nullptr;
#endif
return (const hb_aat_feature_mapping_t *) bsearch (&tag, return (const hb_aat_feature_mapping_t *) bsearch (&tag,
feature_mappings, feature_mappings,
ARRAY_LENGTH (feature_mappings), ARRAY_LENGTH (feature_mappings),
@@ -147,6 +151,8 @@ hb_aat_layout_find_feature_mapping (hb_tag_t tag)
* hb_aat_apply_context_t * hb_aat_apply_context_t
*/ */
/* Note: This context is used for kerning, even without AAT. */
AAT::hb_aat_apply_context_t::hb_aat_apply_context_t (const hb_ot_shape_plan_t *plan_, AAT::hb_aat_apply_context_t::hb_aat_apply_context_t (const hb_ot_shape_plan_t *plan_,
hb_font_t *font_, hb_font_t *font_,
hb_buffer_t *buffer_, hb_buffer_t *buffer_,
@@ -183,6 +189,10 @@ void
hb_aat_layout_compile_map (const hb_aat_map_builder_t *mapper, hb_aat_layout_compile_map (const hb_aat_map_builder_t *mapper,
hb_aat_map_t *map) hb_aat_map_t *map)
{ {
#ifdef HB_NO_SHAPE_AAT
return;
#endif
const AAT::morx& morx = *mapper->face->table.morx; const AAT::morx& morx = *mapper->face->table.morx;
if (morx.has_data ()) if (morx.has_data ())
{ {
@@ -209,6 +219,10 @@ hb_aat_layout_compile_map (const hb_aat_map_builder_t *mapper,
hb_bool_t hb_bool_t
hb_aat_layout_has_substitution (hb_face_t *face) hb_aat_layout_has_substitution (hb_face_t *face)
{ {
#ifdef HB_NO_SHAPE_AAT
return false;
#endif
return face->table.morx->has_data () || return face->table.morx->has_data () ||
face->table.mort->has_data (); face->table.mort->has_data ();
} }
@@ -218,6 +232,10 @@ hb_aat_layout_substitute (const hb_ot_shape_plan_t *plan,
hb_font_t *font, hb_font_t *font,
hb_buffer_t *buffer) hb_buffer_t *buffer)
{ {
#ifdef HB_NO_SHAPE_AAT
return;
#endif
hb_blob_t *morx_blob = font->face->table.morx.get_blob (); hb_blob_t *morx_blob = font->face->table.morx.get_blob ();
const AAT::morx& morx = *morx_blob->as<AAT::morx> (); const AAT::morx& morx = *morx_blob->as<AAT::morx> ();
if (morx.has_data ()) if (morx.has_data ())
@@ -240,6 +258,10 @@ hb_aat_layout_substitute (const hb_ot_shape_plan_t *plan,
void void
hb_aat_layout_zero_width_deleted_glyphs (hb_buffer_t *buffer) hb_aat_layout_zero_width_deleted_glyphs (hb_buffer_t *buffer)
{ {
#ifdef HB_NO_SHAPE_AAT
return;
#endif
unsigned int count = buffer->len; unsigned int count = buffer->len;
hb_glyph_info_t *info = buffer->info; hb_glyph_info_t *info = buffer->info;
hb_glyph_position_t *pos = buffer->pos; hb_glyph_position_t *pos = buffer->pos;
@@ -257,6 +279,10 @@ is_deleted_glyph (const hb_glyph_info_t *info)
void void
hb_aat_layout_remove_deleted_glyphs (hb_buffer_t *buffer) hb_aat_layout_remove_deleted_glyphs (hb_buffer_t *buffer)
{ {
#ifdef HB_NO_SHAPE_AAT
return;
#endif
hb_ot_layout_delete_glyphs_inplace (buffer, is_deleted_glyph); hb_ot_layout_delete_glyphs_inplace (buffer, is_deleted_glyph);
} }
@@ -270,6 +296,10 @@ hb_aat_layout_remove_deleted_glyphs (hb_buffer_t *buffer)
hb_bool_t hb_bool_t
hb_aat_layout_has_positioning (hb_face_t *face) hb_aat_layout_has_positioning (hb_face_t *face)
{ {
#ifdef HB_NO_SHAPE_AAT
return false;
#endif
return face->table.kerx->has_data (); return face->table.kerx->has_data ();
} }
@@ -278,6 +308,10 @@ hb_aat_layout_position (const hb_ot_shape_plan_t *plan,
hb_font_t *font, hb_font_t *font,
hb_buffer_t *buffer) hb_buffer_t *buffer)
{ {
#ifdef HB_NO_SHAPE_AAT
return;
#endif
hb_blob_t *kerx_blob = font->face->table.kerx.get_blob (); hb_blob_t *kerx_blob = font->face->table.kerx.get_blob ();
const AAT::kerx& kerx = *kerx_blob->as<AAT::kerx> (); const AAT::kerx& kerx = *kerx_blob->as<AAT::kerx> ();
@@ -297,6 +331,10 @@ hb_aat_layout_position (const hb_ot_shape_plan_t *plan,
hb_bool_t hb_bool_t
hb_aat_layout_has_tracking (hb_face_t *face) hb_aat_layout_has_tracking (hb_face_t *face)
{ {
#ifdef HB_NO_SHAPE_AAT
return false;
#endif
return face->table.trak->has_data (); return face->table.trak->has_data ();
} }
@@ -305,20 +343,16 @@ hb_aat_layout_track (const hb_ot_shape_plan_t *plan,
hb_font_t *font, hb_font_t *font,
hb_buffer_t *buffer) hb_buffer_t *buffer)
{ {
#ifdef HB_NO_SHAPE_AAT
return;
#endif
const AAT::trak& trak = *font->face->table.trak; const AAT::trak& trak = *font->face->table.trak;
AAT::hb_aat_apply_context_t c (plan, font, buffer); AAT::hb_aat_apply_context_t c (plan, font, buffer);
trak.apply (&c); trak.apply (&c);
} }
hb_language_t
_hb_aat_language_get (hb_face_t *face,
unsigned int i)
{
return face->table.ltag->get_language (i);
}
/** /**
* hb_aat_layout_get_feature_types: * hb_aat_layout_get_feature_types:
* @face: a face object * @face: a face object
@@ -336,6 +370,12 @@ hb_aat_layout_get_feature_types (hb_face_t *face,
unsigned int *feature_count, /* IN/OUT. May be NULL. */ unsigned int *feature_count, /* IN/OUT. May be NULL. */
hb_aat_layout_feature_type_t *features /* OUT. May be NULL. */) hb_aat_layout_feature_type_t *features /* OUT. May be NULL. */)
{ {
#ifdef HB_NO_SHAPE_AAT
if (feature_count)
*feature_count = 0;
return 0;
#endif
return face->table.feat->get_feature_types (start_offset, feature_count, features); return face->table.feat->get_feature_types (start_offset, feature_count, features);
} }
@@ -352,6 +392,10 @@ hb_ot_name_id_t
hb_aat_layout_feature_type_get_name_id (hb_face_t *face, hb_aat_layout_feature_type_get_name_id (hb_face_t *face,
hb_aat_layout_feature_type_t feature_type) hb_aat_layout_feature_type_t feature_type)
{ {
#ifdef HB_NO_SHAPE_AAT
return HB_OT_NAME_ID_INVALID;
#endif
return face->table.feat->get_feature_name_id (feature_type); return face->table.feat->get_feature_name_id (feature_type);
} }
@@ -380,5 +424,11 @@ hb_aat_layout_feature_type_get_selector_infos (hb_face_t
hb_aat_layout_feature_selector_info_t *selectors, /* OUT. May be NULL. */ hb_aat_layout_feature_selector_info_t *selectors, /* OUT. May be NULL. */
unsigned int *default_index /* OUT. May be NULL. */) unsigned int *default_index /* OUT. May be NULL. */)
{ {
#ifdef HB_NO_SHAPE_AAT
if (selector_count)
*selector_count = 0;
return 0;
#endif
return face->table.feat->get_selector_infos (feature_type, start_offset, selector_count, selectors, default_index); return face->table.feat->get_selector_infos (feature_type, start_offset, selector_count, selectors, default_index);
} }

View File

@@ -30,7 +30,7 @@
#include "hb.hh" #include "hb.hh"
#include "hb-ot-shape.hh" #include "hb-ot-shape.hh"
#include "hb-aat-ltag-table.hh"
struct hb_aat_feature_mapping_t struct hb_aat_feature_mapping_t
{ {
@@ -39,7 +39,7 @@ struct hb_aat_feature_mapping_t
hb_aat_layout_feature_selector_t selectorToEnable; hb_aat_layout_feature_selector_t selectorToEnable;
hb_aat_layout_feature_selector_t selectorToDisable; hb_aat_layout_feature_selector_t selectorToDisable;
static int cmp (const void *key_, const void *entry_) HB_INTERNAL static int cmp (const void *key_, const void *entry_)
{ {
hb_tag_t key = * (unsigned int *) key_; hb_tag_t key = * (unsigned int *) key_;
const hb_aat_feature_mapping_t * entry = (const hb_aat_feature_mapping_t *) entry_; const hb_aat_feature_mapping_t * entry = (const hb_aat_feature_mapping_t *) entry_;
@@ -77,9 +77,13 @@ hb_aat_layout_track (const hb_ot_shape_plan_t *plan,
hb_font_t *font, hb_font_t *font,
hb_buffer_t *buffer); hb_buffer_t *buffer);
HB_INTERNAL hb_language_t
inline hb_language_t
_hb_aat_language_get (hb_face_t *face, _hb_aat_language_get (hb_face_t *face,
unsigned int i); unsigned int i)
{
return face->table.ltag->get_language (i);
}
#endif /* HB_AAT_LAYOUT_HH */ #endif /* HB_AAT_LAYOUT_HH */

View File

@@ -34,6 +34,10 @@
void hb_aat_map_builder_t::add_feature (hb_tag_t tag, void hb_aat_map_builder_t::add_feature (hb_tag_t tag,
unsigned int value) unsigned int value)
{ {
#ifdef HB_NO_SHAPE_AAT
return;
#endif
if (tag == HB_TAG ('a','a','l','t')) if (tag == HB_TAG ('a','a','l','t'))
{ {
feature_info_t *info = features.push(); feature_info_t *info = features.push();
@@ -53,6 +57,10 @@ void hb_aat_map_builder_t::add_feature (hb_tag_t tag,
void void
hb_aat_map_builder_t::compile (hb_aat_map_t &m) hb_aat_map_builder_t::compile (hb_aat_map_t &m)
{ {
#ifdef HB_NO_SHAPE_AAT
return;
#endif
/* Sort features and merge duplicates */ /* Sort features and merge duplicates */
if (features.length) if (features.length)
{ {

View File

@@ -66,7 +66,7 @@ struct hb_aat_map_builder_t
hb_aat_layout_feature_selector_t setting; hb_aat_layout_feature_selector_t setting;
unsigned seq; /* For stable sorting only. */ unsigned seq; /* For stable sorting only. */
static int cmp (const void *pa, const void *pb) HB_INTERNAL static int cmp (const void *pa, const void *pb)
{ {
const feature_info_t *a = (const feature_info_t *) pa; const feature_info_t *a = (const feature_info_t *) pa;
const feature_info_t *b = (const feature_info_t *) pb; const feature_info_t *b = (const feature_info_t *) pb;
@@ -84,7 +84,7 @@ struct hb_aat_map_builder_t
hb_face_t *face; hb_face_t *face;
public: public:
hb_vector_t<feature_info_t> features; hb_sorted_vector_t<feature_info_t> features;
}; };

View File

@@ -1,5 +1,6 @@
/* /*
* Copyright © 2017 Google, Inc. * Copyright © 2017 Google, Inc.
* Copyright © 2019 Google, Inc.
* *
* This is part of HarfBuzz, a text shaping library. * This is part of HarfBuzz, a text shaping library.
* *
@@ -22,18 +23,318 @@
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
* *
* Google Author(s): Behdad Esfahbod * Google Author(s): Behdad Esfahbod
* Facebook Author(s): Behdad Esfahbod
*/ */
#ifndef HB_DSALGS_HH #ifndef HB_ALGS_HH
#define HB_DSALGS_HH #define HB_ALGS_HH
#include "hb.hh" #include "hb.hh"
#include "hb-meta.hh"
#include "hb-null.hh" #include "hb-null.hh"
/* Void! For when we need a expression-type of void. */ /* Encodes three unsigned integers in one 64-bit number. If the inputs have more than 21 bits,
typedef const struct _hb_void_t *hb_void_t; * values will be truncated / overlap, and might not decode exactly. */
#define HB_VOID ((const _hb_void_t *) nullptr) #define HB_CODEPOINT_ENCODE3(x,y,z) (((uint64_t) (x) << 42) | ((uint64_t) (y) << 21) | (uint64_t) (z))
#define HB_CODEPOINT_DECODE3_1(v) ((hb_codepoint_t) ((v) >> 42))
#define HB_CODEPOINT_DECODE3_2(v) ((hb_codepoint_t) ((v) >> 21) & 0x1FFFFFu)
#define HB_CODEPOINT_DECODE3_3(v) ((hb_codepoint_t) (v) & 0x1FFFFFu)
struct
{
/* Note. This is dangerous in that if it's passed an rvalue, it returns rvalue-reference. */
template <typename T> auto
operator () (T&& v) const HB_AUTO_RETURN ( hb_forward<T> (v) )
}
HB_FUNCOBJ (hb_identity);
struct
{
/* Like identity(), but only retains lvalue-references. Rvalues are returned as rvalues. */
template <typename T> T&
operator () (T& v) const { return v; }
template <typename T> hb_remove_reference<T>
operator () (T&& v) const { return v; }
}
HB_FUNCOBJ (hb_lidentity);
struct
{
/* Like identity(), but always returns rvalue. */
template <typename T> hb_remove_reference<T>
operator () (T&& v) const { return v; }
}
HB_FUNCOBJ (hb_ridentity);
struct
{
template <typename T> bool
operator () (T&& v) const { return bool (hb_forward<T> (v)); }
}
HB_FUNCOBJ (hb_bool);
struct
{
private:
template <typename T> auto
impl (const T& v, hb_priority<1>) const HB_RETURN (uint32_t, hb_deref (v).hash ())
template <typename T,
hb_enable_if (hb_is_integral (T))> auto
impl (const T& v, hb_priority<0>) const HB_AUTO_RETURN
(
/* Knuth's multiplicative method: */
(uint32_t) v * 2654435761u
)
public:
template <typename T> auto
operator () (const T& v) const HB_RETURN (uint32_t, impl (v, hb_prioritize))
}
HB_FUNCOBJ (hb_hash);
struct
{
private:
/* Pointer-to-member-function. */
template <typename Appl, typename T, typename ...Ts> auto
impl (Appl&& a, hb_priority<2>, T &&v, Ts&&... ds) const HB_AUTO_RETURN
((hb_deref (hb_forward<T> (v)).*hb_forward<Appl> (a)) (hb_forward<Ts> (ds)...))
/* Pointer-to-member. */
template <typename Appl, typename T> auto
impl (Appl&& a, hb_priority<1>, T &&v) const HB_AUTO_RETURN
((hb_deref (hb_forward<T> (v))).*hb_forward<Appl> (a))
/* Operator(). */
template <typename Appl, typename ...Ts> auto
impl (Appl&& a, hb_priority<0>, Ts&&... ds) const HB_AUTO_RETURN
(hb_deref (hb_forward<Appl> (a)) (hb_forward<Ts> (ds)...))
public:
template <typename Appl, typename ...Ts> auto
operator () (Appl&& a, Ts&&... ds) const HB_AUTO_RETURN
(
impl (hb_forward<Appl> (a),
hb_prioritize,
hb_forward<Ts> (ds)...)
)
}
HB_FUNCOBJ (hb_invoke);
template <unsigned Pos, typename Appl, typename V>
struct hb_partial_t
{
hb_partial_t (Appl a, V v) : a (a), v (v) {}
static_assert (Pos > 0, "");
template <typename ...Ts,
unsigned P = Pos,
hb_enable_if (P == 1)> auto
operator () (Ts&& ...ds) -> decltype (hb_invoke (hb_declval (Appl),
hb_declval (V),
hb_declval (Ts)...))
{
return hb_invoke (hb_forward<Appl> (a),
hb_forward<V> (v),
hb_forward<Ts> (ds)...);
}
template <typename T0, typename ...Ts,
unsigned P = Pos,
hb_enable_if (P == 2)> auto
operator () (T0&& d0, Ts&& ...ds) -> decltype (hb_invoke (hb_declval (Appl),
hb_declval (T0),
hb_declval (V),
hb_declval (Ts)...))
{
return hb_invoke (hb_forward<Appl> (a),
hb_forward<T0> (d0),
hb_forward<V> (v),
hb_forward<Ts> (ds)...);
}
private:
hb_reference_wrapper<Appl> a;
V v;
};
template <unsigned Pos=1, typename Appl, typename V>
auto hb_partial (Appl&& a, V&& v) HB_AUTO_RETURN
(( hb_partial_t<Pos, Appl, V> (a, v) ))
/* The following hacky replacement version is to make Visual Stuiod build:. */ \
/* https://github.com/harfbuzz/harfbuzz/issues/1730 */ \
#ifdef _MSC_VER
#define HB_PARTIALIZE(Pos) \
template <typename _T> \
decltype(auto) operator () (_T&& _v) const \
{ return hb_partial<Pos> (this, hb_forward<_T> (_v)); } \
static_assert (true, "")
#else
#define HB_PARTIALIZE(Pos) \
template <typename _T> \
auto operator () (_T&& _v) const HB_AUTO_RETURN \
(hb_partial<Pos> (this, hb_forward<_T> (_v))) \
static_assert (true, "")
#endif
struct
{
private:
template <typename Pred, typename Val> auto
impl (Pred&& p, Val &&v, hb_priority<1>) const HB_AUTO_RETURN
(hb_deref (hb_forward<Pred> (p)).has (hb_forward<Val> (v)))
template <typename Pred, typename Val> auto
impl (Pred&& p, Val &&v, hb_priority<0>) const HB_AUTO_RETURN
(
hb_invoke (hb_forward<Pred> (p),
hb_forward<Val> (v))
)
public:
template <typename Pred, typename Val> auto
operator () (Pred&& p, Val &&v) const HB_RETURN (bool,
impl (hb_forward<Pred> (p),
hb_forward<Val> (v),
hb_prioritize)
)
}
HB_FUNCOBJ (hb_has);
struct
{
private:
template <typename Pred, typename Val> auto
impl (Pred&& p, Val &&v, hb_priority<1>) const HB_AUTO_RETURN
(
hb_has (hb_forward<Pred> (p),
hb_forward<Val> (v))
)
template <typename Pred, typename Val> auto
impl (Pred&& p, Val &&v, hb_priority<0>) const HB_AUTO_RETURN
(
hb_forward<Pred> (p) == hb_forward<Val> (v)
)
public:
template <typename Pred, typename Val> auto
operator () (Pred&& p, Val &&v) const HB_RETURN (bool,
impl (hb_forward<Pred> (p),
hb_forward<Val> (v),
hb_prioritize)
)
}
HB_FUNCOBJ (hb_match);
struct
{
private:
template <typename Proj, typename Val> auto
impl (Proj&& f, Val &&v, hb_priority<2>) const HB_AUTO_RETURN
(hb_deref (hb_forward<Proj> (f)).get (hb_forward<Val> (v)))
template <typename Proj, typename Val> auto
impl (Proj&& f, Val &&v, hb_priority<1>) const HB_AUTO_RETURN
(
hb_invoke (hb_forward<Proj> (f),
hb_forward<Val> (v))
)
template <typename Proj, typename Val> auto
impl (Proj&& f, Val &&v, hb_priority<0>) const HB_AUTO_RETURN
(
hb_forward<Proj> (f)[hb_forward<Val> (v)]
)
public:
template <typename Proj, typename Val> auto
operator () (Proj&& f, Val &&v) const HB_AUTO_RETURN
(
impl (hb_forward<Proj> (f),
hb_forward<Val> (v),
hb_prioritize)
)
}
HB_FUNCOBJ (hb_get);
template <typename T1, typename T2>
struct hb_pair_t
{
typedef T1 first_t;
typedef T2 second_t;
typedef hb_pair_t<T1, T2> pair_t;
hb_pair_t (T1 a, T2 b) : first (a), second (b) {}
template <typename Q1, typename Q2,
hb_enable_if (hb_is_convertible (T1, Q1) &&
hb_is_convertible (T2, T2))>
operator hb_pair_t<Q1, Q2> () { return hb_pair_t<Q1, Q2> (first, second); }
hb_pair_t<T1, T2> reverse () const
{ return hb_pair_t<T1, T2> (second, first); }
bool operator == (const pair_t& o) const { return first == o.first && second == o.second; }
bool operator != (const pair_t& o) const { return !(*this == o); }
bool operator < (const pair_t& o) const { return first < o.first || (first == o.first && second < o.second); }
bool operator >= (const pair_t& o) const { return !(*this < o); }
bool operator > (const pair_t& o) const { return first > o.first || (first == o.first && second > o.second); }
bool operator <= (const pair_t& o) const { return !(*this > o); }
T1 first;
T2 second;
};
#define hb_pair_t(T1,T2) hb_pair_t<T1, T2>
template <typename T1, typename T2> static inline hb_pair_t<T1, T2>
hb_pair (T1&& a, T2&& b) { return hb_pair_t<T1, T2> (a, b); }
struct
{
template <typename Pair> typename Pair::first_t
operator () (const Pair& pair) const { return pair.first; }
}
HB_FUNCOBJ (hb_first);
struct
{
template <typename Pair> typename Pair::second_t
operator () (const Pair& pair) const { return pair.second; }
}
HB_FUNCOBJ (hb_second);
/* Note. In min/max impl, we can use hb_type_identity<T> for second argument.
* However, that would silently convert between different-signedness integers.
* Instead we accept two different types, such that compiler can err if
* comparing integers of different signedness. */
struct
{
template <typename T, typename T2> auto
operator () (T&& a, T2&& b) const HB_AUTO_RETURN
(hb_forward<T> (a) <= hb_forward<T2> (b) ? hb_forward<T> (a) : hb_forward<T2> (b))
}
HB_FUNCOBJ (hb_min);
struct
{
template <typename T, typename T2> auto
operator () (T&& a, T2&& b) const HB_AUTO_RETURN
(hb_forward<T> (a) >= hb_forward<T2> (b) ? hb_forward<T> (a) : hb_forward<T2> (b))
}
HB_FUNCOBJ (hb_max);
/* /*
@@ -233,18 +534,6 @@ hb_ctz (T v)
* Tiny stuff. * Tiny stuff.
*/ */
template <typename T>
static inline T* hb_addressof (T& arg)
{
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
/* https://en.cppreference.com/w/cpp/memory/addressof */
return reinterpret_cast<T*>(
&const_cast<char&>(
reinterpret_cast<const volatile char&>(arg)));
#pragma GCC diagnostic pop
}
/* ASCII tag/character handling */ /* ASCII tag/character handling */
static inline bool ISALPHA (unsigned char c) static inline bool ISALPHA (unsigned char c)
{ return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); } { return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z'); }
@@ -257,14 +546,6 @@ static inline unsigned char TOUPPER (unsigned char c)
static inline unsigned char TOLOWER (unsigned char c) static inline unsigned char TOLOWER (unsigned char c)
{ return (c >= 'A' && c <= 'Z') ? c - 'A' + 'a' : c; } { return (c >= 'A' && c <= 'Z') ? c - 'A' + 'a' : c; }
#undef MIN
template <typename Type>
static inline Type MIN (const Type &a, const Type &b) { return a < b ? a : b; }
#undef MAX
template <typename Type>
static inline Type MAX (const Type &a, const Type &b) { return a > b ? a : b; }
static inline unsigned int DIV_CEIL (const unsigned int a, unsigned int b) static inline unsigned int DIV_CEIL (const unsigned int a, unsigned int b)
{ return (a + (b - 1)) / b; } { return (a + (b - 1)) / b; }
@@ -298,17 +579,6 @@ hb_ceil_to_4 (unsigned int v)
return ((v - 1) | 3) + 1; return ((v - 1) | 3) + 1;
} }
template <typename T> struct hb_is_signed;
/* https://github.com/harfbuzz/harfbuzz/issues/1535 */
template <> struct hb_is_signed<int8_t> { enum { value = true }; };
template <> struct hb_is_signed<int16_t> { enum { value = true }; };
template <> struct hb_is_signed<int32_t> { enum { value = true }; };
template <> struct hb_is_signed<int64_t> { enum { value = true }; };
template <> struct hb_is_signed<uint8_t> { enum { value = false }; };
template <> struct hb_is_signed<uint16_t> { enum { value = false }; };
template <> struct hb_is_signed<uint32_t> { enum { value = false }; };
template <> struct hb_is_signed<uint64_t> { enum { value = false }; };
template <typename T> static inline bool template <typename T> static inline bool
hb_in_range (T u, T lo, T hi) hb_in_range (T u, T lo, T hi)
{ {
@@ -482,7 +752,8 @@ static inline void sort_r_simple(void *base, size_t nel, size_t w,
} }
} }
static inline void hb_sort_r(void *base, size_t nel, size_t width, static inline void
hb_sort_r (void *base, size_t nel, size_t width,
int (*compar)(const void *_a, const void *_b, void *_arg), int (*compar)(const void *_a, const void *_b, void *_arg),
void *arg) void *arg)
{ {
@@ -490,8 +761,8 @@ static inline void hb_sort_r(void *base, size_t nel, size_t width,
} }
template <typename T, typename T2> static inline void template <typename T, typename T2, typename T3> static inline void
hb_stable_sort (T *array, unsigned int len, int(*compar)(const T *, const T *), T2 *array2) hb_stable_sort (T *array, unsigned int len, int(*compar)(const T2 *, const T2 *), T3 *array2)
{ {
for (unsigned int i = 1; i < len; i++) for (unsigned int i = 1; i < len; i++)
{ {
@@ -508,8 +779,8 @@ hb_stable_sort (T *array, unsigned int len, int(*compar)(const T *, const T *),
} }
if (array2) if (array2)
{ {
T2 t = array2[i]; T3 t = array2[i];
memmove (&array2[j + 1], &array2[j], (i - j) * sizeof (T2)); memmove (&array2[j + 1], &array2[j], (i - j) * sizeof (T3));
array2[j] = t; array2[j] = t;
} }
} }
@@ -526,7 +797,7 @@ hb_codepoint_parse (const char *s, unsigned int len, int base, hb_codepoint_t *o
{ {
/* Pain because we don't know whether s is nul-terminated. */ /* Pain because we don't know whether s is nul-terminated. */
char buf[64]; char buf[64];
len = MIN (ARRAY_LENGTH (buf) - 1, len); len = hb_min (ARRAY_LENGTH (buf) - 1, len);
strncpy (buf, s, len); strncpy (buf, s, len);
buf[len] = '\0'; buf[len] = '\0';
@@ -540,30 +811,83 @@ hb_codepoint_parse (const char *s, unsigned int len, int base, hb_codepoint_t *o
} }
struct HbOpOr /* Operators. */
{
static constexpr bool passthru_left = true; struct hb_bitwise_and
static constexpr bool passthru_right = true; { HB_PARTIALIZE(2);
template <typename T> static void process (T &o, const T &a, const T &b) { o = a | b; }
};
struct HbOpAnd
{
static constexpr bool passthru_left = false; static constexpr bool passthru_left = false;
static constexpr bool passthru_right = false; static constexpr bool passthru_right = false;
template <typename T> static void process (T &o, const T &a, const T &b) { o = a & b; } template <typename T> auto
}; operator () (const T &a, const T &b) const HB_AUTO_RETURN (a & b)
struct HbOpMinus }
{ HB_FUNCOBJ (hb_bitwise_and);
static constexpr bool passthru_left = true; struct hb_bitwise_or
static constexpr bool passthru_right = false; { HB_PARTIALIZE(2);
template <typename T> static void process (T &o, const T &a, const T &b) { o = a & ~b; }
};
struct HbOpXor
{
static constexpr bool passthru_left = true; static constexpr bool passthru_left = true;
static constexpr bool passthru_right = true; static constexpr bool passthru_right = true;
template <typename T> static void process (T &o, const T &a, const T &b) { o = a ^ b; } template <typename T> auto
}; operator () (const T &a, const T &b) const HB_AUTO_RETURN (a | b)
}
HB_FUNCOBJ (hb_bitwise_or);
struct hb_bitwise_xor
{ HB_PARTIALIZE(2);
static constexpr bool passthru_left = true;
static constexpr bool passthru_right = true;
template <typename T> auto
operator () (const T &a, const T &b) const HB_AUTO_RETURN (a ^ b)
}
HB_FUNCOBJ (hb_bitwise_xor);
struct hb_bitwise_sub
{ HB_PARTIALIZE(2);
static constexpr bool passthru_left = true;
static constexpr bool passthru_right = false;
template <typename T> auto
operator () (const T &a, const T &b) const HB_AUTO_RETURN (a & ~b)
}
HB_FUNCOBJ (hb_bitwise_sub);
struct
{ HB_PARTIALIZE(2);
template <typename T, typename T2> auto
operator () (const T &a, const T2 &b) const HB_AUTO_RETURN (a + b)
}
HB_FUNCOBJ (hb_add);
struct
{ HB_PARTIALIZE(2);
template <typename T, typename T2> auto
operator () (const T &a, const T2 &b) const HB_AUTO_RETURN (a - b)
}
HB_FUNCOBJ (hb_sub);
struct
{ HB_PARTIALIZE(2);
template <typename T, typename T2> auto
operator () (const T &a, const T2 &b) const HB_AUTO_RETURN (a * b)
}
HB_FUNCOBJ (hb_mul);
struct
{ HB_PARTIALIZE(2);
template <typename T, typename T2> auto
operator () (const T &a, const T2 &b) const HB_AUTO_RETURN (a / b)
}
HB_FUNCOBJ (hb_div);
struct
{ HB_PARTIALIZE(2);
template <typename T, typename T2> auto
operator () (const T &a, const T2 &b) const HB_AUTO_RETURN (a % b)
}
HB_FUNCOBJ (hb_mod);
struct
{
template <typename T> auto
operator () (const T &a) const HB_AUTO_RETURN (+a)
}
HB_FUNCOBJ (hb_pos);
struct
{
template <typename T> auto
operator () (const T &a) const HB_AUTO_RETURN (-a)
}
HB_FUNCOBJ (hb_neg);
/* Compiler-assisted vectorization. */ /* Compiler-assisted vectorization. */
@@ -579,26 +903,26 @@ struct hb_vector_size_t
void clear (unsigned char v = 0) { memset (this, v, sizeof (*this)); } void clear (unsigned char v = 0) { memset (this, v, sizeof (*this)); }
template <class Op> template <typename Op>
hb_vector_size_t process (const hb_vector_size_t &o) const hb_vector_size_t process (const Op& op, const hb_vector_size_t &o) const
{ {
hb_vector_size_t r; hb_vector_size_t r;
#if HB_VECTOR_SIZE #if HB_VECTOR_SIZE
if (HB_VECTOR_SIZE && 0 == (byte_size * 8) % HB_VECTOR_SIZE) if (HB_VECTOR_SIZE && 0 == (byte_size * 8) % HB_VECTOR_SIZE)
for (unsigned int i = 0; i < ARRAY_LENGTH (u.vec); i++) for (unsigned int i = 0; i < ARRAY_LENGTH (u.vec); i++)
Op::process (r.u.vec[i], u.vec[i], o.u.vec[i]); r.u.vec[i] = op (u.vec[i], o.u.vec[i]);
else else
#endif #endif
for (unsigned int i = 0; i < ARRAY_LENGTH (u.v); i++) for (unsigned int i = 0; i < ARRAY_LENGTH (u.v); i++)
Op::process (r.u.v[i], u.v[i], o.u.v[i]); r.u.v[i] = op (u.v[i], o.u.v[i]);
return r; return r;
} }
hb_vector_size_t operator | (const hb_vector_size_t &o) const hb_vector_size_t operator | (const hb_vector_size_t &o) const
{ return process<HbOpOr> (o); } { return process (hb_bitwise_or, o); }
hb_vector_size_t operator & (const hb_vector_size_t &o) const hb_vector_size_t operator & (const hb_vector_size_t &o) const
{ return process<HbOpAnd> (o); } { return process (hb_bitwise_and, o); }
hb_vector_size_t operator ^ (const hb_vector_size_t &o) const hb_vector_size_t operator ^ (const hb_vector_size_t &o) const
{ return process<HbOpXor> (o); } { return process (hb_bitwise_xor, o); }
hb_vector_size_t operator ~ () const hb_vector_size_t operator ~ () const
{ {
hb_vector_size_t r; hb_vector_size_t r;
@@ -624,4 +948,4 @@ struct hb_vector_size_t
}; };
#endif /* HB_DSALGS_HH */ #endif /* HB_ALGS_HH */

View File

@@ -28,7 +28,7 @@
#define HB_ARRAY_HH #define HB_ARRAY_HH
#include "hb.hh" #include "hb.hh"
#include "hb-dsalgs.hh" #include "hb-algs.hh"
#include "hb-iter.hh" #include "hb-iter.hh"
#include "hb-null.hh" #include "hb-null.hh"
@@ -37,22 +37,31 @@ template <typename Type>
struct hb_sorted_array_t; struct hb_sorted_array_t;
template <typename Type> template <typename Type>
struct hb_array_t : struct hb_array_t : hb_iter_with_fallback_t<hb_array_t<Type>, Type&>
hb_iter_t<hb_array_t<Type>, Type>,
hb_iter_mixin_t<hb_array_t<Type>, Type>
{ {
/* /*
* Constructors. * Constructors.
*/ */
hb_array_t () : arrayZ (nullptr), length (0) {} hb_array_t () : arrayZ (nullptr), length (0), backwards_length (0) {}
hb_array_t (Type *array_, unsigned int length_) : arrayZ (array_), length (length_) {} hb_array_t (Type *array_, unsigned int length_) : arrayZ (array_), length (length_), backwards_length (0) {}
template <unsigned int length_> hb_array_t (Type (&array_)[length_]) : arrayZ (array_), length (length_) {} template <unsigned int length_>
hb_array_t (Type (&array_)[length_]) : arrayZ (array_), length (length_), backwards_length (0) {}
template <typename U,
hb_enable_if (hb_is_cr_convertible(U, Type))>
hb_array_t (const hb_array_t<U> &o) :
hb_iter_with_fallback_t<hb_array_t<Type>, Type&> (),
arrayZ (o.arrayZ), length (o.length), backwards_length (o.backwards_length) {}
template <typename U,
hb_enable_if (hb_is_cr_convertible(U, Type))>
hb_array_t& operator = (const hb_array_t<U> &o)
{ arrayZ = o.arrayZ; length = o.length; backwards_length = o.backwards_length; return *this; }
/* /*
* Iterator implementation. * Iterator implementation.
*/ */
typedef Type __item_type__; typedef Type& __item_t__;
static constexpr bool is_random_access_iterator = true;
Type& __item_at__ (unsigned i) const Type& __item_at__ (unsigned i) const
{ {
if (unlikely (i >= length)) return CrapOrNull (Type); if (unlikely (i >= length)) return CrapOrNull (Type);
@@ -63,16 +72,25 @@ struct hb_array_t :
if (unlikely (n > length)) if (unlikely (n > length))
n = length; n = length;
length -= n; length -= n;
backwards_length += n;
arrayZ += n; arrayZ += n;
} }
void __rewind__ (unsigned n) void __rewind__ (unsigned n)
{ {
if (unlikely (n > length)) if (unlikely (n > backwards_length))
n = length; n = backwards_length;
length -= n; length += n;
backwards_length -= n;
arrayZ -= n;
} }
unsigned __len__ () const { return length; } unsigned __len__ () const { return length; }
bool __random_access__ () const { return true; } /* Ouch. The operator== compares the contents of the array. For range-based for loops,
* it's best if we can just compare arrayZ, though comparing contents is still fast,
* but also would require that Type has operator==. As such, we optimize this operator
* for range-based for loop and just compare arrayZ. No need to compare length, as we
* assume we're only compared to .end(). */
bool operator != (const hb_array_t& o) const
{ return arrayZ != o.arrayZ; }
/* Extra operators. /* Extra operators.
*/ */
@@ -80,6 +98,9 @@ struct hb_array_t :
operator hb_array_t<const Type> () { return hb_array_t<const Type> (arrayZ, length); } operator hb_array_t<const Type> () { return hb_array_t<const Type> (arrayZ, length); }
template <typename T> operator T * () const { return arrayZ; } template <typename T> operator T * () const { return arrayZ; }
HB_INTERNAL bool operator == (const hb_array_t &o) const;
HB_INTERNAL uint32_t hash () const;
/* /*
* Compare, Sort, and Search. * Compare, Sort, and Search.
*/ */
@@ -91,7 +112,7 @@ struct hb_array_t :
return (int) a.length - (int) length; return (int) a.length - (int) length;
return hb_memcmp (a.arrayZ, arrayZ, get_size ()); return hb_memcmp (a.arrayZ, arrayZ, get_size ());
} }
static int cmp (const void *pa, const void *pb) HB_INTERNAL static int cmp (const void *pa, const void *pb)
{ {
hb_array_t<Type> *a = (hb_array_t<Type> *) pa; hb_array_t<Type> *a = (hb_array_t<Type> *) pa;
hb_array_t<Type> *b = (hb_array_t<Type> *) pb; hb_array_t<Type> *b = (hb_array_t<Type> *) pb;
@@ -131,7 +152,7 @@ struct hb_array_t :
} }
void qsort (unsigned int start, unsigned int end) void qsort (unsigned int start, unsigned int end)
{ {
end = MIN (end, length); end = hb_min (end, length);
assert (start <= end); assert (start <= end);
if (likely (start < end)) if (likely (start < end))
::qsort (arrayZ + start, end - start, this->item_size, Type::cmp); ::qsort (arrayZ + start, end - start, this->item_size, Type::cmp);
@@ -154,7 +175,7 @@ struct hb_array_t :
else else
count -= start_offset; count -= start_offset;
if (seg_count) if (seg_count)
count = *seg_count = MIN (count, *seg_count); count = *seg_count = hb_min (count, *seg_count);
return hb_array_t<Type> (arrayZ + start_offset, count); return hb_array_t<Type> (arrayZ + start_offset, count);
} }
hb_array_t<Type> sub_array (unsigned int start_offset, unsigned int seg_count) const hb_array_t<Type> sub_array (unsigned int start_offset, unsigned int seg_count) const
@@ -164,6 +185,17 @@ struct hb_array_t :
void free () void free ()
{ ::free ((void *) arrayZ); arrayZ = nullptr; length = 0; } { ::free ((void *) arrayZ); arrayZ = nullptr; length = 0; }
template <typename hb_serialize_context_t>
hb_array_t copy (hb_serialize_context_t *c) const
{
TRACE_SERIALIZE (this);
auto* out = c->start_embed (arrayZ);
if (unlikely (!c->extend_size (out, get_size ()))) return_trace (hb_array_t ());
for (unsigned i = 0; i < length; i++)
out[i] = arrayZ[i]; /* TODO: add version that calls c->copy() */
return_trace (hb_array_t (out, length));
}
template <typename hb_sanitize_context_t> template <typename hb_sanitize_context_t>
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
{ return c->check_array (arrayZ, length); } { return c->check_array (arrayZ, length); }
@@ -175,6 +207,7 @@ struct hb_array_t :
public: public:
Type *arrayZ; Type *arrayZ;
unsigned int length; unsigned int length;
unsigned int backwards_length;
}; };
template <typename T> inline hb_array_t<T> template <typename T> inline hb_array_t<T>
hb_array (T *array, unsigned int length) hb_array (T *array, unsigned int length)
@@ -183,7 +216,6 @@ template <typename T, unsigned int length_> inline hb_array_t<T>
hb_array (T (&array_)[length_]) hb_array (T (&array_)[length_])
{ return hb_array_t<T> (array_); } { return hb_array_t<T> (array_); }
enum hb_bfind_not_found_t enum hb_bfind_not_found_t
{ {
HB_BFIND_NOT_FOUND_DONT_STORE, HB_BFIND_NOT_FOUND_DONT_STORE,
@@ -193,14 +225,32 @@ enum hb_bfind_not_found_t
template <typename Type> template <typename Type>
struct hb_sorted_array_t : struct hb_sorted_array_t :
hb_sorted_iter_t<hb_sorted_array_t<Type>, Type>, hb_iter_t<hb_sorted_array_t<Type>, Type&>,
hb_array_t<Type>, hb_array_t<Type>
hb_iter_mixin_t<hb_sorted_array_t<Type>, Type>
{ {
typedef hb_iter_t<hb_sorted_array_t<Type>, Type&> iter_base_t;
HB_ITER_USING (iter_base_t);
static constexpr bool is_random_access_iterator = true;
static constexpr bool is_sorted_iterator = true;
hb_sorted_array_t () : hb_array_t<Type> () {} hb_sorted_array_t () : hb_array_t<Type> () {}
hb_sorted_array_t (const hb_array_t<Type> &o) : hb_array_t<Type> (o) {}
hb_sorted_array_t (Type *array_, unsigned int length_) : hb_array_t<Type> (array_, length_) {} hb_sorted_array_t (Type *array_, unsigned int length_) : hb_array_t<Type> (array_, length_) {}
template <unsigned int length_> hb_sorted_array_t (Type (&array_)[length_]) : hb_array_t<Type> (array_) {} template <unsigned int length_>
hb_sorted_array_t (Type (&array_)[length_]) : hb_array_t<Type> (array_) {}
template <typename U,
hb_enable_if (hb_is_cr_convertible(U, Type))>
hb_sorted_array_t (const hb_array_t<U> &o) :
hb_iter_t<hb_sorted_array_t<Type>, Type&> (),
hb_array_t<Type> (o) {}
template <typename U,
hb_enable_if (hb_is_cr_convertible(U, Type))>
hb_sorted_array_t& operator = (const hb_array_t<U> &o)
{ hb_array_t<Type> (*this) = o; return *this; }
/* Iterator implementation. */
bool operator != (const hb_sorted_array_t& o) const
{ return this->arrayZ != o.arrayZ || this->length != o.length; }
hb_sorted_array_t<Type> sub_array (unsigned int start_offset, unsigned int *seg_count /* IN/OUT */) const hb_sorted_array_t<Type> sub_array (unsigned int start_offset, unsigned int *seg_count /* IN/OUT */) const
{ return hb_sorted_array_t<Type> (((const hb_array_t<Type> *) (this))->sub_array (start_offset, seg_count)); } { return hb_sorted_array_t<Type> (((const hb_array_t<Type> *) (this))->sub_array (start_offset, seg_count)); }
@@ -269,9 +319,30 @@ template <typename T, unsigned int length_> inline hb_sorted_array_t<T>
hb_sorted_array (T (&array_)[length_]) hb_sorted_array (T (&array_)[length_])
{ return hb_sorted_array_t<T> (array_); } { return hb_sorted_array_t<T> (array_); }
template <typename T>
bool hb_array_t<T>::operator == (const hb_array_t<T> &o) const
{
return length == o.length &&
+ hb_zip (*this, o)
| hb_map ([] (hb_pair_t<T&, T&> &&_) { return _.first == _.second; })
| hb_all
;
}
template <typename T>
uint32_t hb_array_t<T>::hash () const
{
return
+ hb_iter (*this)
| hb_map (hb_hash)
| hb_reduce ([] (uint32_t a, uint32_t b) { return a * 31 + b; }, 0)
;
}
typedef hb_array_t<const char> hb_bytes_t; typedef hb_array_t<const char> hb_bytes_t;
typedef hb_array_t<const unsigned char> hb_ubytes_t; typedef hb_array_t<const unsigned char> hb_ubytes_t;
/* TODO Specialize opeator==/hash() for hb_bytes_t and hb_ubytes_t. */
//template <>
//uint32_t hb_array_t<const char>::hash () const { return 0; }
#endif /* HB_ARRAY_HH */ #endif /* HB_ARRAY_HH */

View File

@@ -33,6 +33,7 @@
#define HB_ATOMIC_HH #define HB_ATOMIC_HH
#include "hb.hh" #include "hb.hh"
#include "hb-meta.hh"
/* /*
@@ -106,7 +107,7 @@ _hb_atomic_ptr_impl_cmplexch (const void **P, const void *O_, const void *N)
static inline void _hb_memory_barrier () static inline void _hb_memory_barrier ()
{ {
#if !defined(MemoryBarrier) #ifndef MemoryBarrier
/* MinGW has a convoluted history of supporting MemoryBarrier. */ /* MinGW has a convoluted history of supporting MemoryBarrier. */
LONG dummy = 0; LONG dummy = 0;
InterlockedExchange (&dummy, 1); InterlockedExchange (&dummy, 1);
@@ -215,7 +216,7 @@ static_assert ((sizeof (long) == sizeof (void *)), "");
#define HB_ATOMIC_INT_NIL 1 /* Warn that fallback implementation is in use. */ #define HB_ATOMIC_INT_NIL 1 /* Warn that fallback implementation is in use. */
#define _hb_memory_barrier() #define _hb_memory_barrier() do {} while (0)
#define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V)) #define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V))
@@ -226,7 +227,7 @@ static_assert ((sizeof (long) == sizeof (void *)), "");
#define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V)) #define hb_atomic_int_impl_add(AI, V) ((*(AI) += (V)) - (V))
#define _hb_memory_barrier() #define _hb_memory_barrier() do {} while (0)
#define hb_atomic_ptr_impl_cmpexch(P,O,N) (* (void **) (P) == (void *) (O) ? (* (void **) (P) = (void *) (N), true) : false) #define hb_atomic_ptr_impl_cmpexch(P,O,N) (* (void **) (P) == (void *) (O) ? (* (void **) (P) = (void *) (N), true) : false)
@@ -282,7 +283,7 @@ struct hb_atomic_int_t
template <typename P> template <typename P>
struct hb_atomic_ptr_t struct hb_atomic_ptr_t
{ {
typedef typename hb_remove_pointer (P) T; typedef hb_remove_pointer<P> T;
void init (T* v_ = nullptr) { set_relaxed (v_); } void init (T* v_ = nullptr) { set_relaxed (v_); }
void set_relaxed (T* v_) { hb_atomic_ptr_impl_set_relaxed (&v, v_); } void set_relaxed (T* v_) { hb_atomic_ptr_impl_set_relaxed (&v, v_); }

View File

@@ -30,7 +30,7 @@
* http://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html * http://www.gnu.org/software/libc/manual/html_node/Feature-Test-Macros.html
* https://www.oracle.com/technetwork/articles/servers-storage-dev/standardheaderfiles-453865.html * https://www.oracle.com/technetwork/articles/servers-storage-dev/standardheaderfiles-453865.html
*/ */
#ifndef _POSIX_C_SOURCE #if !defined(_POSIX_C_SOURCE) && !defined(_MSC_VER) && !defined(__NetBSD__)
#pragma GCC diagnostic push #pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-macros" #pragma GCC diagnostic ignored "-Wunused-macros"
#define _POSIX_C_SOURCE 200809L #define _POSIX_C_SOURCE 200809L
@@ -155,7 +155,7 @@ hb_blob_create_sub_blob (hb_blob_t *parent,
hb_blob_make_immutable (parent); hb_blob_make_immutable (parent);
blob = hb_blob_create (parent->data + offset, blob = hb_blob_create (parent->data + offset,
MIN (length, parent->length - offset), hb_min (length, parent->length - offset),
HB_MEMORY_MODE_READONLY, HB_MEMORY_MODE_READONLY,
hb_blob_reference (parent), hb_blob_reference (parent),
_hb_blob_destroy); _hb_blob_destroy);

View File

@@ -71,6 +71,9 @@ hb_blob_create (const char *data,
void *user_data, void *user_data,
hb_destroy_func_t destroy); hb_destroy_func_t destroy);
HB_EXTERN hb_blob_t *
hb_blob_create_from_file (const char *file_name);
/* Always creates with MEMORY_MODE_READONLY. /* Always creates with MEMORY_MODE_READONLY.
* Even if the parent blob is writable, we don't * Even if the parent blob is writable, we don't
* want the user of the sub-blob to be able to * want the user of the sub-blob to be able to
@@ -123,9 +126,6 @@ hb_blob_get_data (hb_blob_t *blob, unsigned int *length);
HB_EXTERN char * HB_EXTERN char *
hb_blob_get_data_writable (hb_blob_t *blob, unsigned int *length); hb_blob_get_data_writable (hb_blob_t *blob, unsigned int *length);
HB_EXTERN hb_blob_t *
hb_blob_create_from_file (const char *file_name);
HB_END_DECLS HB_END_DECLS
#endif /* HB_BLOB_H */ #endif /* HB_BLOB_H */

View File

@@ -81,7 +81,7 @@ struct hb_blob_t
template <typename P> template <typename P>
struct hb_blob_ptr_t struct hb_blob_ptr_t
{ {
typedef typename hb_remove_pointer (P) T; typedef hb_remove_pointer<P> T;
hb_blob_ptr_t (hb_blob_t *b_ = nullptr) : b (b_) {} hb_blob_ptr_t (hb_blob_t *b_ = nullptr) : b (b_) {}
hb_blob_t * operator = (hb_blob_t *b_) { return b = b_; } hb_blob_t * operator = (hb_blob_t *b_) { return b = b_; }

View File

@@ -28,8 +28,10 @@
static const char *serialize_formats[] = { static const char *serialize_formats[] = {
#ifndef HB_NO_BUFFER_SERIALIZE
"text", "text",
"json", "json",
#endif
nullptr nullptr
}; };
@@ -85,10 +87,12 @@ hb_buffer_serialize_format_from_string (const char *str, int len)
const char * const char *
hb_buffer_serialize_format_to_string (hb_buffer_serialize_format_t format) hb_buffer_serialize_format_to_string (hb_buffer_serialize_format_t format)
{ {
switch (format) switch ((unsigned) format)
{ {
#ifndef HB_NO_BUFFER_SERIALIZE
case HB_BUFFER_SERIALIZE_FORMAT_TEXT: return serialize_formats[0]; case HB_BUFFER_SERIALIZE_FORMAT_TEXT: return serialize_formats[0];
case HB_BUFFER_SERIALIZE_FORMAT_JSON: return serialize_formats[1]; case HB_BUFFER_SERIALIZE_FORMAT_JSON: return serialize_formats[1];
#endif
default: default:
case HB_BUFFER_SERIALIZE_FORMAT_INVALID: return nullptr; case HB_BUFFER_SERIALIZE_FORMAT_INVALID: return nullptr;
} }
@@ -138,34 +142,34 @@ _hb_buffer_serialize_glyphs_json (hb_buffer_t *buffer,
*p++ = '"'; *p++ = '"';
} }
else else
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "%u", info[i].codepoint)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "%u", info[i].codepoint));
if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_CLUSTERS)) { if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_CLUSTERS)) {
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"cl\":%u", info[i].cluster)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"cl\":%u", info[i].cluster));
} }
if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_POSITIONS)) if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_POSITIONS))
{ {
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"dx\":%d,\"dy\":%d", p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"dx\":%d,\"dy\":%d",
x+pos[i].x_offset, y+pos[i].y_offset)); x+pos[i].x_offset, y+pos[i].y_offset));
if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_ADVANCES)) if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_ADVANCES))
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"ax\":%d,\"ay\":%d", p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"ax\":%d,\"ay\":%d",
pos[i].x_advance, pos[i].y_advance)); pos[i].x_advance, pos[i].y_advance));
} }
if (flags & HB_BUFFER_SERIALIZE_FLAG_GLYPH_FLAGS) if (flags & HB_BUFFER_SERIALIZE_FLAG_GLYPH_FLAGS)
{ {
if (info[i].mask & HB_GLYPH_FLAG_DEFINED) if (info[i].mask & HB_GLYPH_FLAG_DEFINED)
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"fl\":%u", info[i].mask & HB_GLYPH_FLAG_DEFINED)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"fl\":%u", info[i].mask & HB_GLYPH_FLAG_DEFINED));
} }
if (flags & HB_BUFFER_SERIALIZE_FLAG_GLYPH_EXTENTS) if (flags & HB_BUFFER_SERIALIZE_FLAG_GLYPH_EXTENTS)
{ {
hb_glyph_extents_t extents; hb_glyph_extents_t extents;
hb_font_get_glyph_extents(font, info[i].codepoint, &extents); hb_font_get_glyph_extents(font, info[i].codepoint, &extents);
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"xb\":%d,\"yb\":%d", p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"xb\":%d,\"yb\":%d",
extents.x_bearing, extents.y_bearing)); extents.x_bearing, extents.y_bearing));
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"w\":%d,\"h\":%d", p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",\"w\":%d,\"h\":%d",
extents.width, extents.height)); extents.width, extents.height));
} }
@@ -224,37 +228,37 @@ _hb_buffer_serialize_glyphs_text (hb_buffer_t *buffer,
p += strlen (p); p += strlen (p);
} }
else else
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "%u", info[i].codepoint)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "%u", info[i].codepoint));
if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_CLUSTERS)) { if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_CLUSTERS)) {
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "=%u", info[i].cluster)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "=%u", info[i].cluster));
} }
if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_POSITIONS)) if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_POSITIONS))
{ {
if (x+pos[i].x_offset || y+pos[i].y_offset) if (x+pos[i].x_offset || y+pos[i].y_offset)
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "@%d,%d", x+pos[i].x_offset, y+pos[i].y_offset)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "@%d,%d", x+pos[i].x_offset, y+pos[i].y_offset));
if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_ADVANCES)) if (!(flags & HB_BUFFER_SERIALIZE_FLAG_NO_ADVANCES))
{ {
*p++ = '+'; *p++ = '+';
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "%d", pos[i].x_advance)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "%d", pos[i].x_advance));
if (pos[i].y_advance) if (pos[i].y_advance)
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",%d", pos[i].y_advance)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), ",%d", pos[i].y_advance));
} }
} }
if (flags & HB_BUFFER_SERIALIZE_FLAG_GLYPH_FLAGS) if (flags & HB_BUFFER_SERIALIZE_FLAG_GLYPH_FLAGS)
{ {
if (info[i].mask & HB_GLYPH_FLAG_DEFINED) if (info[i].mask & HB_GLYPH_FLAG_DEFINED)
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "#%X", info[i].mask &HB_GLYPH_FLAG_DEFINED)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "#%X", info[i].mask &HB_GLYPH_FLAG_DEFINED));
} }
if (flags & HB_BUFFER_SERIALIZE_FLAG_GLYPH_EXTENTS) if (flags & HB_BUFFER_SERIALIZE_FLAG_GLYPH_EXTENTS)
{ {
hb_glyph_extents_t extents; hb_glyph_extents_t extents;
hb_font_get_glyph_extents(font, info[i].codepoint, &extents); hb_font_get_glyph_extents(font, info[i].codepoint, &extents);
p += MAX (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "<%d,%d,%d,%d>", extents.x_bearing, extents.y_bearing, extents.width, extents.height)); p += hb_max (0, snprintf (p, ARRAY_LENGTH (b) - (p - b), "<%d,%d,%d,%d>", extents.x_bearing, extents.y_bearing, extents.width, extents.height));
} }
unsigned int l = p - b; unsigned int l = p - b;
@@ -344,6 +348,10 @@ hb_buffer_serialize_glyphs (hb_buffer_t *buffer,
if (buf_size) if (buf_size)
*buf = '\0'; *buf = '\0';
#ifdef HB_NO_BUFFER_SERIALIZE
return 0;
#endif
assert ((!buffer->len && buffer->content_type == HB_BUFFER_CONTENT_TYPE_INVALID) || assert ((!buffer->len && buffer->content_type == HB_BUFFER_CONTENT_TYPE_INVALID) ||
buffer->content_type == HB_BUFFER_CONTENT_TYPE_GLYPHS); buffer->content_type == HB_BUFFER_CONTENT_TYPE_GLYPHS);
@@ -380,7 +388,7 @@ static hb_bool_t
parse_uint (const char *pp, const char *end, uint32_t *pv) parse_uint (const char *pp, const char *end, uint32_t *pv)
{ {
char buf[32]; char buf[32];
unsigned int len = MIN (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - pp)); unsigned int len = hb_min (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - pp));
strncpy (buf, pp, len); strncpy (buf, pp, len);
buf[len] = '\0'; buf[len] = '\0';
@@ -401,7 +409,7 @@ static hb_bool_t
parse_int (const char *pp, const char *end, int32_t *pv) parse_int (const char *pp, const char *end, int32_t *pv)
{ {
char buf[32]; char buf[32];
unsigned int len = MIN (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - pp)); unsigned int len = hb_min (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - pp));
strncpy (buf, pp, len); strncpy (buf, pp, len);
buf[len] = '\0'; buf[len] = '\0';
@@ -449,6 +457,10 @@ hb_buffer_deserialize_glyphs (hb_buffer_t *buffer,
end_ptr = &end; end_ptr = &end;
*end_ptr = buf; *end_ptr = buf;
#ifdef HB_NO_BUFFER_SERIALIZE
return false;
#endif
assert ((!buffer->len && buffer->content_type == HB_BUFFER_CONTENT_TYPE_INVALID) || assert ((!buffer->len && buffer->content_type == HB_BUFFER_CONTENT_TYPE_INVALID) ||
buffer->content_type == HB_BUFFER_CONTENT_TYPE_GLYPHS); buffer->content_type == HB_BUFFER_CONTENT_TYPE_GLYPHS);

View File

@@ -524,7 +524,7 @@ hb_buffer_t::merge_clusters_impl (unsigned int start,
unsigned int cluster = info[start].cluster; unsigned int cluster = info[start].cluster;
for (unsigned int i = start + 1; i < end; i++) for (unsigned int i = start + 1; i < end; i++)
cluster = MIN<unsigned int> (cluster, info[i].cluster); cluster = hb_min (cluster, info[i].cluster);
/* Extend end */ /* Extend end */
while (end < len && info[end - 1].cluster == info[end].cluster) while (end < len && info[end - 1].cluster == info[end].cluster)
@@ -555,7 +555,7 @@ hb_buffer_t::merge_out_clusters (unsigned int start,
unsigned int cluster = out_info[start].cluster; unsigned int cluster = out_info[start].cluster;
for (unsigned int i = start + 1; i < end; i++) for (unsigned int i = start + 1; i < end; i++)
cluster = MIN<unsigned int> (cluster, out_info[i].cluster); cluster = hb_min (cluster, out_info[i].cluster);
/* Extend start */ /* Extend start */
while (start && out_info[start - 1].cluster == out_info[start].cluster) while (start && out_info[start - 1].cluster == out_info[start].cluster)

View File

@@ -379,7 +379,7 @@ struct hb_buffer_t
unsigned int cluster) const unsigned int cluster) const
{ {
for (unsigned int i = start; i < end; i++) for (unsigned int i = start; i < end; i++)
cluster = MIN<unsigned int> (cluster, infos[i].cluster); cluster = hb_min (cluster, infos[i].cluster);
return cluster; return cluster;
} }
void void

View File

@@ -272,11 +272,11 @@ struct UnsizedByteStr : UnsizedArrayOf <HBUINT8>
HBUINT8 *p = c->allocate_size<HBUINT8> (1); HBUINT8 *p = c->allocate_size<HBUINT8> (1);
if (unlikely (p == nullptr)) return_trace (false); if (unlikely (p == nullptr)) return_trace (false);
p->set (intOp); *p = intOp;
INTTYPE *ip = c->allocate_size<INTTYPE> (INTTYPE::static_size); INTTYPE *ip = c->allocate_size<INTTYPE> (INTTYPE::static_size);
if (unlikely (ip == nullptr)) return_trace (false); if (unlikely (ip == nullptr)) return_trace (false);
ip->set ((unsigned int)value); *ip = (unsigned int) value;
return_trace (true); return_trace (true);
} }
@@ -691,7 +691,7 @@ struct opset_t
case OpCode_TwoByteNegInt0: case OpCode_TwoByteNegInt1: case OpCode_TwoByteNegInt0: case OpCode_TwoByteNegInt1:
case OpCode_TwoByteNegInt2: case OpCode_TwoByteNegInt3: case OpCode_TwoByteNegInt2: case OpCode_TwoByteNegInt3:
env.argStack.push_int ((int16_t)(-(op - OpCode_TwoByteNegInt0) * 256 - env.str_ref[0] - 108)); env.argStack.push_int ((-(int16_t)(op - OpCode_TwoByteNegInt0) * 256 - env.str_ref[0] - 108));
env.str_ref.inc (); env.str_ref.inc ();
break; break;

View File

@@ -147,8 +147,9 @@ struct cs_interp_env_t : interp_env_t<ARG>
return callStack.in_error () || SUPER::in_error (); return callStack.in_error () || SUPER::in_error ();
} }
bool popSubrNum (const biased_subrs_t<SUBRS>& biasedSubrs, unsigned int &subr_num) bool pop_subr_num (const biased_subrs_t<SUBRS>& biasedSubrs, unsigned int &subr_num)
{ {
subr_num = 0;
int n = SUPER::argStack.pop_int (); int n = SUPER::argStack.pop_int ();
n += biasedSubrs.get_bias (); n += biasedSubrs.get_bias ();
if (unlikely ((n < 0) || ((unsigned int)n >= biasedSubrs.get_count ()))) if (unlikely ((n < 0) || ((unsigned int)n >= biasedSubrs.get_count ())))
@@ -158,11 +159,11 @@ struct cs_interp_env_t : interp_env_t<ARG>
return true; return true;
} }
void callSubr (const biased_subrs_t<SUBRS>& biasedSubrs, cs_type_t type) void call_subr (const biased_subrs_t<SUBRS>& biasedSubrs, cs_type_t type)
{ {
unsigned int subr_num; unsigned int subr_num = 0;
if (unlikely (!popSubrNum (biasedSubrs, subr_num) if (unlikely (!pop_subr_num (biasedSubrs, subr_num)
|| callStack.get_count () >= kMaxCallLimit)) || callStack.get_count () >= kMaxCallLimit))
{ {
SUPER::set_error (); SUPER::set_error ();
@@ -175,7 +176,7 @@ struct cs_interp_env_t : interp_env_t<ARG>
SUPER::str_ref = context.str_ref; SUPER::str_ref = context.str_ref;
} }
void returnFromSubr () void return_from_subr ()
{ {
if (unlikely (SUPER::str_ref.in_error ())) if (unlikely (SUPER::str_ref.in_error ()))
SUPER::set_error (); SUPER::set_error ();
@@ -254,7 +255,7 @@ struct cs_opset_t : opset_t<ARG>
switch (op) { switch (op) {
case OpCode_return: case OpCode_return:
env.returnFromSubr (); env.return_from_subr ();
break; break;
case OpCode_endchar: case OpCode_endchar:
OPSET::check_width (op, env, param); OPSET::check_width (op, env, param);
@@ -267,11 +268,11 @@ struct cs_opset_t : opset_t<ARG>
break; break;
case OpCode_callsubr: case OpCode_callsubr:
env.callSubr (env.localSubrs, CSType_LocalSubr); env.call_subr (env.localSubrs, CSType_LocalSubr);
break; break;
case OpCode_callgsubr: case OpCode_callgsubr:
env.callSubr (env.globalSubrs, CSType_GlobalSubr); env.call_subr (env.globalSubrs, CSType_GlobalSubr);
break; break;
case OpCode_hstem: case OpCode_hstem:

View File

@@ -67,7 +67,7 @@ _hb_options_init ()
p = c + strlen (c); p = c + strlen (c);
#define OPTION(name, symbol) \ #define OPTION(name, symbol) \
if (0 == strncmp (c, name, p - c) && strlen (name) == p - c) u.opts.symbol = true; if (0 == strncmp (c, name, p - c) && strlen (name) == p - c) do { u.opts.symbol = true; } while (0)
OPTION ("uniscribe-bug-compatible", uniscribe_bug_compatible); OPTION ("uniscribe-bug-compatible", uniscribe_bug_compatible);
OPTION ("aat", aat); OPTION ("aat", aat);
@@ -356,7 +356,7 @@ hb_language_from_string (const char *str, int len)
{ {
/* NUL-terminate it. */ /* NUL-terminate it. */
char strbuf[64]; char strbuf[64];
len = MIN (len, (int) sizeof (strbuf) - 1); len = hb_min (len, (int) sizeof (strbuf) - 1);
memcpy (strbuf, str, len); memcpy (strbuf, str, len);
strbuf[len] = '\0'; strbuf[len] = '\0';
item = lang_find_or_insert (strbuf); item = lang_find_or_insert (strbuf);
@@ -488,7 +488,7 @@ hb_script_from_string (const char *str, int len)
/** /**
* hb_script_to_iso15924_tag: * hb_script_to_iso15924_tag:
* @script: an #hb_script_ to convert. * @script: an #hb_script_t to convert.
* *
* See hb_script_from_iso15924_tag(). * See hb_script_from_iso15924_tag().
* *
@@ -720,7 +720,7 @@ static bool
parse_uint (const char **pp, const char *end, unsigned int *pv) parse_uint (const char **pp, const char *end, unsigned int *pv)
{ {
char buf[32]; char buf[32];
unsigned int len = MIN (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - *pp)); unsigned int len = hb_min (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - *pp));
strncpy (buf, *pp, len); strncpy (buf, *pp, len);
buf[len] = '\0'; buf[len] = '\0';
@@ -744,7 +744,7 @@ static bool
parse_uint32 (const char **pp, const char *end, uint32_t *pv) parse_uint32 (const char **pp, const char *end, uint32_t *pv)
{ {
char buf[32]; char buf[32];
unsigned int len = MIN (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - *pp)); unsigned int len = hb_min (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - *pp));
strncpy (buf, *pp, len); strncpy (buf, *pp, len);
buf[len] = '\0'; buf[len] = '\0';
@@ -783,7 +783,7 @@ parse_uint32 (const char **pp, const char *end, uint32_t *pv)
static void free_static_C_locale (); static void free_static_C_locale ();
#endif #endif
static struct hb_C_locale_lazy_loader_t : hb_lazy_loader_t<hb_remove_pointer (HB_LOCALE_T), static struct hb_C_locale_lazy_loader_t : hb_lazy_loader_t<hb_remove_pointer<HB_LOCALE_T>,
hb_C_locale_lazy_loader_t> hb_C_locale_lazy_loader_t>
{ {
static HB_LOCALE_T create () static HB_LOCALE_T create ()
@@ -825,7 +825,7 @@ static bool
parse_float (const char **pp, const char *end, float *pv) parse_float (const char **pp, const char *end, float *pv)
{ {
char buf[32]; char buf[32];
unsigned int len = MIN (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - *pp)); unsigned int len = hb_min (ARRAY_LENGTH (buf) - 1, (unsigned int) (end - *pp));
strncpy (buf, *pp, len); strncpy (buf, *pp, len);
buf[len] = '\0'; buf[len] = '\0';
@@ -1071,21 +1071,21 @@ hb_feature_to_string (hb_feature_t *feature,
{ {
s[len++] = '['; s[len++] = '[';
if (feature->start) if (feature->start)
len += MAX (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%u", feature->start)); len += hb_max (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%u", feature->start));
if (feature->end != feature->start + 1) { if (feature->end != feature->start + 1) {
s[len++] = ':'; s[len++] = ':';
if (feature->end != (unsigned int) -1) if (feature->end != (unsigned int) -1)
len += MAX (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%u", feature->end)); len += hb_max (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%u", feature->end));
} }
s[len++] = ']'; s[len++] = ']';
} }
if (feature->value > 1) if (feature->value > 1)
{ {
s[len++] = '='; s[len++] = '=';
len += MAX (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%u", feature->value)); len += hb_max (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%u", feature->value));
} }
assert (len < ARRAY_LENGTH (s)); assert (len < ARRAY_LENGTH (s));
len = MIN (len, size - 1); len = hb_min (len, size - 1);
memcpy (buf, s, len); memcpy (buf, s, len);
buf[len] = '\0'; buf[len] = '\0';
} }
@@ -1152,14 +1152,71 @@ hb_variation_to_string (hb_variation_t *variation,
while (len && s[len - 1] == ' ') while (len && s[len - 1] == ' ')
len--; len--;
s[len++] = '='; s[len++] = '=';
len += MAX (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%g", (double) variation->value)); len += hb_max (0, snprintf (s + len, ARRAY_LENGTH (s) - len, "%g", (double) variation->value));
assert (len < ARRAY_LENGTH (s)); assert (len < ARRAY_LENGTH (s));
len = MIN (len, size - 1); len = hb_min (len, size - 1);
memcpy (buf, s, len); memcpy (buf, s, len);
buf[len] = '\0'; buf[len] = '\0';
} }
/**
* hb_color_get_alpha:
* color: a #hb_color_t we are interested in its channels.
*
* Return value: Alpha channel value of the given color
*
* Since: 2.1.0
*/
uint8_t
(hb_color_get_alpha) (hb_color_t color)
{
return hb_color_get_alpha (color);
}
/**
* hb_color_get_red:
* color: a #hb_color_t we are interested in its channels.
*
* Return value: Red channel value of the given color
*
* Since: 2.1.0
*/
uint8_t
(hb_color_get_red) (hb_color_t color)
{
return hb_color_get_red (color);
}
/**
* hb_color_get_green:
* color: a #hb_color_t we are interested in its channels.
*
* Return value: Green channel value of the given color
*
* Since: 2.1.0
*/
uint8_t
(hb_color_get_green) (hb_color_t color)
{
return hb_color_get_green (color);
}
/**
* hb_color_get_blue:
* color: a #hb_color_t we are interested in its channels.
*
* Return value: Blue channel value of the given color
*
* Since: 2.1.0
*/
uint8_t
(hb_color_get_blue) (hb_color_t color)
{
return hb_color_get_blue (color);
}
/* If there is no visibility control, then hb-static.cc will NOT /* If there is no visibility control, then hb-static.cc will NOT
* define anything. Instead, we get it to define one set in here * define anything. Instead, we get it to define one set in here
* only, so only libharfbuzz.so defines them, not other libs. */ * only, so only libharfbuzz.so defines them, not other libs. */

View File

@@ -358,7 +358,7 @@ typedef enum
/*11.0*/HB_SCRIPT_SOGDIAN = HB_TAG ('S','o','g','d'), /*11.0*/HB_SCRIPT_SOGDIAN = HB_TAG ('S','o','g','d'),
/* /*
* Since REPLACEME * Since 2.4.0
*/ */
/*12.0*/HB_SCRIPT_ELYMAIC = HB_TAG ('E','l','y','m'), /*12.0*/HB_SCRIPT_ELYMAIC = HB_TAG ('E','l','y','m'),
/*12.0*/HB_SCRIPT_NANDINAGARI = HB_TAG ('N','a','n','d'), /*12.0*/HB_SCRIPT_NANDINAGARI = HB_TAG ('N','a','n','d'),
@@ -467,39 +467,21 @@ typedef uint32_t hb_color_t;
#define HB_COLOR(b,g,r,a) ((hb_color_t) HB_TAG ((b),(g),(r),(a))) #define HB_COLOR(b,g,r,a) ((hb_color_t) HB_TAG ((b),(g),(r),(a)))
/** HB_EXTERN uint8_t
* hb_color_get_alpha: hb_color_get_alpha (hb_color_t color);
*
*
*
* Since: 2.1.0
*/
#define hb_color_get_alpha(color) ((color) & 0xFF) #define hb_color_get_alpha(color) ((color) & 0xFF)
/**
* hb_color_get_red:
*
*
*
* Since: 2.1.0
*/
#define hb_color_get_red(color) (((color) >> 8) & 0xFF)
/**
* hb_color_get_green:
*
*
*
* Since: 2.1.0
*/
#define hb_color_get_green(color) (((color) >> 16) & 0xFF)
/**
* hb_color_get_blue:
*
*
*
* Since: 2.1.0
*/
#define hb_color_get_blue(color) (((color) >> 24) & 0xFF)
HB_EXTERN uint8_t
hb_color_get_red (hb_color_t color);
#define hb_color_get_red(color) (((color) >> 8) & 0xFF)
HB_EXTERN uint8_t
hb_color_get_green (hb_color_t color);
#define hb_color_get_green(color) (((color) >> 16) & 0xFF)
HB_EXTERN uint8_t
hb_color_get_blue (hb_color_t color);
#define hb_color_get_blue(color) (((color) >> 24) & 0xFF)
HB_END_DECLS HB_END_DECLS

View File

@@ -0,0 +1,130 @@
/*
* Copyright © 2019 Facebook, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Facebook Author(s): Behdad Esfahbod
*/
#ifndef HB_CONFIG_HH
#define HB_CONFIG_HH
#if 0 /* Make test happy. */
#include "hb.hh"
#endif
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#ifdef HB_TINY
#define HB_LEAN
#define HB_MINI
#define HB_NO_MT
#ifndef NDEBUG
#define NDEBUG
#endif
#ifndef __OPTIMIZE_SIZE__
#define __OPTIMIZE_SIZE__
#endif
#endif
#ifdef HB_LEAN
#define HB_DISABLE_DEPRECATED
#define HB_NDEBUG
#define HB_NO_ATEXIT
#define HB_NO_BUFFER_SERIALIZE
#define HB_NO_BITMAP
#define HB_NO_CFF
#define HB_NO_COLOR
#define HB_NO_GETENV
#define HB_NO_LAYOUT_UNUSED
#define HB_NO_MATH
#define HB_NO_NAME
#define HB_NO_SUBSET_LAYOUT
#endif
#ifdef HB_MINI
#define HB_NO_AAT
#define HB_NO_LEGACY
#endif
/* Closure. */
#ifdef HB_DISABLE_DEPRECATED
#define HB_IF_NOT_DEPRECATED(x)
#else
#define HB_IF_NOT_DEPRECATED(x) x
#endif
#ifdef HB_NO_AAT
#define HB_NO_OT_NAME_LANGUAGE_AAT
#define HB_NO_SHAPE_AAT
#endif
#ifdef HB_NO_BITMAP
#define HB_NO_OT_FONT_BITMAP
#endif
#ifdef HB_NO_CFF
#define HB_NO_OT_FONT_CFF
#define HB_NO_SUBSET_CFF
#endif
#ifdef HB_NO_GETENV
#define HB_NO_UNISCRIBE_BUG_COMPATIBLE
#endif
#ifdef HB_NO_LEGACY
#define HB_NO_OT_LAYOUT_BLACKLIST
#define HB_NO_OT_SHAPE_FALLBACK
#endif
#ifdef HB_NO_NAME
#define HB_NO_OT_NAME_LANGUAGE
#endif
#ifdef HB_NO_OT_SHAPE_FALLBACK
#define HB_NO_OT_SHAPE_COMPLEX_ARABIC_FALLBACK
#define HB_NO_OT_SHAPE_COMPLEX_HEBREW_FALLBACK
#define HB_NO_OT_SHAPE_COMPLEX_THAI_FALLBACK
#define HB_NO_OT_SHAPE_COMPLEX_VOWEL_CONSTRAINTS
#endif
#ifdef NDEBUG
#ifndef HB_NDEBUG
#define HB_NDEBUG
#endif
#endif
#ifdef __OPTIMIZE_SIZE__
#ifndef HB_OPTIMIZE_SIZE
#define HB_OPTIMIZE_SIZE
#endif
#endif
#ifdef HAVE_CONFIG_OVERRIDE_H
#include "config-override.h"
#endif
#endif /* HB_CONFIG_HH */

View File

@@ -55,13 +55,13 @@ coretext_font_size_from_ptem (float ptem)
* https://developer.apple.com/library/content/documentation/GraphicsAnimation/Conceptual/HighResolutionOSX/Explained/Explained.html * https://developer.apple.com/library/content/documentation/GraphicsAnimation/Conceptual/HighResolutionOSX/Explained/Explained.html
*/ */
ptem *= 96.f / 72.f; ptem *= 96.f / 72.f;
return ptem <= 0.f ? HB_CORETEXT_DEFAULT_FONT_SIZE : ptem; return (CGFloat) (ptem <= 0.f ? HB_CORETEXT_DEFAULT_FONT_SIZE : ptem);
} }
static float static float
coretext_font_size_to_ptem (CGFloat size) coretext_font_size_to_ptem (CGFloat size)
{ {
size *= 72.f / 96.f; size *= 72. / 96.;
return size <= 0.f ? 0 : size; return size <= 0 ? 0 : size;
} }
static void static void
@@ -410,7 +410,7 @@ struct active_feature_t {
feature_record_t rec; feature_record_t rec;
unsigned int order; unsigned int order;
static int cmp (const void *pa, const void *pb) { HB_INTERNAL static int cmp (const void *pa, const void *pb) {
const active_feature_t *a = (const active_feature_t *) pa; const active_feature_t *a = (const active_feature_t *) pa;
const active_feature_t *b = (const active_feature_t *) pb; const active_feature_t *b = (const active_feature_t *) pb;
return a->rec.feature < b->rec.feature ? -1 : a->rec.feature > b->rec.feature ? 1 : return a->rec.feature < b->rec.feature ? -1 : a->rec.feature > b->rec.feature ? 1 :
@@ -428,7 +428,7 @@ struct feature_event_t {
bool start; bool start;
active_feature_t feature; active_feature_t feature;
static int cmp (const void *pa, const void *pb) { HB_INTERNAL static int cmp (const void *pa, const void *pb) {
const feature_event_t *a = (const feature_event_t *) pa; const feature_event_t *a = (const feature_event_t *) pa;
const feature_event_t *b = (const feature_event_t *) pb; const feature_event_t *b = (const feature_event_t *) pb;
return a->index < b->index ? -1 : a->index > b->index ? 1 : return a->index < b->index ? -1 : a->index > b->index ? 1 :
@@ -598,7 +598,7 @@ _hb_coretext_shape (hb_shape_plan_t *shape_plan,
} else { } else {
active_feature_t *feature = active_features.find (&event->feature); active_feature_t *feature = active_features.find (&event->feature);
if (feature) if (feature)
active_features.remove (feature - active_features.arrayZ ()); active_features.remove (feature - active_features.arrayZ);
} }
} }
} }
@@ -608,7 +608,7 @@ _hb_coretext_shape (hb_shape_plan_t *shape_plan,
#define ALLOCATE_ARRAY(Type, name, len, on_no_room) \ #define ALLOCATE_ARRAY(Type, name, len, on_no_room) \
Type *name = (Type *) scratch; \ Type *name = (Type *) scratch; \
{ \ do { \
unsigned int _consumed = DIV_CEIL ((len) * sizeof (Type), sizeof (*scratch)); \ unsigned int _consumed = DIV_CEIL ((len) * sizeof (Type), sizeof (*scratch)); \
if (unlikely (_consumed > scratch_size)) \ if (unlikely (_consumed > scratch_size)) \
{ \ { \
@@ -617,7 +617,7 @@ _hb_coretext_shape (hb_shape_plan_t *shape_plan,
} \ } \
scratch += _consumed; \ scratch += _consumed; \
scratch_size -= _consumed; \ scratch_size -= _consumed; \
} } while (0)
ALLOCATE_ARRAY (UniChar, pchars, buffer->len * 2, /*nothing*/); ALLOCATE_ARRAY (UniChar, pchars, buffer->len * 2, /*nothing*/);
unsigned int chars_len = 0; unsigned int chars_len = 0;
@@ -771,7 +771,7 @@ resize_and_retry:
feature.start < chars_len && feature.start < feature.end) feature.start < chars_len && feature.start < feature.end)
{ {
CFRange feature_range = CFRangeMake (feature.start, CFRange feature_range = CFRangeMake (feature.start,
MIN (feature.end, chars_len) - feature.start); hb_min (feature.end, chars_len) - feature.start);
if (feature.value) if (feature.value)
CFAttributedStringRemoveAttribute (attr_string, feature_range, kCTKernAttributeName); CFAttributedStringRemoveAttribute (attr_string, feature_range, kCTKernAttributeName);
else else
@@ -1069,7 +1069,7 @@ resize_and_retry:
if (false) if (false)
{ {
/* Make sure all runs had the expected direction. */ /* Make sure all runs had the expected direction. */
bool backward = HB_DIRECTION_IS_BACKWARD (buffer->props.direction); HB_UNUSED bool backward = HB_DIRECTION_IS_BACKWARD (buffer->props.direction);
assert (bool (status_and & kCTRunStatusRightToLeft) == backward); assert (bool (status_and & kCTRunStatusRightToLeft) == backward);
assert (bool (status_or & kCTRunStatusRightToLeft) == backward); assert (bool (status_or & kCTRunStatusRightToLeft) == backward);
} }
@@ -1116,7 +1116,7 @@ resize_and_retry:
unsigned int cluster = info[count - 1].cluster; unsigned int cluster = info[count - 1].cluster;
for (unsigned int i = count - 1; i > 0; i--) for (unsigned int i = count - 1; i > 0; i--)
{ {
cluster = MIN (cluster, info[i - 1].cluster); cluster = hb_min (cluster, info[i - 1].cluster);
info[i - 1].cluster = cluster; info[i - 1].cluster = cluster;
} }
} }
@@ -1125,7 +1125,7 @@ resize_and_retry:
unsigned int cluster = info[0].cluster; unsigned int cluster = info[0].cluster;
for (unsigned int i = 1; i < count; i++) for (unsigned int i = 1; i < count; i++)
{ {
cluster = MIN (cluster, info[i].cluster); cluster = hb_min (cluster, info[i].cluster);
info[i].cluster = cluster; info[i].cluster = cluster;
} }
} }
@@ -1148,59 +1148,3 @@ fail:
return ret; return ret;
} }
/*
* AAT shaper
*/
/*
* shaper face data
*/
struct hb_coretext_aat_face_data_t {};
hb_coretext_aat_face_data_t *
_hb_coretext_aat_shaper_face_data_create (hb_face_t *face)
{
return hb_aat_layout_has_substitution (face) || hb_aat_layout_has_positioning (face) ?
(hb_coretext_aat_face_data_t *) HB_SHAPER_DATA_SUCCEEDED : nullptr;
}
void
_hb_coretext_aat_shaper_face_data_destroy (hb_coretext_aat_face_data_t *data HB_UNUSED)
{
}
/*
* shaper font data
*/
struct hb_coretext_aat_font_data_t {};
hb_coretext_aat_font_data_t *
_hb_coretext_aat_shaper_font_data_create (hb_font_t *font)
{
return font->data.coretext ? (hb_coretext_aat_font_data_t *) HB_SHAPER_DATA_SUCCEEDED : nullptr;
}
void
_hb_coretext_aat_shaper_font_data_destroy (hb_coretext_aat_font_data_t *data HB_UNUSED)
{
}
/*
* shaper
*/
hb_bool_t
_hb_coretext_aat_shape (hb_shape_plan_t *shape_plan,
hb_font_t *font,
hb_buffer_t *buffer,
const hb_feature_t *features,
unsigned int num_features)
{
return _hb_coretext_shape (shape_plan, font, buffer, features, num_features);
}

View File

@@ -29,7 +29,7 @@
#include "hb.hh" #include "hb.hh"
#include "hb-atomic.hh" #include "hb-atomic.hh"
#include "hb-dsalgs.hh" #include "hb-algs.hh"
#ifndef HB_DEBUG #ifndef HB_DEBUG
@@ -63,6 +63,9 @@ extern HB_INTERNAL hb_atomic_int_t _hb_options;
static inline hb_options_t static inline hb_options_t
hb_options () hb_options ()
{ {
#ifdef HB_NO_GETENV
return hb_options_t ();
#endif
/* Make a local copy, so we can access bitfield threadsafely. */ /* Make a local copy, so we can access bitfield threadsafely. */
hb_options_union_t u; hb_options_union_t u;
u.i = _hb_options.get_relaxed (); u.i = _hb_options.get_relaxed ();
@@ -158,7 +161,7 @@ _hb_debug_msg_va (const char *what,
VBAR VBAR VBAR VBAR VBAR VBAR VBAR VBAR VBAR VBAR; VBAR VBAR VBAR VBAR VBAR VBAR VBAR VBAR VBAR VBAR;
fprintf (stderr, "%2u %s" VRBAR "%s", fprintf (stderr, "%2u %s" VRBAR "%s",
level, level,
bars + sizeof (bars) - 1 - MIN ((unsigned int) sizeof (bars) - 1, (unsigned int) (sizeof (VBAR) - 1) * level), bars + sizeof (bars) - 1 - hb_min ((unsigned int) sizeof (bars) - 1, (unsigned int) (sizeof (VBAR) - 1) * level),
level_dir ? (level_dir > 0 ? DLBAR : ULBAR) : LBAR); level_dir ? (level_dir > 0 ? DLBAR : ULBAR) : LBAR);
} else } else
fprintf (stderr, " " VRBAR LBAR); fprintf (stderr, " " VRBAR LBAR);
@@ -246,8 +249,8 @@ struct hb_printer_t<bool> {
}; };
template <> template <>
struct hb_printer_t<hb_void_t> { struct hb_printer_t<hb_empty_t> {
const char *print (hb_void_t) { return ""; } const char *print (hb_empty_t) { return ""; }
}; };
@@ -263,7 +266,7 @@ static inline void _hb_warn_no_return (bool returned)
} }
} }
template <> template <>
/*static*/ inline void _hb_warn_no_return<hb_void_t> (bool returned HB_UNUSED) /*static*/ inline void _hb_warn_no_return<hb_empty_t> (bool returned HB_UNUSED)
{} {}
template <int max_level, typename ret_t> template <int max_level, typename ret_t>
@@ -327,18 +330,20 @@ struct hb_auto_trace_t<0, ret_t>
const char *message, const char *message,
...) HB_PRINTF_FUNC(6, 7) {} ...) HB_PRINTF_FUNC(6, 7) {}
ret_t ret (ret_t v, template <typename T>
T ret (T&& v,
const char *func HB_UNUSED = nullptr, const char *func HB_UNUSED = nullptr,
unsigned int line HB_UNUSED = 0) { return v; } unsigned int line HB_UNUSED = 0) { return hb_forward<T> (v); }
}; };
/* For disabled tracing; optimize out everything. /* For disabled tracing; optimize out everything.
* https://github.com/harfbuzz/harfbuzz/pull/605 */ * https://github.com/harfbuzz/harfbuzz/pull/605 */
template <typename ret_t> template <typename ret_t>
struct hb_no_trace_t { struct hb_no_trace_t {
ret_t ret (ret_t v, template <typename T>
const char *func HB_UNUSED = "", T ret (T&& v,
unsigned int line HB_UNUSED = 0) { return v; } const char *func HB_UNUSED = nullptr,
unsigned int line HB_UNUSED = 0) { return hb_forward<T> (v); }
}; };
#define return_trace(RET) return trace.ret (RET, HB_FUNC, __LINE__) #define return_trace(RET) return trace.ret (RET, HB_FUNC, __LINE__)
@@ -437,25 +442,12 @@ struct hb_no_trace_t {
#define TRACE_SUBSET(this) hb_no_trace_t<bool> trace #define TRACE_SUBSET(this) hb_no_trace_t<bool> trace
#endif #endif
#ifndef HB_DEBUG_WOULD_APPLY
#define HB_DEBUG_WOULD_APPLY (HB_DEBUG+0)
#endif
#if HB_DEBUG_WOULD_APPLY
#define TRACE_WOULD_APPLY(this) \
hb_auto_trace_t<HB_DEBUG_WOULD_APPLY, bool> trace \
(&c->debug_depth, c->get_name (), this, HB_FUNC, \
"%d glyphs", c->len);
#else
#define TRACE_WOULD_APPLY(this) hb_no_trace_t<bool> trace
#endif
#ifndef HB_DEBUG_DISPATCH #ifndef HB_DEBUG_DISPATCH
#define HB_DEBUG_DISPATCH ( \ #define HB_DEBUG_DISPATCH ( \
HB_DEBUG_APPLY + \ HB_DEBUG_APPLY + \
HB_DEBUG_SANITIZE + \ HB_DEBUG_SANITIZE + \
HB_DEBUG_SERIALIZE + \ HB_DEBUG_SERIALIZE + \
HB_DEBUG_SUBSET + \ HB_DEBUG_SUBSET + \
HB_DEBUG_WOULD_APPLY + \
0) 0)
#endif #endif
#if HB_DEBUG_DISPATCH #if HB_DEBUG_DISPATCH

View File

@@ -63,7 +63,7 @@ typedef hb_bool_t (*hb_font_get_glyph_func_t) (hb_font_t *font, void *font_data,
hb_codepoint_t *glyph, hb_codepoint_t *glyph,
void *user_data); void *user_data);
HB_EXTERN HB_DEPRECATED_FOR(hb_font_funcs_set_nominal_glyph_func or hb_font_funcs_set_variation_glyph_func) void HB_EXTERN HB_DEPRECATED_FOR(hb_font_funcs_set_nominal_glyph_func and hb_font_funcs_set_variation_glyph_func) void
hb_font_funcs_set_glyph_func (hb_font_funcs_t *ffuncs, hb_font_funcs_set_glyph_func (hb_font_funcs_t *ffuncs,
hb_font_get_glyph_func_t func, hb_font_get_glyph_func_t func,
void *user_data, hb_destroy_func_t destroy); void *user_data, hb_destroy_func_t destroy);

View File

@@ -530,12 +530,12 @@ _hb_directwrite_shape_full (hb_shape_plan_t *shape_plan,
hb_buffer_t::scratch_buffer_t *scratch = buffer->get_scratch_buffer (&scratch_size); hb_buffer_t::scratch_buffer_t *scratch = buffer->get_scratch_buffer (&scratch_size);
#define ALLOCATE_ARRAY(Type, name, len) \ #define ALLOCATE_ARRAY(Type, name, len) \
Type *name = (Type *) scratch; \ Type *name = (Type *) scratch; \
{ \ do { \
unsigned int _consumed = DIV_CEIL ((len) * sizeof (Type), sizeof (*scratch)); \ unsigned int _consumed = DIV_CEIL ((len) * sizeof (Type), sizeof (*scratch)); \
assert (_consumed <= scratch_size); \ assert (_consumed <= scratch_size); \
scratch += _consumed; \ scratch += _consumed; \
scratch_size -= _consumed; \ scratch_size -= _consumed; \
} } while (0)
#define utf16_index() var1.u32 #define utf16_index() var1.u32
@@ -778,7 +778,7 @@ retry_getglyphs:
{ {
uint32_t *p = uint32_t *p =
&vis_clusters[log_clusters[buffer->info[i].utf16_index ()]]; &vis_clusters[log_clusters[buffer->info[i].utf16_index ()]];
*p = MIN (*p, buffer->info[i].cluster); *p = hb_min (*p, buffer->info[i].cluster);
} }
for (unsigned int i = 1; i < glyphCount; i++) for (unsigned int i = 1; i < glyphCount; i++)
if (vis_clusters[i] == (uint32_t) -1) if (vis_clusters[i] == (uint32_t) -1)
@@ -846,10 +846,23 @@ _hb_directwrite_shape (hb_shape_plan_t *shape_plan,
features, num_features, 0); features, num_features, 0);
} }
/* /**
* Public [experimental] API * hb_directwrite_shape_experimental_width:
*/ * Experimental API to test DirectWrite's justification algorithm.
*
* It inserts Kashida at wrong order so don't use the API ever.
*
* It doesn't work with cygwin/msys due to header bugs so one
* should use MSVC toolchain in order to use it for now.
*
* @font:
* @buffer:
* @features:
* @num_features:
* @width:
*
* Since: 1.4.2
**/
hb_bool_t hb_bool_t
hb_directwrite_shape_experimental_width (hb_font_t *font, hb_directwrite_shape_experimental_width (hb_font_t *font,
hb_buffer_t *buffer, hb_buffer_t *buffer,
@@ -917,8 +930,11 @@ _hb_directwrite_font_release (void *data)
/** /**
* hb_directwrite_face_create: * hb_directwrite_face_create:
* @font_face: * @font_face: a DirectWrite IDWriteFontFace object.
* Since: REPLACEME *
* Return value: #hb_face_t object corresponding to the given input
*
* Since: 2.4.0
**/ **/
hb_face_t * hb_face_t *
hb_directwrite_face_create (IDWriteFontFace *font_face) hb_directwrite_face_create (IDWriteFontFace *font_face)
@@ -928,3 +944,17 @@ hb_directwrite_face_create (IDWriteFontFace *font_face)
return hb_face_create_for_tables (reference_table, font_face, return hb_face_create_for_tables (reference_table, font_face,
_hb_directwrite_font_release); _hb_directwrite_font_release);
} }
/**
* hb_directwrite_face_get_font_face:
* @face: a #hb_face_t object
*
* Return value: DirectWrite IDWriteFontFace object corresponding to the given input
*
* Since: REPLACEME
**/
IDWriteFontFace *
hb_directwrite_face_get_font_face (hb_face_t *face)
{
return face->data.directwrite->fontFace;
}

View File

@@ -37,6 +37,9 @@ hb_directwrite_shape_experimental_width (hb_font_t *font, hb_buffer_t *buffer,
HB_EXTERN hb_face_t * HB_EXTERN hb_face_t *
hb_directwrite_face_create (IDWriteFontFace *font_face); hb_directwrite_face_create (IDWriteFontFace *font_face);
HB_EXTERN IDWriteFontFace *
hb_directwrite_face_get_font_face (hb_face_t *face);
HB_END_DECLS HB_END_DECLS
#endif /* HB_DIRECTWRITE_H */ #endif /* HB_DIRECTWRITE_H */

View File

@@ -0,0 +1,58 @@
/*
* Copyright © 2007,2008,2009,2010 Red Hat, Inc.
* Copyright © 2012,2018 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Red Hat Author(s): Behdad Esfahbod
* Google Author(s): Behdad Esfahbod
*/
#ifndef HB_DISPATCH_HH
#define HB_DISPATCH_HH
#include "hb.hh"
/*
* Dispatch
*/
template <typename Context, typename Return, unsigned int MaxDebugDepth>
struct hb_dispatch_context_t
{
private:
/* https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern */
const Context* thiz () const { return static_cast<const Context *> (this); }
Context* thiz () { return static_cast< Context *> (this); }
public:
static constexpr unsigned max_debug_depth = MaxDebugDepth;
typedef Return return_t;
template <typename T, typename F>
bool may_dispatch (const T *obj HB_UNUSED, const F *format HB_UNUSED) { return true; }
template <typename T, typename ...Ts>
return_t dispatch (const T &obj, Ts&&... ds)
{ return obj.dispatch (thiz (), hb_forward<Ts> (ds)...); }
static return_t no_dispatch_return_value () { return Context::default_return_value (); }
static bool stop_sublookup_iteration (const return_t r HB_UNUSED) { return false; }
};
#endif /* HB_DISPATCH_HH */

View File

@@ -336,6 +336,7 @@ hb_font_get_glyph_v_origin_default (hb_font_t *font,
return ret; return ret;
} }
#ifndef HB_DISABLE_DEPRECATED
static hb_position_t static hb_position_t
hb_font_get_glyph_h_kerning_nil (hb_font_t *font HB_UNUSED, hb_font_get_glyph_h_kerning_nil (hb_font_t *font HB_UNUSED,
void *font_data HB_UNUSED, void *font_data HB_UNUSED,
@@ -373,6 +374,7 @@ hb_font_get_glyph_v_kerning_default (hb_font_t *font,
{ {
return font->parent_scale_y_distance (font->parent->get_glyph_v_kerning (top_glyph, bottom_glyph)); return font->parent_scale_y_distance (font->parent->get_glyph_v_kerning (top_glyph, bottom_glyph));
} }
#endif
static hb_bool_t static hb_bool_t
hb_font_get_glyph_extents_nil (hb_font_t *font HB_UNUSED, hb_font_get_glyph_extents_nil (hb_font_t *font HB_UNUSED,
@@ -925,6 +927,7 @@ hb_font_get_glyph_v_origin (hb_font_t *font,
return font->get_glyph_v_origin (glyph, x, y); return font->get_glyph_v_origin (glyph, x, y);
} }
#ifndef HB_DISABLE_DEPRECATED
/** /**
* hb_font_get_glyph_h_kerning: * hb_font_get_glyph_h_kerning:
* @font: a font. * @font: a font.
@@ -964,6 +967,7 @@ hb_font_get_glyph_v_kerning (hb_font_t *font,
{ {
return font->get_glyph_v_kerning (top_glyph, bottom_glyph); return font->get_glyph_v_kerning (top_glyph, bottom_glyph);
} }
#endif
/** /**
* hb_font_get_glyph_extents: * hb_font_get_glyph_extents:
@@ -1173,6 +1177,7 @@ hb_font_subtract_glyph_origin_for_direction (hb_font_t *font,
return font->subtract_glyph_origin_for_direction (glyph, direction, x, y); return font->subtract_glyph_origin_for_direction (glyph, direction, x, y);
} }
#ifndef HB_DISABLE_DEPRECATED
/** /**
* hb_font_get_glyph_kerning_for_direction: * hb_font_get_glyph_kerning_for_direction:
* @font: a font. * @font: a font.
@@ -1195,6 +1200,7 @@ hb_font_get_glyph_kerning_for_direction (hb_font_t *font,
{ {
return font->get_glyph_kerning_for_direction (first_glyph, second_glyph, direction, x, y); return font->get_glyph_kerning_for_direction (first_glyph, second_glyph, direction, x, y);
} }
#endif
/** /**
* hb_font_get_glyph_extents_for_origin: * hb_font_get_glyph_extents_for_origin:
@@ -1347,8 +1353,10 @@ hb_font_create (hb_face_t *face)
{ {
hb_font_t *font = _hb_font_create (face); hb_font_t *font = _hb_font_create (face);
#ifndef HB_NO_OT_FONT
/* Install our in-house, very lightweight, funcs. */ /* Install our in-house, very lightweight, funcs. */
hb_ot_font_set_funcs (font); hb_ot_font_set_funcs (font);
#endif
return font; return font;
} }
@@ -1914,6 +1922,7 @@ hb_font_get_var_coords_normalized (hb_font_t *font,
} }
#ifndef HB_DISABLE_DEPRECATED
/* /*
* Deprecated get_glyph_func(): * Deprecated get_glyph_func():
*/ */
@@ -2036,3 +2045,4 @@ hb_font_funcs_set_glyph_func (hb_font_funcs_t *ffuncs,
trampoline, trampoline,
trampoline_destroy); trampoline_destroy);
} }
#endif

View File

@@ -51,8 +51,8 @@
HB_FONT_FUNC_IMPLEMENT (glyph_v_advances) \ HB_FONT_FUNC_IMPLEMENT (glyph_v_advances) \
HB_FONT_FUNC_IMPLEMENT (glyph_h_origin) \ HB_FONT_FUNC_IMPLEMENT (glyph_h_origin) \
HB_FONT_FUNC_IMPLEMENT (glyph_v_origin) \ HB_FONT_FUNC_IMPLEMENT (glyph_v_origin) \
HB_FONT_FUNC_IMPLEMENT (glyph_h_kerning) \ HB_IF_NOT_DEPRECATED (HB_FONT_FUNC_IMPLEMENT (glyph_h_kerning)) \
HB_FONT_FUNC_IMPLEMENT (glyph_v_kerning) \ HB_IF_NOT_DEPRECATED (HB_FONT_FUNC_IMPLEMENT (glyph_v_kerning)) \
HB_FONT_FUNC_IMPLEMENT (glyph_extents) \ HB_FONT_FUNC_IMPLEMENT (glyph_extents) \
HB_FONT_FUNC_IMPLEMENT (glyph_contour_point) \ HB_FONT_FUNC_IMPLEMENT (glyph_contour_point) \
HB_FONT_FUNC_IMPLEMENT (glyph_name) \ HB_FONT_FUNC_IMPLEMENT (glyph_name) \
@@ -304,17 +304,25 @@ struct hb_font_t
hb_position_t get_glyph_h_kerning (hb_codepoint_t left_glyph, hb_position_t get_glyph_h_kerning (hb_codepoint_t left_glyph,
hb_codepoint_t right_glyph) hb_codepoint_t right_glyph)
{ {
#ifdef HB_DISABLE_DEPRECATED
return 0;
#else
return klass->get.f.glyph_h_kerning (this, user_data, return klass->get.f.glyph_h_kerning (this, user_data,
left_glyph, right_glyph, left_glyph, right_glyph,
klass->user_data.glyph_h_kerning); klass->user_data.glyph_h_kerning);
#endif
} }
hb_position_t get_glyph_v_kerning (hb_codepoint_t top_glyph, hb_position_t get_glyph_v_kerning (hb_codepoint_t top_glyph,
hb_codepoint_t bottom_glyph) hb_codepoint_t bottom_glyph)
{ {
#ifdef HB_DISABLE_DEPRECATED
return 0;
#else
return klass->get.f.glyph_v_kerning (this, user_data, return klass->get.f.glyph_v_kerning (this, user_data,
top_glyph, bottom_glyph, top_glyph, bottom_glyph,
klass->user_data.glyph_v_kerning); klass->user_data.glyph_v_kerning);
#endif
} }
hb_bool_t get_glyph_extents (hb_codepoint_t glyph, hb_bool_t get_glyph_extents (hb_codepoint_t glyph,
@@ -607,7 +615,7 @@ struct hb_font_t
return (hb_position_t) (scaled / upem); return (hb_position_t) (scaled / upem);
} }
hb_position_t em_scalef (float v, int scale) hb_position_t em_scalef (float v, int scale)
{ return (hb_position_t) round (v * scale / face->get_upem ()); } { return (hb_position_t) roundf (v * scale / face->get_upem ()); }
float em_fscale (int16_t v, int scale) float em_fscale (int16_t v, int scale)
{ return (float) v * scale / face->get_upem (); } { return (float) v * scale / face->get_upem (); }
}; };

View File

@@ -96,7 +96,7 @@ _hb_ft_font_create (FT_Face ft_face, bool symbol, bool unref)
ft_font->load_flags = FT_LOAD_DEFAULT | FT_LOAD_NO_HINTING; ft_font->load_flags = FT_LOAD_DEFAULT | FT_LOAD_NO_HINTING;
ft_font->cached_x_scale.set (0); ft_font->cached_x_scale.set_relaxed (0);
ft_font->advance_cache.init (); ft_font->advance_cache.init ();
return ft_font; return ft_font;
@@ -439,7 +439,7 @@ hb_ft_get_glyph_from_name (hb_font_t *font HB_UNUSED,
else { else {
/* Make a nul-terminated version. */ /* Make a nul-terminated version. */
char buf[128]; char buf[128];
len = MIN (len, (int) sizeof (buf) - 1); len = hb_min (len, (int) sizeof (buf) - 1);
strncpy (buf, name, len); strncpy (buf, name, len);
buf[len] = '\0'; buf[len] = '\0';
*glyph = FT_Get_Name_Index (ft_face, buf); *glyph = FT_Get_Name_Index (ft_face, buf);
@@ -748,7 +748,7 @@ hb_ft_font_create_referenced (FT_Face ft_face)
static void free_static_ft_library (); static void free_static_ft_library ();
#endif #endif
static struct hb_ft_library_lazy_loader_t : hb_lazy_loader_t<hb_remove_pointer (FT_Library), static struct hb_ft_library_lazy_loader_t : hb_lazy_loader_t<hb_remove_pointer<FT_Library>,
hb_ft_library_lazy_loader_t> hb_ft_library_lazy_loader_t>
{ {
static FT_Library create () static FT_Library create ()

View File

@@ -202,6 +202,7 @@ _hb_graphite2_shaper_font_data_destroy (hb_graphite2_font_data_t *data HB_UNUSED
{ {
} }
#ifndef HB_DISABLE_DEPRECATED
/** /**
* hb_graphite2_font_get_gr_font: * hb_graphite2_font_get_gr_font:
* *
@@ -213,6 +214,7 @@ hb_graphite2_font_get_gr_font (hb_font_t *font HB_UNUSED)
{ {
return nullptr; return nullptr;
} }
#endif
/* /*
@@ -308,12 +310,12 @@ _hb_graphite2_shape (hb_shape_plan_t *shape_plan HB_UNUSED,
#define ALLOCATE_ARRAY(Type, name, len) \ #define ALLOCATE_ARRAY(Type, name, len) \
Type *name = (Type *) scratch; \ Type *name = (Type *) scratch; \
{ \ do { \
unsigned int _consumed = DIV_CEIL ((len) * sizeof (Type), sizeof (*scratch)); \ unsigned int _consumed = DIV_CEIL ((len) * sizeof (Type), sizeof (*scratch)); \
assert (_consumed <= scratch_size); \ assert (_consumed <= scratch_size); \
scratch += _consumed; \ scratch += _consumed; \
scratch_size -= _consumed; \ scratch_size -= _consumed; \
} } while (0)
ALLOCATE_ARRAY (hb_graphite2_cluster_t, clusters, buffer->len); ALLOCATE_ARRAY (hb_graphite2_cluster_t, clusters, buffer->len);
ALLOCATE_ARRAY (hb_codepoint_t, gids, glyph_count); ALLOCATE_ARRAY (hb_codepoint_t, gids, glyph_count);

View File

@@ -49,6 +49,9 @@
* Functions for using HarfBuzz with the ICU library to provide Unicode data. * Functions for using HarfBuzz with the ICU library to provide Unicode data.
**/ **/
/* ICU doesn't do-while(0) around their statements. Ugh!
* https://unicode-org.atlassian.net/browse/CLDR-13027 */
#define HB_ICU_STMT(S) do { S } while (0)
hb_script_t hb_script_t
hb_icu_script_to_script (UScriptCode script) hb_icu_script_to_script (UScriptCode script)
@@ -183,9 +186,9 @@ hb_icu_unicode_compose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
len = 0; len = 0;
err = false; err = false;
U16_APPEND (utf16, len, ARRAY_LENGTH (utf16), a, err); HB_ICU_STMT (U16_APPEND (utf16, len, ARRAY_LENGTH (utf16), a, err));
if (err) return false; if (err) return false;
U16_APPEND (utf16, len, ARRAY_LENGTH (utf16), b, err); HB_ICU_STMT (U16_APPEND (utf16, len, ARRAY_LENGTH (utf16), b, err));
if (err) return false; if (err) return false;
icu_err = U_ZERO_ERROR; icu_err = U_ZERO_ERROR;
@@ -193,7 +196,7 @@ hb_icu_unicode_compose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
if (U_FAILURE (icu_err)) if (U_FAILURE (icu_err))
return false; return false;
if (u_countChar32 (normalized, len) == 1) { if (u_countChar32 (normalized, len) == 1) {
U16_GET_UNSAFE (normalized, 0, *ab); HB_ICU_STMT (U16_GET_UNSAFE (normalized, 0, *ab));
ret = true; ret = true;
} else { } else {
ret = false; ret = false;
@@ -221,13 +224,13 @@ hb_icu_unicode_decompose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
len = u_countChar32 (decomposed, len); len = u_countChar32 (decomposed, len);
if (len == 1) { if (len == 1) {
U16_GET_UNSAFE (decomposed, 0, *a); HB_ICU_STMT (U16_GET_UNSAFE (decomposed, 0, *a));
*b = 0; *b = 0;
return *a != ab; return *a != ab;
} else if (len == 2) { } else if (len == 2) {
len =0; len =0;
U16_NEXT_UNSAFE (decomposed, len, *a); HB_ICU_STMT (U16_NEXT_UNSAFE (decomposed, len, *a));
U16_NEXT_UNSAFE (decomposed, len, *b); HB_ICU_STMT (U16_NEXT_UNSAFE (decomposed, len, *b));
} }
return true; return true;
} }
@@ -236,7 +239,7 @@ hb_icu_unicode_decompose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
/* We don't ifdef-out the fallback code such that compiler always /* We don't ifdef-out the fallback code such that compiler always
* sees it and makes sure it's compilable. */ * sees it and makes sure it's compilable. */
UChar utf16[2], normalized[2 * HB_UNICODE_MAX_DECOMPOSITION_LEN + 1]; UChar utf16[2], normalized[2 * 19/*HB_UNICODE_MAX_DECOMPOSITION_LEN*/ + 1];
unsigned int len; unsigned int len;
hb_bool_t ret, err; hb_bool_t ret, err;
UErrorCode icu_err; UErrorCode icu_err;
@@ -247,7 +250,7 @@ hb_icu_unicode_decompose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
len = 0; len = 0;
err = false; err = false;
U16_APPEND (utf16, len, ARRAY_LENGTH (utf16), ab, err); HB_ICU_STMT (U16_APPEND (utf16, len, ARRAY_LENGTH (utf16), ab, err));
if (err) return false; if (err) return false;
icu_err = U_ZERO_ERROR; icu_err = U_ZERO_ERROR;
@@ -258,13 +261,13 @@ hb_icu_unicode_decompose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
len = u_countChar32 (normalized, len); len = u_countChar32 (normalized, len);
if (len == 1) { if (len == 1) {
U16_GET_UNSAFE (normalized, 0, *a); HB_ICU_STMT (U16_GET_UNSAFE (normalized, 0, *a));
*b = 0; *b = 0;
ret = *a != ab; ret = *a != ab;
} else if (len == 2) { } else if (len == 2) {
len =0; len =0;
U16_NEXT_UNSAFE (normalized, len, *a); HB_ICU_STMT (U16_NEXT_UNSAFE (normalized, len, *a));
U16_NEXT_UNSAFE (normalized, len, *b); HB_ICU_STMT (U16_NEXT_UNSAFE (normalized, len, *b));
/* Here's the ugly part: if ab decomposes to a single character and /* Here's the ugly part: if ab decomposes to a single character and
* that character decomposes again, we have to detect that and undo * that character decomposes again, we have to detect that and undo
@@ -275,7 +278,7 @@ hb_icu_unicode_decompose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
if (U_FAILURE (icu_err)) if (U_FAILURE (icu_err))
return false; return false;
hb_codepoint_t c; hb_codepoint_t c;
U16_GET_UNSAFE (recomposed, 0, c); HB_ICU_STMT (U16_GET_UNSAFE (recomposed, 0, c));
if (c != *a && c != ab) { if (c != *a && c != ab) {
*a = c; *a = c;
*b = 0; *b = 0;
@@ -284,7 +287,7 @@ hb_icu_unicode_decompose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
} else { } else {
/* If decomposed to more than two characters, take the last one, /* If decomposed to more than two characters, take the last one,
* and recompose the rest to get the first component. */ * and recompose the rest to get the first component. */
U16_PREV_UNSAFE (normalized, len, *b); /* Changes len in-place. */ HB_ICU_STMT (U16_PREV_UNSAFE (normalized, len, *b)); /* Changes len in-place. */
UChar recomposed[18 * 2]; UChar recomposed[18 * 2];
icu_err = U_ZERO_ERROR; icu_err = U_ZERO_ERROR;
len = unorm2_normalize (unorm2_getNFCInstance (&icu_err), normalized, len, recomposed, ARRAY_LENGTH (recomposed), &icu_err); len = unorm2_normalize (unorm2_getNFCInstance (&icu_err), normalized, len, recomposed, ARRAY_LENGTH (recomposed), &icu_err);
@@ -293,7 +296,7 @@ hb_icu_unicode_decompose (hb_unicode_funcs_t *ufuncs HB_UNUSED,
/* We expect that recomposed has exactly one character now. */ /* We expect that recomposed has exactly one character now. */
if (unlikely (u_countChar32 (recomposed, len) != 1)) if (unlikely (u_countChar32 (recomposed, len) != 1))
return false; return false;
U16_GET_UNSAFE (recomposed, 0, *a); HB_ICU_STMT (U16_GET_UNSAFE (recomposed, 0, *a));
ret = true; ret = true;
} }

View File

@@ -1,5 +1,6 @@
/* /*
* Copyright © 2018 Google, Inc. * Copyright © 2018 Google, Inc.
* Copyright © 2019 Facebook, Inc.
* *
* This is part of HarfBuzz, a text shaping library. * This is part of HarfBuzz, a text shaping library.
* *
@@ -22,13 +23,15 @@
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
* *
* Google Author(s): Behdad Esfahbod * Google Author(s): Behdad Esfahbod
* Facebook Author(s): Behdad Esfahbod
*/ */
#ifndef HB_ITER_HH #ifndef HB_ITER_HH
#define HB_ITER_HH #define HB_ITER_HH
#include "hb.hh" #include "hb.hh"
#include "hb-null.hh" #include "hb-algs.hh"
#include "hb-meta.hh"
/* Unified iterator object. /* Unified iterator object.
@@ -39,16 +42,32 @@
* copied by value. If the collection / object being iterated on * copied by value. If the collection / object being iterated on
* is writable, then the iterator returns lvalues, otherwise it * is writable, then the iterator returns lvalues, otherwise it
* returns rvalues. * returns rvalues.
*
* TODO Document more.
*
* If iterator implementation implements operator!=, then can be
* used in range-based for loop. That comes free if the iterator
* is random-access. Otherwise, the range-based for loop incurs
* one traversal to find end(), which can be avoided if written
* as a while-style for loop, or if iterator implements a faster
* __end__() method.
* TODO When opting in for C++17, address this by changing return
* type of .end()?
*/
/*
* Base classes for iterators.
*/ */
/* Base class for all iterators. */ /* Base class for all iterators. */
template <typename Iter, typename Item = typename Iter::__item_type__> template <typename iter_t, typename Item = typename iter_t::__item_t__>
struct hb_iter_t struct hb_iter_t
{ {
typedef Iter iter_t;
typedef iter_t const_iter_t;
typedef Item item_t; typedef Item item_t;
static constexpr unsigned item_size = hb_static_size (Item); static constexpr unsigned item_size = hb_static_size (Item);
static constexpr bool is_iterator = true;
static constexpr bool is_random_access_iterator = false;
static constexpr bool is_sorted_iterator = false;
private: private:
/* https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern */ /* https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern */
@@ -56,53 +75,110 @@ struct hb_iter_t
iter_t* thiz () { return static_cast< iter_t *> (this); } iter_t* thiz () { return static_cast< iter_t *> (this); }
public: public:
/* TODO:
* Port operators below to use hb_enable_if to sniff which method implements
* an operator and use it, and remove hb_iter_fallback_mixin_t completely. */
/* Operators. */ /* Operators. */
operator iter_t () { return iter(); }
explicit_operator bool () const { return more (); }
item_t& operator * () const { return item (); }
item_t& operator [] (signed i) const { return item_at ((unsigned) i); }
iter_t& operator += (unsigned count) { forward (count); return *thiz(); }
iter_t& operator ++ () { next (); return *thiz(); }
iter_t& operator -= (unsigned count) { rewind (count); return *thiz(); }
iter_t& operator -- () { prev (); return *thiz(); }
iter_t operator + (unsigned count) { iter_t c (*thiz()); c += count; return c; }
iter_t operator ++ (int) { iter_t c (*thiz()); ++*thiz(); return c; }
iter_t operator - (unsigned count) { iter_t c (*thiz()); c -= count; return c; }
iter_t operator -- (int) { iter_t c (*thiz()); --*thiz(); return c; }
/* Methods. */
iter_t iter () const { return *thiz(); } iter_t iter () const { return *thiz(); }
const_iter_t const_iter () const { return iter (); } iter_t operator + () const { return *thiz(); }
item_t& item () const { return thiz()->__item__ (); } iter_t begin () const { return *thiz(); }
item_t& item_at (unsigned i) const { return thiz()->__item_at__ (i); } iter_t end () const { return thiz()->__end__ (); }
bool more () const { return thiz()->__more__ (); } explicit operator bool () const { return thiz()->__more__ (); }
unsigned len () const { return thiz()->__len__ (); } unsigned len () const { return thiz()->__len__ (); }
void next () { thiz()->__next__ (); } /* The following can only be enabled if item_t is reference type. Otherwise
void forward (unsigned n) { thiz()->__forward__ (n); } * it will be returning pointer to temporary rvalue.
void prev () { thiz()->__prev__ (); } * TODO Use a wrapper return type to fix for non-reference type. */
void rewind (unsigned n) { thiz()->__rewind__ (n); } template <typename T = item_t,
bool random_access () const { return thiz()->__random_access__ (); } hb_enable_if (hb_is_reference (T))>
hb_remove_reference<item_t>* operator -> () const { return hb_addressof (**thiz()); }
item_t operator * () const { return thiz()->__item__ (); }
item_t operator * () { return thiz()->__item__ (); }
item_t operator [] (unsigned i) const { return thiz()->__item_at__ (i); }
item_t operator [] (unsigned i) { return thiz()->__item_at__ (i); }
iter_t& operator += (unsigned count) & { thiz()->__forward__ (count); return *thiz(); }
iter_t operator += (unsigned count) && { thiz()->__forward__ (count); return *thiz(); }
iter_t& operator ++ () & { thiz()->__next__ (); return *thiz(); }
iter_t operator ++ () && { thiz()->__next__ (); return *thiz(); }
iter_t& operator -= (unsigned count) & { thiz()->__rewind__ (count); return *thiz(); }
iter_t operator -= (unsigned count) && { thiz()->__rewind__ (count); return *thiz(); }
iter_t& operator -- () & { thiz()->__prev__ (); return *thiz(); }
iter_t operator -- () && { thiz()->__prev__ (); return *thiz(); }
iter_t operator + (unsigned count) const { auto c = thiz()->iter (); c += count; return c; }
friend iter_t operator + (unsigned count, const iter_t &it) { return it + count; }
iter_t operator ++ (int) { iter_t c (*thiz()); ++*thiz(); return c; }
iter_t operator - (unsigned count) const { auto c = thiz()->iter (); c -= count; return c; }
iter_t operator -- (int) { iter_t c (*thiz()); --*thiz(); return c; }
template <typename T>
iter_t& operator >> (T &v) & { v = **thiz(); ++*thiz(); return *thiz(); }
template <typename T>
iter_t operator >> (T &v) && { v = **thiz(); ++*thiz(); return *thiz(); }
template <typename T>
iter_t& operator << (const T v) & { **thiz() = v; ++*thiz(); return *thiz(); }
template <typename T>
iter_t operator << (const T v) && { **thiz() = v; ++*thiz(); return *thiz(); }
protected: protected:
hb_iter_t () {} hb_iter_t () = default;
hb_iter_t (const hb_iter_t &o HB_UNUSED) {} hb_iter_t (const hb_iter_t &o HB_UNUSED) = default;
void operator = (const hb_iter_t &o HB_UNUSED) {} hb_iter_t (hb_iter_t &&o HB_UNUSED) = default;
hb_iter_t& operator = (const hb_iter_t &o HB_UNUSED) = default;
hb_iter_t& operator = (hb_iter_t &&o HB_UNUSED) = default;
}; };
/* Base class for sorted iterators. Does not enforce anything. #define HB_ITER_USING(Name) \
* Just for class taxonomy and requirements. */ using item_t = typename Name::item_t; \
template <typename Iter, typename Item = typename Iter::__item_type__> using Name::begin; \
struct hb_sorted_iter_t : hb_iter_t<Iter, Item> using Name::end; \
using Name::item_size; \
using Name::is_iterator; \
using Name::iter; \
using Name::operator bool; \
using Name::len; \
using Name::operator ->; \
using Name::operator *; \
using Name::operator []; \
using Name::operator +=; \
using Name::operator ++; \
using Name::operator -=; \
using Name::operator --; \
using Name::operator +; \
using Name::operator -; \
using Name::operator >>; \
using Name::operator <<; \
static_assert (true, "")
/* Returns iterator / item type of a type. */
template <typename Iterable>
using hb_iter_type = decltype (hb_deref (hb_declval (Iterable)).iter ());
template <typename Iterable>
using hb_item_type = decltype (*hb_deref (hb_declval (Iterable)).iter ());
template <typename> struct hb_array_t;
struct
{ {
protected: template <typename T> hb_iter_type<T>
hb_sorted_iter_t () {} operator () (T&& c) const
hb_sorted_iter_t (const hb_sorted_iter_t &o) : hb_iter_t<Iter, Item> (o) {} { return hb_deref (hb_forward<T> (c)).iter (); }
void operator = (const hb_sorted_iter_t &o HB_UNUSED) {}
}; /* Specialization for C arrays. */
template <typename Type> inline hb_array_t<Type>
operator () (Type *array, unsigned int length) const
{ return hb_array_t<Type> (array, length); }
template <typename Type, unsigned int length> hb_array_t<Type>
operator () (Type (&array)[length]) const
{ return hb_array_t<Type> (array, length); }
}
HB_FUNCOBJ (hb_iter);
/* Mixin to fill in what the subclass doesn't provide. */ /* Mixin to fill in what the subclass doesn't provide. */
template <typename iter_t, typename item_t = typename iter_t::__item_type__> template <typename iter_t, typename item_t = typename iter_t::__item_t__>
struct hb_iter_mixin_t struct hb_iter_fallback_mixin_t
{ {
private: private:
/* https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern */ /* https://en.wikipedia.org/wiki/Curiously_recurring_template_pattern */
@@ -111,42 +187,645 @@ struct hb_iter_mixin_t
public: public:
/* Access: Implement __item__(), or __item_at__() if random-access. */ /* Access: Implement __item__(), or __item_at__() if random-access. */
item_t& __item__ () const { return thiz()->item_at (0); } item_t __item__ () const { return (*thiz())[0]; }
item_t& __item_at__ (unsigned i) const { return *(thiz() + i); } item_t __item_at__ (unsigned i) const { return *(*thiz() + i); }
/* Termination: Implement __more__(), or __len__() if random-access. */ /* Termination: Implement __more__(), or __len__() if random-access. */
bool __more__ () const { return thiz()->__len__ (); } bool __more__ () const { return bool (thiz()->len ()); }
unsigned __len__ () const unsigned __len__ () const
{ iter_t c (*thiz()); unsigned l = 0; while (c) { c++; l++; }; return l; } { iter_t c (*thiz()); unsigned l = 0; while (c) { c++; l++; } return l; }
/* Advancing: Implement __next__(), or __forward__() if random-access. */ /* Advancing: Implement __next__(), or __forward__() if random-access. */
void __next__ () { thiz()->forward (1); } void __next__ () { *thiz() += 1; }
void __forward__ (unsigned n) { while (n--) thiz()->next (); } void __forward__ (unsigned n) { while (*thiz() && n--) ++*thiz(); }
/* Rewinding: Implement __prev__() or __rewind__() if bidirectional. */ /* Rewinding: Implement __prev__() or __rewind__() if bidirectional. */
void __prev__ () { thiz()->rewind (1); } void __prev__ () { *thiz() -= 1; }
void __rewind__ (unsigned n) { while (n--) thiz()->prev (); } void __rewind__ (unsigned n) { while (*thiz() && n--) --*thiz(); }
/* Random access: Return true if item_at(), len(), forward() are fast. */ /* Range-based for: Implement __end__() if can be done faster,
bool __random_access__ () const { return false; } * and operator!=. */
}; iter_t __end__ () const
/* Functions operating on iterators or iteratables. */
template <typename C, typename V> inline void
hb_fill (const C& c, const V &v)
{ {
for (typename C::iter_t i (c); i; i++) if (thiz()->is_random_access_iterator)
hb_assign (*i, v); return *thiz() + thiz()->len ();
/* Above expression loops twice. Following loops once. */
auto it = *thiz();
while (it) ++it;
return it;
} }
template <typename S, typename D> inline bool protected:
hb_copy (hb_iter_t<D> &id, hb_iter_t<S> &is) hb_iter_fallback_mixin_t () = default;
hb_iter_fallback_mixin_t (const hb_iter_fallback_mixin_t &o HB_UNUSED) = default;
hb_iter_fallback_mixin_t (hb_iter_fallback_mixin_t &&o HB_UNUSED) = default;
hb_iter_fallback_mixin_t& operator = (const hb_iter_fallback_mixin_t &o HB_UNUSED) = default;
hb_iter_fallback_mixin_t& operator = (hb_iter_fallback_mixin_t &&o HB_UNUSED) = default;
};
template <typename iter_t, typename item_t = typename iter_t::__item_t__>
struct hb_iter_with_fallback_t :
hb_iter_t<iter_t, item_t>,
hb_iter_fallback_mixin_t<iter_t, item_t>
{ {
for (; id && is; ++id, ++is) protected:
*id = *is; hb_iter_with_fallback_t () = default;
return !is; hb_iter_with_fallback_t (const hb_iter_with_fallback_t &o HB_UNUSED) = default;
hb_iter_with_fallback_t (hb_iter_with_fallback_t &&o HB_UNUSED) = default;
hb_iter_with_fallback_t& operator = (const hb_iter_with_fallback_t &o HB_UNUSED) = default;
hb_iter_with_fallback_t& operator = (hb_iter_with_fallback_t &&o HB_UNUSED) = default;
};
/*
* Meta-programming predicates.
*/
/* hb_is_iterator() / hb_is_iterator_of() */
template<typename Iter, typename Item>
struct hb_is_iterator_of
{
template <typename Item2 = Item>
static hb_true_type impl (hb_priority<2>, hb_iter_t<Iter, hb_type_identity<Item2>> *);
static hb_false_type impl (hb_priority<0>, const void *);
public:
static constexpr bool value = decltype (impl (hb_prioritize, hb_declval (Iter*)))::value;
};
#define hb_is_iterator_of(Iter, Item) hb_is_iterator_of<Iter, Item>::value
#define hb_is_iterator(Iter) hb_is_iterator_of (Iter, typename Iter::item_t)
/* hb_is_iterable() */
template <typename T>
struct hb_is_iterable
{
private:
template <typename U>
static auto impl (hb_priority<1>) -> decltype (hb_declval (U).iter (), hb_true_type ());
template <typename>
static hb_false_type impl (hb_priority<0>);
public:
static constexpr bool value = decltype (impl<T> (hb_prioritize))::value;
};
#define hb_is_iterable(Iterable) hb_is_iterable<Iterable>::value
/* hb_is_source_of() / hb_is_sink_of() */
template<typename Iter, typename Item>
struct hb_is_source_of
{
private:
template <typename Iter2 = Iter,
hb_enable_if (hb_is_convertible (typename Iter2::item_t, hb_add_lvalue_reference<hb_add_const<Item>>))>
static hb_true_type impl (hb_priority<2>);
template <typename Iter2 = Iter>
static auto impl (hb_priority<1>) -> decltype (hb_declval (Iter2) >> hb_declval (Item &), hb_true_type ());
static hb_false_type impl (hb_priority<0>);
public:
static constexpr bool value = decltype (impl (hb_prioritize))::value;
};
#define hb_is_source_of(Iter, Item) hb_is_source_of<Iter, Item>::value
template<typename Iter, typename Item>
struct hb_is_sink_of
{
private:
template <typename Iter2 = Iter,
hb_enable_if (hb_is_convertible (typename Iter2::item_t, hb_add_lvalue_reference<Item>))>
static hb_true_type impl (hb_priority<2>);
template <typename Iter2 = Iter>
static auto impl (hb_priority<1>) -> decltype (hb_declval (Iter2) << hb_declval (Item), hb_true_type ());
static hb_false_type impl (hb_priority<0>);
public:
static constexpr bool value = decltype (impl (hb_prioritize))::value;
};
#define hb_is_sink_of(Iter, Item) hb_is_sink_of<Iter, Item>::value
/* This is commonly used, so define: */
#define hb_is_sorted_source_of(Iter, Item) \
(hb_is_source_of(Iter, Item) && Iter::is_sorted_iterator)
/* Range-based 'for' for iterables. */
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
static inline auto begin (Iterable&& iterable) HB_AUTO_RETURN (hb_iter (iterable).begin ())
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
static inline auto end (Iterable&& iterable) HB_AUTO_RETURN (hb_iter (iterable).end ())
/* begin()/end() are NOT looked up non-ADL. So each namespace must declare them.
* Do it for namespace OT. */
namespace OT {
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
static inline auto begin (Iterable&& iterable) HB_AUTO_RETURN (hb_iter (iterable).begin ())
template <typename Iterable,
hb_requires (hb_is_iterable (Iterable))>
static inline auto end (Iterable&& iterable) HB_AUTO_RETURN (hb_iter (iterable).end ())
}
/*
* Adaptors, combiners, etc.
*/
template <typename Lhs, typename Rhs,
hb_requires (hb_is_iterator (Lhs))>
static inline auto
operator | (Lhs&& lhs, Rhs&& rhs) HB_AUTO_RETURN (hb_forward<Rhs> (rhs) (hb_forward<Lhs> (lhs)))
/* hb_map(), hb_filter(), hb_reduce() */
enum class hb_function_sortedness_t {
NOT_SORTED,
RETAINS_SORTING,
SORTED,
};
template <typename Iter, typename Proj, hb_function_sortedness_t Sorted,
hb_requires (hb_is_iterator (Iter))>
struct hb_map_iter_t :
hb_iter_t<hb_map_iter_t<Iter, Proj, Sorted>,
decltype (hb_get (hb_declval (Proj), *hb_declval (Iter)))>
{
hb_map_iter_t (const Iter& it, Proj f_) : it (it), f (f_) {}
typedef decltype (hb_get (hb_declval (Proj), *hb_declval (Iter))) __item_t__;
static constexpr bool is_random_access_iterator = Iter::is_random_access_iterator;
static constexpr bool is_sorted_iterator =
Sorted == hb_function_sortedness_t::SORTED ? true :
Sorted == hb_function_sortedness_t::RETAINS_SORTING ? Iter::is_sorted_iterator :
false;
__item_t__ __item__ () const { return hb_get (f.get (), *it); }
__item_t__ __item_at__ (unsigned i) const { return hb_get (f.get (), it[i]); }
bool __more__ () const { return bool (it); }
unsigned __len__ () const { return it.len (); }
void __next__ () { ++it; }
void __forward__ (unsigned n) { it += n; }
void __prev__ () { --it; }
void __rewind__ (unsigned n) { it -= n; }
hb_map_iter_t __end__ () const { return hb_map_iter_t (it.end (), f); }
bool operator != (const hb_map_iter_t& o) const
{ return it != o.it; }
private:
Iter it;
hb_reference_wrapper<Proj> f;
};
template <typename Proj, hb_function_sortedness_t Sorted>
struct hb_map_iter_factory_t
{
hb_map_iter_factory_t (Proj f) : f (f) {}
template <typename Iter,
hb_requires (hb_is_iterator (Iter))>
hb_map_iter_t<Iter, Proj, Sorted>
operator () (Iter it)
{ return hb_map_iter_t<Iter, Proj, Sorted> (it, f); }
private:
Proj f;
};
struct
{
template <typename Proj>
hb_map_iter_factory_t<Proj, hb_function_sortedness_t::NOT_SORTED>
operator () (Proj&& f) const
{ return hb_map_iter_factory_t<Proj, hb_function_sortedness_t::NOT_SORTED> (f); }
}
HB_FUNCOBJ (hb_map);
struct
{
template <typename Proj>
hb_map_iter_factory_t<Proj, hb_function_sortedness_t::RETAINS_SORTING>
operator () (Proj&& f) const
{ return hb_map_iter_factory_t<Proj, hb_function_sortedness_t::RETAINS_SORTING> (f); }
}
HB_FUNCOBJ (hb_map_retains_sorting);
struct
{
template <typename Proj>
hb_map_iter_factory_t<Proj, hb_function_sortedness_t::SORTED>
operator () (Proj&& f) const
{ return hb_map_iter_factory_t<Proj, hb_function_sortedness_t::SORTED> (f); }
}
HB_FUNCOBJ (hb_map_sorted);
template <typename Iter, typename Pred, typename Proj,
hb_requires (hb_is_iterator (Iter))>
struct hb_filter_iter_t :
hb_iter_with_fallback_t<hb_filter_iter_t<Iter, Pred, Proj>,
typename Iter::item_t>
{
hb_filter_iter_t (const Iter& it_, Pred p_, Proj f_) : it (it_), p (p_), f (f_)
{ while (it && !hb_has (p.get (), hb_get (f.get (), *it))) ++it; }
typedef typename Iter::item_t __item_t__;
static constexpr bool is_sorted_iterator = Iter::is_sorted_iterator;
__item_t__ __item__ () const { return *it; }
bool __more__ () const { return bool (it); }
void __next__ () { do ++it; while (it && !hb_has (p.get (), hb_get (f.get (), *it))); }
void __prev__ () { do --it; while (it && !hb_has (p.get (), hb_get (f.get (), *it))); }
hb_filter_iter_t __end__ () const { return hb_filter_iter_t (it.end (), p, f); }
bool operator != (const hb_filter_iter_t& o) const
{ return it != o.it; }
private:
Iter it;
hb_reference_wrapper<Pred> p;
hb_reference_wrapper<Proj> f;
};
template <typename Pred, typename Proj>
struct hb_filter_iter_factory_t
{
hb_filter_iter_factory_t (Pred p, Proj f) : p (p), f (f) {}
template <typename Iter,
hb_requires (hb_is_iterator (Iter))>
hb_filter_iter_t<Iter, Pred, Proj>
operator () (Iter it)
{ return hb_filter_iter_t<Iter, Pred, Proj> (it, p, f); }
private:
Pred p;
Proj f;
};
struct
{
template <typename Pred = decltype ((hb_identity)),
typename Proj = decltype ((hb_identity))>
hb_filter_iter_factory_t<Pred, Proj>
operator () (Pred&& p = hb_identity, Proj&& f = hb_identity) const
{ return hb_filter_iter_factory_t<Pred, Proj> (p, f); }
}
HB_FUNCOBJ (hb_filter);
template <typename Redu, typename InitT>
struct hb_reduce_t
{
hb_reduce_t (Redu r, InitT init_value) : r (r), init_value (init_value) {}
template <typename Iter,
hb_requires (hb_is_iterator (Iter)),
typename AccuT = decltype (hb_declval (Redu) (hb_declval (InitT), hb_declval (typename Iter::item_t)))>
AccuT
operator () (Iter it)
{
AccuT value = init_value;
for (; it; ++it)
value = r (value, *it);
return value;
}
private:
Redu r;
InitT init_value;
};
struct
{
template <typename Redu, typename InitT>
hb_reduce_t<Redu, InitT>
operator () (Redu&& r, InitT init_value) const
{ return hb_reduce_t<Redu, InitT> (r, init_value); }
}
HB_FUNCOBJ (hb_reduce);
/* hb_zip() */
template <typename A, typename B>
struct hb_zip_iter_t :
hb_iter_t<hb_zip_iter_t<A, B>,
hb_pair_t<typename A::item_t, typename B::item_t>>
{
hb_zip_iter_t () {}
hb_zip_iter_t (const A& a, const B& b) : a (a), b (b) {}
typedef hb_pair_t<typename A::item_t, typename B::item_t> __item_t__;
static constexpr bool is_random_access_iterator =
A::is_random_access_iterator &&
B::is_random_access_iterator;
/* Note. The following categorization is only valid if A is strictly sorted,
* ie. does NOT have duplicates. Previously I tried to categorize sortedness
* more granularly, see commits:
*
* 513762849a683914fc266a17ddf38f133cccf072
* 4d3cf2adb669c345cc43832d11689271995e160a
*
* However, that was not enough, since hb_sorted_array_t, hb_sorted_vector_t,
* SortedArrayOf, etc all needed to be updated to add more variants. At that
* point I saw it not worth the effort, and instead we now deem all sorted
* collections as essentially strictly-sorted for the purposes of zip.
*
* The above assumption is not as bad as it sounds. Our "sorted" comes with
* no guarantees. It's just a contract, put in place to help you remember,
* and think about, whether an iterator you receive is expected to be
* sorted or not. As such, it's not perfect by definition, and should not
* be treated so. The inaccuracy here just errs in the direction of being
* more permissive, so your code compiles instead of erring on the side of
* marking your zipped iterator unsorted in which case your code won't
* compile.
*
* This semantical limitation does NOT affect logic in any other place I
* know of as of this writing.
*/
static constexpr bool is_sorted_iterator = A::is_sorted_iterator;
__item_t__ __item__ () const { return __item_t__ (*a, *b); }
__item_t__ __item_at__ (unsigned i) const { return __item_t__ (a[i], b[i]); }
bool __more__ () const { return bool (a) && bool (b); }
unsigned __len__ () const { return hb_min (a.len (), b.len ()); }
void __next__ () { ++a; ++b; }
void __forward__ (unsigned n) { a += n; b += n; }
void __prev__ () { --a; --b; }
void __rewind__ (unsigned n) { a -= n; b -= n; }
hb_zip_iter_t __end__ () const { return hb_zip_iter_t (a.end (), b.end ()); }
/* Note, we should stop if ANY of the iters reaches end. As such two compare
* unequal if both items are unequal, NOT if either is unequal. */
bool operator != (const hb_zip_iter_t& o) const
{ return a != o.a && b != o.b; }
private:
A a;
B b;
};
struct
{
template <typename A, typename B,
hb_requires (hb_is_iterable (A) && hb_is_iterable (B))>
hb_zip_iter_t<hb_iter_type<A>, hb_iter_type<B>>
operator () (A&& a, B&& b) const
{ return hb_zip_iter_t<hb_iter_type<A>, hb_iter_type<B>> (hb_iter (a), hb_iter (b)); }
}
HB_FUNCOBJ (hb_zip);
/* hb_apply() */
template <typename Appl>
struct hb_apply_t
{
hb_apply_t (Appl a) : a (a) {}
template <typename Iter,
hb_requires (hb_is_iterator (Iter))>
void operator () (Iter it)
{
for (; it; ++it)
(void) hb_invoke (a, *it);
}
private:
Appl a;
};
struct
{
template <typename Appl> hb_apply_t<Appl>
operator () (Appl&& a) const
{ return hb_apply_t<Appl> (a); }
template <typename Appl> hb_apply_t<Appl&>
operator () (Appl *a) const
{ return hb_apply_t<Appl&> (*a); }
}
HB_FUNCOBJ (hb_apply);
/* hb_iota()/hb_range() */
template <typename T, typename S>
struct hb_counter_iter_t :
hb_iter_t<hb_counter_iter_t<T, S>, T>
{
hb_counter_iter_t (T start, T end_, S step) : v (start), end_ (end_for (start, end_, step)), step (step) {}
typedef T __item_t__;
static constexpr bool is_random_access_iterator = true;
static constexpr bool is_sorted_iterator = true;
__item_t__ __item__ () const { return +v; }
__item_t__ __item_at__ (unsigned j) const { return v + j * step; }
bool __more__ () const { return v != end_; }
unsigned __len__ () const { return !step ? UINT_MAX : (end_ - v) / step; }
void __next__ () { v += step; }
void __forward__ (unsigned n) { v += n * step; }
void __prev__ () { v -= step; }
void __rewind__ (unsigned n) { v -= n * step; }
hb_counter_iter_t __end__ () const { return hb_counter_iter_t (end_, end_, step); }
bool operator != (const hb_counter_iter_t& o) const
{ return v != o.v; }
private:
static inline T end_for (T start, T end_, S step)
{
if (!step)
return end_;
auto res = (end_ - start) % step;
if (!res)
return end_;
end_ += step - res;
return end_;
}
private:
T v;
T end_;
S step;
};
/* hb_iota (start, step): endless progression from start.  The sentinel is
 * the saturating max (positive step) or min (negative step) of T. */
struct
{
  template <typename T = unsigned, typename S = unsigned> hb_counter_iter_t<T, S>
  operator () (T start = 0u, S&& step = 1u) const
  { return hb_counter_iter_t<T, S> (start, step >= 0 ? hb_int_max (T) : hb_int_min (T), step); }
}
HB_FUNCOBJ (hb_iota);

/* hb_range (end) or hb_range (start, end, step): bounded progression. */
struct
{
  /* [0, end) with step 1. */
  template <typename T = unsigned> hb_counter_iter_t<T, unsigned>
  operator () (T end = (unsigned) -1) const
  { return hb_counter_iter_t<T, unsigned> (0, end, 1u); }

  /* [start, end) with explicit step. */
  template <typename T, typename S = unsigned> hb_counter_iter_t<T, S>
  operator () (T start, T end, S&& step = 1u) const
  { return hb_counter_iter_t<T, S> (start, end, step); }
}
HB_FUNCOBJ (hb_range);

/* hb_enumerate */

/* Pairs every item of an iterable with a running index starting at `start`,
 * by zipping with an hb_iota counter. */
struct
{
  template <typename Iterable,
	    typename Index = unsigned,
	    hb_requires (hb_is_iterable (Iterable))>
  auto operator () (Iterable&& it, Index start = 0u) const HB_AUTO_RETURN
  ( hb_zip (hb_iota (start), it) )
}
HB_FUNCOBJ (hb_enumerate);
/* hb_sink() */

/* Consumer object: pumps every item of an iterator into the wrapped
 * sink through its operator<<. */
template <typename SinkT>
struct hb_sink_t
{
  hb_sink_t (SinkT sink_) : sink (sink_) {}

  template <typename Iter,
	    hb_requires (hb_is_iterator (Iter))>
  void operator () (Iter it)
  {
    while (it)
    {
      sink << *it;
      ++it;
    }
  }

  private:
  SinkT sink;
};
/* Factory for hb_sink_t: a sink passed by value/rvalue is stored by value;
 * a sink passed by pointer is stored by reference (caller keeps ownership). */
struct
{
  template <typename S> hb_sink_t<S>
  operator () (S&& sink) const
  { return hb_sink_t<S> (sink); }

  template <typename S> hb_sink_t<S&>
  operator () (S *sink) const
  { return hb_sink_t<S&> (*sink); }
}
HB_FUNCOBJ (hb_sink);

/* hb-drain: hb_sink to void / blackhole / /dev/null. */

/* Exhausts an iterator, discarding every item (forces evaluation of a
 * lazy pipeline for its side effects). */
struct
{
  template <typename Iter,
	    hb_requires (hb_is_iterator (Iter))>
  void operator () (Iter it) const
  {
    while (it)
    {
      (void) *it;
      ++it;
    }
  }
}
HB_FUNCOBJ (hb_drain);
/* hb_unzip(): unzip and sink to two sinks. */

/* Splits an iterator of pairs: each item's .first is sent to the first
 * sink, its .second to the second. */
template <typename Sink1, typename Sink2>
struct hb_unzip_t
{
  hb_unzip_t (Sink1 s1, Sink2 s2) : s1 (s1), s2 (s2) {}

  template <typename Iter,
	    hb_requires (hb_is_iterator (Iter))>
  void operator () (Iter it)
  {
    while (it)
    {
      const auto &item = *it;
      s1 << item.first;
      s2 << item.second;
      ++it;
    }
  }

  private:
  Sink1 s1;
  Sink2 s2;
};

/* Factory for hb_unzip_t: by-value sinks are copied; pointer sinks are
 * held by reference. */
struct
{
  template <typename Sink1, typename Sink2> hb_unzip_t<Sink1, Sink2>
  operator () (Sink1&& s1, Sink2&& s2) const
  { return hb_unzip_t<Sink1, Sink2> (s1, s2); }

  template <typename Sink1, typename Sink2> hb_unzip_t<Sink1&, Sink2&>
  operator () (Sink1 *s1, Sink2 *s2) const
  { return hb_unzip_t<Sink1&, Sink2&> (*s1, *s2); }
}
HB_FUNCOBJ (hb_unzip);
/* hb-all, hb-any, hb-none. */

/* True iff every projected item of the iterable matches the predicate. */
struct
{
  template <typename Iterable,
	    typename Pred = decltype ((hb_identity)),
	    typename Proj = decltype ((hb_identity)),
	    hb_requires (hb_is_iterable (Iterable))>
  bool operator () (Iterable&& c,
		    Pred&& p = hb_identity,
		    Proj&& f = hb_identity) const
  {
    auto iter = hb_iter (c);
    while (iter)
    {
      if (!hb_match (hb_forward<Pred> (p), hb_get (hb_forward<Proj> (f), *iter)))
	return false;
      ++iter;
    }
    return true;
  }
}
HB_FUNCOBJ (hb_all);

/* True iff at least one projected item matches the predicate. */
struct
{
  template <typename Iterable,
	    typename Pred = decltype ((hb_identity)),
	    typename Proj = decltype ((hb_identity)),
	    hb_requires (hb_is_iterable (Iterable))>
  bool operator () (Iterable&& c,
		    Pred&& p = hb_identity,
		    Proj&& f = hb_identity) const
  {
    auto iter = hb_iter (c);
    while (iter)
    {
      if (hb_match (hb_forward<Pred> (p), hb_get (hb_forward<Proj> (f), *iter)))
	return true;
      ++iter;
    }
    return false;
  }
}
HB_FUNCOBJ (hb_any);

/* True iff no projected item matches the predicate. */
struct
{
  template <typename Iterable,
	    typename Pred = decltype ((hb_identity)),
	    typename Proj = decltype ((hb_identity)),
	    hb_requires (hb_is_iterable (Iterable))>
  bool operator () (Iterable&& c,
		    Pred&& p = hb_identity,
		    Proj&& f = hb_identity) const
  {
    auto iter = hb_iter (c);
    while (iter)
    {
      if (hb_match (hb_forward<Pred> (p), hb_get (hb_forward<Proj> (f), *iter)))
	return false;
      ++iter;
    }
    return true;
  }
}
HB_FUNCOBJ (hb_none);
/*
 * Algorithms operating on iterators.
 */

/* Assigns value v to every position of iterable c. */
template <typename C, typename V,
	  hb_requires (hb_is_iterable (C))>
inline void
hb_fill (C& c, const V &v)
{
  auto it = hb_iter (c);
  while (it)
  {
    *it = v;
    ++it;
  }
}
/* Copies every item of iterable `is` into destination sink `id` by piping
 * its iterator into hb_sink. */
template <typename S, typename D>
inline void
hb_copy (S&& is, D&& id)
{
  hb_iter (is) | hb_sink (id);
/* NOTE(review): the doubled closing brace below looks like diff-extraction
 * residue — confirm against upstream hb-iter.hh before relying on it. */
} }

View File

@@ -32,8 +32,9 @@
#include "hb.hh" #include "hb.hh"
#include "hb-blob.hh" #include "hb-blob.hh"
#include "hb-array.hh" #include "hb-dispatch.hh"
#include "hb-vector.hh" #include "hb-sanitize.hh"
#include "hb-serialize.hh"
/* /*
@@ -143,617 +144,6 @@ static inline Type& StructAfter(TObject &X)
DEFINE_SIZE_ARRAY(size, array) DEFINE_SIZE_ARRAY(size, array)
/*
 * Dispatch
 */

/* Common base for dispatch contexts (sanitize, serialize, ...).  `Context`
 * is the derived class (CRTP), used to fetch its default_return_value();
 * `Return` is the result type of a dispatch; `MaxDebugDepth` bounds debug
 * tracing depth. */
template <typename Context, typename Return, unsigned int MaxDebugDepth>
struct hb_dispatch_context_t
{
  static constexpr unsigned max_debug_depth = MaxDebugDepth;
  typedef Return return_t;
  /* May dispatch descend into obj's format? Default: always. */
  template <typename T, typename F>
  bool may_dispatch (const T *obj HB_UNUSED, const F *format HB_UNUSED) { return true; }
  /* Value returned when dispatch is skipped. */
  static return_t no_dispatch_return_value () { return Context::default_return_value (); }
  /* Should sublookup iteration stop given result r? Default: never. */
  static bool stop_sublookup_iteration (const return_t r HB_UNUSED) { return false; }
};
/*
* Sanitize
*
*
* === Introduction ===
*
* The sanitize machinery is at the core of our zero-cost font loading. We
* mmap() font file into memory and create a blob out of it. Font subtables
* are returned as a readonly sub-blob of the main font blob. These table
* blobs are then sanitized before use, to ensure invalid memory access does
* not happen. The toplevel sanitize API use is like, eg. to load the 'head'
* table:
*
* hb_blob_t *head_blob = hb_sanitize_context_t ().reference_table<OT::head> (face);
*
* The blob then can be converted to a head table struct with:
*
* const head *head_table = head_blob->as<head> ();
*
* What the reference_table does is, to call hb_face_reference_table() to load
* the table blob, sanitize it and return either the sanitized blob, or empty
* blob if sanitization failed. The blob->as() function returns the null
* object of its template type argument if the blob is empty. Otherwise, it
* just casts the blob contents to the desired type.
*
* Sanitizing a blob of data with a type T works as follows (with minor
* simplification):
*
* - Cast blob content to T*, call sanitize() method of it,
* - If sanitize succeeded, return blob.
* - Otherwise, if blob is not writable, try making it writable,
* or copy if cannot be made writable in-place,
* - Call sanitize() again. Return blob if sanitize succeeded.
* - Return empty blob otherwise.
*
*
* === The sanitize() contract ===
*
* The sanitize() method of each object type shall return true if it's safe to
* call other methods of the object, and false otherwise.
*
* Note that what sanitize() checks for might align with what the specification
* describes as valid table data, but does not have to be. In particular, we
* do NOT want to be pedantic and concern ourselves with validity checks that
* are irrelevant to our use of the table. On the contrary, we want to be
* lenient with error handling and accept invalid data to the extent that it
* does not impose extra burden on us.
*
* Based on the sanitize contract, one can see that what we check for depends
* on how we use the data in other table methods. Ie. if other table methods
* assume that offsets do NOT point out of the table data block, then that's
* something sanitize() must check for (GSUB/GPOS/GDEF/etc work this way). On
* the other hand, if other methods do such checks themselves, then sanitize()
* does not have to bother with them (glyf/local work this way). The choice
* depends on the table structure and sanitize() performance. For example, to
* check glyf/loca offsets in sanitize() would cost O(num-glyphs). We try hard
* to avoid such costs during font loading. By postponing such checks to the
* actual glyph loading, we reduce the sanitize cost to O(1) and total runtime
* cost to O(used-glyphs). As such, this is preferred.
*
* The same argument can be made re GSUB/GPOS/GDEF, but there, the table
* structure is so complicated that by checking all offsets at sanitize() time,
* we make the code much simpler in other methods, as offsets and referenced
* objects do not need to be validated at each use site.
*/
/* This limits sanitizing time on really broken fonts. */
#ifndef HB_SANITIZE_MAX_EDITS
#define HB_SANITIZE_MAX_EDITS 32
#endif
/* Per-byte multiplier used to derive the sanitize operation budget. */
#ifndef HB_SANITIZE_MAX_OPS_FACTOR
#define HB_SANITIZE_MAX_OPS_FACTOR 8
#endif
/* Floor of the operation budget, so tiny tables still get some slack. */
#ifndef HB_SANITIZE_MAX_OPS_MIN
#define HB_SANITIZE_MAX_OPS_MIN 16384
#endif
/* Ceiling of the operation budget. */
#ifndef HB_SANITIZE_MAX_OPS_MAX
#define HB_SANITIZE_MAX_OPS_MAX 0x3FFFFFFF
#endif
/* Context object driving blob sanitization (see the long comment above for
 * the overall contract).  Tracks the [start, end) window being validated,
 * an operation budget (max_ops), and edit bookkeeping for the two-round
 * sanitize-with-edits scheme in sanitize_blob(). */
struct hb_sanitize_context_t :
       hb_dispatch_context_t<hb_sanitize_context_t, bool, HB_DEBUG_SANITIZE>
{
  hb_sanitize_context_t () :
	debug_depth (0),
	start (nullptr), end (nullptr),
	max_ops (0),
	writable (false), edit_count (0),
	blob (nullptr),
	num_glyphs (65536),
	num_glyphs_set (false) {}

  const char *get_name () { return "SANITIZE"; }
  /* Dispatch may proceed only if the format itself sanitizes cleanly. */
  template <typename T, typename F>
  bool may_dispatch (const T *obj HB_UNUSED, const F *format)
  { return format->sanitize (this); }
  template <typename T>
  return_t dispatch (const T &obj) { return obj.sanitize (this); }
  static return_t default_return_value () { return true; }
  static return_t no_dispatch_return_value () { return false; }
  /* A false result anywhere aborts sublookup iteration. */
  bool stop_sublookup_iteration (const return_t r) const { return !r; }

  /* Takes a reference on the blob; released in end_processing(). */
  void init (hb_blob_t *b)
  {
    this->blob = hb_blob_reference (b);
    this->writable = false;
  }

  void set_num_glyphs (unsigned int num_glyphs_)
  {
    num_glyphs = num_glyphs_;
    num_glyphs_set = true;
  }
  unsigned int get_num_glyphs () { return num_glyphs; }

  void set_max_ops (int max_ops_) { max_ops = max_ops_; }

  /* Narrows the [start, end) window to obj's extent within the blob, or
   * nulls the window if obj lies outside the blob entirely. */
  template <typename T>
  void set_object (const T *obj)
  {
    reset_object ();

    if (!obj) return;

    const char *obj_start = (const char *) obj;
    if (unlikely (obj_start < this->start || this->end <= obj_start))
      this->start = this->end = nullptr;
    else
    {
      this->start = obj_start;
      /* Clamp to blob end in case obj's claimed size overruns it. */
      this->end   = obj_start + MIN<uintptr_t> (this->end - obj_start, obj->get_size ());
    }
  }

  /* Restores the window to the whole blob. */
  void reset_object ()
  {
    this->start = this->blob->data;
    this->end = this->start + this->blob->length;
    assert (this->start <= this->end); /* Must not overflow. */
  }

  /* Resets counters and derives the operation budget from blob size. */
  void start_processing ()
  {
    reset_object ();
    this->max_ops = MAX ((unsigned int) (this->end - this->start) * HB_SANITIZE_MAX_OPS_FACTOR,
			 (unsigned) HB_SANITIZE_MAX_OPS_MIN);
    this->edit_count = 0;
    this->debug_depth = 0;

    DEBUG_MSG_LEVEL (SANITIZE, start, 0, +1,
		     "start [%p..%p] (%lu bytes)",
		     this->start, this->end,
		     (unsigned long) (this->end - this->start));
  }

  /* Drops the blob reference and clears the window. */
  void end_processing ()
  {
    DEBUG_MSG_LEVEL (SANITIZE, this->start, 0, -1,
		     "end [%p..%p] %u edit requests",
		     this->start, this->end, this->edit_count);

    hb_blob_destroy (this->blob);
    this->blob = nullptr;
    this->start = this->end = nullptr;
  }

  /* Core bounds check: [base, base+len) must fit in the window.  Each call
   * decrements the (mutable) operation budget; running out fails checks. */
  bool check_range (const void *base,
		    unsigned int len) const
  {
    const char *p = (const char *) base;
    bool ok = !len ||
	      (this->start <= p &&
	       p <= this->end &&
	       (unsigned int) (this->end - p) >= len &&
	       this->max_ops-- > 0);

    DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
		     "check_range [%p..%p]"
		     " (%d bytes) in [%p..%p] -> %s",
		     p, p + len, len,
		     this->start, this->end,
		     ok ? "OK" : "OUT-OF-RANGE");

    return likely (ok);
  }

  /* a*b bytes, guarding the multiplication against overflow. */
  template <typename T>
  bool check_range (const T *base,
		    unsigned int a,
		    unsigned int b) const
  {
    return !hb_unsigned_mul_overflows (a, b) &&
	   this->check_range (base, a * b);
  }

  /* a*b*c bytes, guarding a*b against overflow (b*c checked downstream). */
  template <typename T>
  bool check_range (const T *base,
		    unsigned int a,
		    unsigned int b,
		    unsigned int c) const
  {
    return !hb_unsigned_mul_overflows (a, b) &&
	   this->check_range (base, a * b, c);
  }

  /* len elements of T. */
  template <typename T>
  bool check_array (const T *base, unsigned int len) const
  {
    return this->check_range (base, len, hb_static_size (T));
  }

  /* a*b elements of T. */
  template <typename T>
  bool check_array (const T *base,
		    unsigned int a,
		    unsigned int b) const
  {
    return this->check_range (base, a, b, hb_static_size (T));
  }

  /* Checks the fixed (min_size) prefix of a struct. */
  template <typename Type>
  bool check_struct (const Type *obj) const
  { return likely (this->check_range (obj, obj->min_size)); }

  /* Whether an in-place edit is permitted; counts every request against
   * HB_SANITIZE_MAX_EDITS regardless of outcome. */
  bool may_edit (const void *base, unsigned int len)
  {
    if (this->edit_count >= HB_SANITIZE_MAX_EDITS)
      return false;

    const char *p = (const char *) base;
    this->edit_count++;

    DEBUG_MSG_LEVEL (SANITIZE, p, this->debug_depth+1, 0,
       "may_edit(%u) [%p..%p] (%d bytes) in [%p..%p] -> %s",
       this->edit_count,
       p, p + len, len,
       this->start, this->end,
       this->writable ? "GRANTED" : "DENIED");

    return this->writable;
  }

  /* Writes v into *obj if editing is allowed; returns whether it did. */
  template <typename Type, typename ValueType>
  bool try_set (const Type *obj, const ValueType &v)
  {
    if (this->may_edit (obj, hb_static_size (Type)))
    {
      hb_assign (* const_cast<Type *> (obj), v);
      return true;
    }
    return false;
  }

  /* Sanitizes blob as a Type.  On success returns the blob (made
   * immutable); on failure destroys it and returns the empty blob.  If the
   * first pass requested edits, the blob is made writable and re-sanitized;
   * a second pass must then be edit-free or the blob is rejected. */
  template <typename Type>
  hb_blob_t *sanitize_blob (hb_blob_t *blob)
  {
    bool sane;

    init (blob);

  retry:
    DEBUG_MSG_FUNC (SANITIZE, start, "start");

    start_processing ();

    if (unlikely (!start))
    {
      end_processing ();
      return blob;
    }

    Type *t = CastP<Type> (const_cast<char *> (start));

    sane = t->sanitize (this);
    if (sane)
    {
      if (edit_count)
      {
	DEBUG_MSG_FUNC (SANITIZE, start, "passed first round with %d edits; going for second round", edit_count);

	/* sanitize again to ensure no toe-stepping */
	edit_count = 0;
	sane = t->sanitize (this);
	if (edit_count) {
	  DEBUG_MSG_FUNC (SANITIZE, start, "requested %d edits in second round; FAILLING", edit_count);
	  sane = false;
	}
      }
    }
    else
    {
      if (edit_count && !writable) {
	start = hb_blob_get_data_writable (blob, nullptr);
	end = start + blob->length;

	if (start)
	{
	  writable = true;
	  /* ok, we made it writable by relocating.  try again */
	  DEBUG_MSG_FUNC (SANITIZE, start, "retry");
	  goto retry;
	}
      }
    }

    end_processing ();

    DEBUG_MSG_FUNC (SANITIZE, start, sane ? "PASSED" : "FAILED");
    if (sane)
    {
      hb_blob_make_immutable (blob);
      return blob;
    }
    else
    {
      hb_blob_destroy (blob);
      return hb_blob_get_empty ();
    }
  }

  /* Loads face's table and returns it sanitized (or the empty blob). */
  template <typename Type>
  hb_blob_t *reference_table (const hb_face_t *face, hb_tag_t tableTag = Type::tableTag)
  {
    if (!num_glyphs_set)
      set_num_glyphs (hb_face_get_glyph_count (face));
    return sanitize_blob<Type> (hb_face_reference_table (face, tableTag));
  }

  mutable unsigned int debug_depth;
  const char *start, *end;       /* Current validation window. */
  mutable int max_ops;           /* Remaining operation budget; mutable so
				  * const check_range() can decrement it. */
  private:
  bool writable;
  unsigned int edit_count;
  hb_blob_t *blob;
  unsigned int num_glyphs;
  bool num_glyphs_set;
};
/* RAII helper: narrows the sanitize context's window to one object for the
 * lifetime of this scope, restoring the full-blob window on destruction. */
struct hb_sanitize_with_object_t
{
  template <typename T>
  hb_sanitize_with_object_t (hb_sanitize_context_t *c,
			     const T& obj) : c (c)
  { c->set_object (obj); }
  ~hb_sanitize_with_object_t ()
  { c->reset_object (); }

  private:
  hb_sanitize_context_t *c;
};
/*
 * Serialize
 */

/* Bump-allocating serializer over a caller-provided buffer [start, end).
 * `head` is the write cursor; any allocation failure latches `successful`
 * to false and all subsequent operations become no-ops. */
struct hb_serialize_context_t
{
  hb_serialize_context_t (void *start_, unsigned int size)
  {
    this->start = (char *) start_;
    this->end = this->start + size;
    reset ();
  }

  bool in_error () const { return !this->successful; }

  /* Rewinds the cursor and clears the error latch. */
  void reset ()
  {
    this->successful = true;
    this->head = this->start;
    this->debug_depth = 0;
  }

  /* propagate_error: folds failure from value(s)/pointer(s)/bool into the
   * context's error latch; returns the updated success state. */
  bool propagate_error (bool e)
  { return this->successful = this->successful && e; }

  template <typename T> bool propagate_error (const T &obj)
  { return this->successful = this->successful && !obj.in_error (); }

  template <typename T> bool propagate_error (const T *obj)
  { return this->successful = this->successful && !obj->in_error (); }

  template <typename T1, typename T2> bool propagate_error (T1 &o1, T2 &o2)
  { return propagate_error (o1) && propagate_error (o2); }

  template <typename T1, typename T2> bool propagate_error (T1 *o1, T2 *o2)
  { return propagate_error (o1) && propagate_error (o2); }

  template <typename T1, typename T2, typename T3>
  bool propagate_error (T1 &o1, T2 &o2, T3 &o3)
  { return propagate_error (o1) && propagate_error (o2, o3); }

  template <typename T1, typename T2, typename T3>
  bool propagate_error (T1 *o1, T2 *o2, T3 *o3)
  { return propagate_error (o1) && propagate_error (o2, o3); }

  /* To be called around main operation. */
  template <typename Type>
  Type *start_serialize ()
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, +1,
		     "start [%p..%p] (%lu bytes)",
		     this->start, this->end,
		     (unsigned long) (this->end - this->start));

    return start_embed<Type> ();
  }

  void end_serialize ()
  {
    DEBUG_MSG_LEVEL (SERIALIZE, this->start, 0, -1,
		     "end [%p..%p] serialized %d bytes; %s",
		     this->start, this->end,
		     (int) (this->head - this->start),
		     this->successful ? "successful" : "UNSUCCESSFUL");
  }

  /* Bytes written so far. */
  unsigned int length () const { return this->head - this->start; }

  /* Pads with zero bytes until length() is a multiple of alignment. */
  void align (unsigned int alignment)
  {
    unsigned int l = length () % alignment;
    if (l)
      allocate_size<void> (alignment - l);
  }

  /* Returns the cursor as a Type* WITHOUT allocating; pair with
   * extend_size()/extend_min() to claim the bytes. */
  template <typename Type>
  Type *start_embed (const Type *_ HB_UNUSED = nullptr) const
  {
    Type *ret = reinterpret_cast<Type *> (this->head);
    return ret;
  }

  /* Claims `size` zero-initialized bytes; nullptr (and error latch) on
   * overflow of the buffer. */
  template <typename Type>
  Type *allocate_size (unsigned int size)
  {
    if (unlikely (!this->successful || this->end - this->head < ptrdiff_t (size))) {
      this->successful = false;
      return nullptr;
    }
    memset (this->head, 0, size);
    char *ret = this->head;
    this->head += size;
    return reinterpret_cast<Type *> (ret);
  }

  template <typename Type>
  Type *allocate_min ()
  {
    return this->allocate_size<Type> (Type::min_size);
  }

  /* Copies obj (its full get_size() bytes) into the buffer. */
  template <typename Type>
  Type *embed (const Type &obj)
  {
    unsigned int size = obj.get_size ();
    Type *ret = this->allocate_size<Type> (size);
    if (unlikely (!ret)) return nullptr;
    memcpy (ret, &obj, size);
    return ret;
  }
  /* Sink interface: stream objects in with `<<`. */
  template <typename Type>
  hb_serialize_context_t &operator << (const Type &obj) { embed (obj); return *this; }

  /* Grows a previously started object so it spans `size` bytes total.
   * obj must lie within [start, head] (i.e. already (partly) serialized). */
  template <typename Type>
  Type *extend_size (Type &obj, unsigned int size)
  {
    assert (this->start <= (char *) &obj);
    assert ((char *) &obj <= this->head);
    assert ((char *) &obj + size >= this->head);
    if (unlikely (!this->allocate_size<Type> (((char *) &obj) + size - this->head))) return nullptr;
    return reinterpret_cast<Type *> (&obj);
  }

  template <typename Type>
  Type *extend_min (Type &obj) { return extend_size (obj, obj.min_size); }
  template <typename Type>
  Type *extend (Type &obj) { return extend_size (obj, obj.get_size ()); }

  /* Output routines. */

  /* malloc'ed copy of the serialized bytes; caller owns (may be nullptr). */
  template <typename Type>
  Type *copy () const
  {
    assert (this->successful);
    unsigned int len = this->head - this->start;
    void *p = malloc (len);
    if (p)
      memcpy (p, this->start, len);
    return reinterpret_cast<Type *> (p);
  }
  /* Same, wrapped as hb_bytes_t; empty on allocation failure. */
  hb_bytes_t copy_bytes () const
  {
    assert (this->successful);
    unsigned int len = this->head - this->start;
    void *p = malloc (len);
    if (p)
      memcpy (p, this->start, len);
    else
      return hb_bytes_t ();
    return hb_bytes_t ((char *) p, len);
  }
  /* Same, as a blob that duplicates the serialized bytes. */
  hb_blob_t *copy_blob () const
  {
    assert (this->successful);
    return hb_blob_create (this->start,
			   this->head - this->start,
			   HB_MEMORY_MODE_DUPLICATE,
			   nullptr, nullptr);
  }

  public:
  unsigned int debug_depth;
  char *start, *end, *head;  /* Buffer bounds and write cursor. */
  bool successful;           /* Error latch; false once anything failed. */
};
/*
 * Big-endian integers.
 */

/* Primary template; specialized below for 1/2/3/4-byte widths.  All
 * specializations store raw bytes, so the type has alignment 1. */
template <typename Type, int Bytes> struct BEInt;

/* Single byte: trivial passthrough. */
template <typename Type>
struct BEInt<Type, 1>
{
  public:
  void set (Type V) { v = V; }
  operator Type () const { return v; }
  private: uint8_t v;
};
/* Two bytes, most-significant first. */
template <typename Type>
struct BEInt<Type, 2>
{
  public:
  void set (Type V)
  {
    v[0] = (V >>  8) & 0xFF;
    v[1] = (V      ) & 0xFF;
  }
  operator Type () const
  {
#if ((defined(__GNUC__) && __GNUC__ >= 5) || defined(__clang__)) && \
    defined(__BYTE_ORDER) && \
    (__BYTE_ORDER == __LITTLE_ENDIAN || __BYTE_ORDER == __BIG_ENDIAN)
    /* Spoon-feed the compiler a big-endian integer with alignment 1.
     * https://github.com/harfbuzz/harfbuzz/pull/1398 */
    struct __attribute__((packed)) packed_uint16_t { uint16_t v; };
#if __BYTE_ORDER == __LITTLE_ENDIAN
    return __builtin_bswap16 (((packed_uint16_t *) this)->v);
#else /* __BYTE_ORDER == __BIG_ENDIAN */
    return ((packed_uint16_t *) this)->v;
#endif
#endif
    /* Portable fallback; only reached when the fast path above is
     * compiled out. */
    return (v[0] <<  8)
	 + (v[1]      );
  }
  private: uint8_t v[2];
};
/* Three bytes (24-bit), most-significant first, alignment 1. */
template <typename Type>
struct BEInt<Type, 3>
{
  public:
  void set (Type V)
  {
    v[0] = uint8_t ((V >> 16) & 0xFF);
    v[1] = uint8_t ((V >>  8) & 0xFF);
    v[2] = uint8_t ( V        & 0xFF);
  }
  operator Type () const
  { return Type ((v[0] << 16) | (v[1] << 8) | v[2]); }
  private: uint8_t v[3];
};
/* Four bytes, most-significant first, alignment 1. */
template <typename Type>
struct BEInt<Type, 4>
{
  public:
  typedef Type type;
  void set (Type V)
  {
    v[0] = uint8_t ((V >> 24) & 0xFF);
    v[1] = uint8_t ((V >> 16) & 0xFF);
    v[2] = uint8_t ((V >>  8) & 0xFF);
    v[3] = uint8_t ( V        & 0xFF);
  }
  operator Type () const
  { return Type ((v[0] << 24) | (v[1] << 16) | (v[2] << 8) | v[3]); }
  private: uint8_t v[4];
};
/* /*
* Lazy loaders. * Lazy loaders.
@@ -816,7 +206,7 @@ struct hb_lazy_loader_t : hb_data_wrapper_t<Data, WheresData>
const Returned * operator -> () const { return get (); } const Returned * operator -> () const { return get (); }
const Returned & operator * () const { return *get (); } const Returned & operator * () const { return *get (); }
explicit_operator bool () const explicit operator bool () const
{ return get_stored () != Funcs::get_null (); } { return get_stored () != Funcs::get_null (); }
template <typename C> operator const C * () const { return get (); } template <typename C> operator const C * () const { return get (); }

View File

@@ -30,31 +30,39 @@
#include "hb.hh" #include "hb.hh"
/* Scatters an integral value into 32 bits using Knuth's multiplicative
 * hashing (multiplier 2654435761 ~= 2^32 / golden ratio). */
template <typename T>
inline uint32_t Hash (const T &v)
{
  uint32_t u = (uint32_t) v;
  return u * 2654435761u;
}
/* /*
* hb_map_t * hb_hashmap_t
*/ */
struct hb_map_t template <typename K, typename V,
K kINVALID = hb_is_pointer (K) ? 0 : hb_is_signed (K) ? hb_int_min (K) : (K) -1,
V vINVALID = hb_is_pointer (V) ? 0 : hb_is_signed (V) ? hb_int_min (V) : (V) -1>
struct hb_hashmap_t
{ {
HB_NO_COPY_ASSIGN (hb_map_t); HB_DELETE_COPY_ASSIGN (hb_hashmap_t);
hb_map_t () { init (); } hb_hashmap_t () { init (); }
~hb_map_t () { fini (); } ~hb_hashmap_t () { fini (); }
static_assert (hb_is_integral (K) || hb_is_pointer (K), "");
static_assert (hb_is_integral (V) || hb_is_pointer (V), "");
/* TODO If key type is a pointer, keep hash in item_t and use to:
* 1. avoid rehashing when resizing table, and
* 2. compare hash before comparing keys, for speed.
*/
struct item_t struct item_t
{ {
hb_codepoint_t key; K key;
hb_codepoint_t value; V value;
bool is_unused () const { return key == INVALID; } void clear () { key = kINVALID; value = vINVALID; }
bool is_tombstone () const { return key != INVALID && value == INVALID; }
bool operator == (K o) { return hb_deref (key) == hb_deref (o); }
bool operator == (const item_t &o) { return *this == o.key; }
bool is_unused () const { return key == kINVALID; }
bool is_tombstone () const { return key != kINVALID && value == vINVALID; }
bool is_real () const { return key != kINVALID && value != vINVALID; }
hb_pair_t<K, V> get_pair() const { return hb_pair_t<K, V> (key, value); }
}; };
hb_object_header_t header; hb_object_header_t header;
@@ -82,14 +90,22 @@ struct hb_map_t
{ {
free (items); free (items);
items = nullptr; items = nullptr;
population = occupancy = 0;
} }
void fini () void fini ()
{ {
population = occupancy = 0;
hb_object_fini (this); hb_object_fini (this);
fini_shallow (); fini_shallow ();
} }
void reset ()
{
if (unlikely (hb_object_is_immutable (this)))
return;
successful = true;
clear ();
}
bool in_error () const { return !successful; } bool in_error () const { return !successful; }
bool resize () bool resize ()
@@ -104,7 +120,9 @@ struct hb_map_t
successful = false; successful = false;
return false; return false;
} }
memset (new_items, 0xFF, (size_t) new_size * sizeof (item_t)); + hb_iter (new_items, new_size)
| hb_apply (&item_t::clear)
;
unsigned int old_size = mask + 1; unsigned int old_size = mask + 1;
item_t *old_items = items; item_t *old_items = items;
@@ -118,7 +136,7 @@ struct hb_map_t
/* Insert back old items. */ /* Insert back old items. */
if (old_items) if (old_items)
for (unsigned int i = 0; i < old_size; i++) for (unsigned int i = 0; i < old_size; i++)
if (old_items[i].key != INVALID && old_items[i].value != INVALID) if (old_items[i].is_real ())
set (old_items[i].key, old_items[i].value); set (old_items[i].key, old_items[i].value);
free (old_items); free (old_items);
@@ -126,14 +144,14 @@ struct hb_map_t
return true; return true;
} }
void set (hb_codepoint_t key, hb_codepoint_t value) void set (K key, V value)
{ {
if (unlikely (!successful)) return; if (unlikely (!successful)) return;
if (unlikely (key == INVALID)) return; if (unlikely (key == kINVALID)) return;
if ((occupancy + occupancy / 2) >= mask && !resize ()) return; if ((occupancy + occupancy / 2) >= mask && !resize ()) return;
unsigned int i = bucket_for (key); unsigned int i = bucket_for (key);
if (value == INVALID && items[i].key != key) if (value == vINVALID && items[i].key != key)
return; /* Trying to delete non-existent key. */ return; /* Trying to delete non-existent key. */
if (!items[i].is_unused ()) if (!items[i].is_unused ())
@@ -151,26 +169,37 @@ struct hb_map_t
population++; population++;
} }
hb_codepoint_t get (hb_codepoint_t key) const V get (K key) const
{ {
if (unlikely (!items)) return INVALID; if (unlikely (!items)) return vINVALID;
unsigned int i = bucket_for (key); unsigned int i = bucket_for (key);
return items[i].key == key ? items[i].value : INVALID; return items[i].is_real () && items[i] == key ? items[i].value : vINVALID;
} }
void del (hb_codepoint_t key) { set (key, INVALID); } void del (K key) { set (key, vINVALID); }
bool has (hb_codepoint_t key) const /* Has interface. */
{ return get (key) != INVALID; } static constexpr V SENTINEL = vINVALID;
typedef V value_t;
hb_codepoint_t operator [] (unsigned int key) const value_t operator [] (K k) const { return get (k); }
{ return get (key); } bool has (K k, V *vp = nullptr) const
{
static constexpr hb_codepoint_t INVALID = HB_MAP_VALUE_INVALID; V v = (*this)[k];
if (vp) *vp = v;
return v != SENTINEL;
}
/* Projection. */
V operator () (K k) const { return get (k); }
void clear () void clear ()
{ {
if (items) memset (items, 0xFF, ((size_t) mask + 1) * sizeof (item_t)); if (unlikely (hb_object_is_immutable (this)))
return;
if (items)
+ hb_iter (items, mask + 1)
| hb_apply (&item_t::clear)
;
population = occupancy = 0; population = occupancy = 0;
} }
@@ -178,22 +207,50 @@ struct hb_map_t
unsigned int get_population () const { return population; } unsigned int get_population () const { return population; }
/*
* Iterator
*/
auto iter () const HB_AUTO_RETURN
(
+ hb_array (items, mask ? mask + 1 : 0)
| hb_filter (&item_t::is_real)
| hb_map (&item_t::get_pair)
)
auto keys () const HB_AUTO_RETURN
(
+ hb_array (items, mask ? mask + 1 : 0)
| hb_filter (&item_t::is_real)
| hb_map (&item_t::key)
| hb_map (hb_ridentity)
)
auto values () const HB_AUTO_RETURN
(
+ hb_array (items, mask ? mask + 1 : 0)
| hb_filter (&item_t::is_real)
| hb_map (&item_t::value)
| hb_map (hb_ridentity)
)
/* Sink interface. */
hb_hashmap_t<K, V, kINVALID, vINVALID>& operator << (const hb_pair_t<K, V>& v)
{ set (v.first, v.second); return *this; }
protected: protected:
unsigned int bucket_for (hb_codepoint_t key) const unsigned int bucket_for (K key) const
{ {
unsigned int i = Hash (key) % prime; unsigned int i = hb_hash (key) % prime;
unsigned int step = 0; unsigned int step = 0;
unsigned int tombstone = INVALID; unsigned int tombstone = (unsigned) -1;
while (!items[i].is_unused ()) while (!items[i].is_unused ())
{ {
if (items[i].key == key) if (items[i] == key)
return i; return i;
if (tombstone == INVALID && items[i].is_tombstone ()) if (tombstone == (unsigned) -1 && items[i].is_tombstone ())
tombstone = i; tombstone = i;
i = (i + ++step) & mask; i = (i + ++step) & mask;
} }
return tombstone == INVALID ? i : tombstone; return tombstone == (unsigned) -1 ? i : tombstone;
} }
static unsigned int prime_for (unsigned int shift) static unsigned int prime_for (unsigned int shift)
@@ -248,5 +305,14 @@ struct hb_map_t
} }
}; };
/*
* hb_map_t
*/
struct hb_map_t : hb_hashmap_t<hb_codepoint_t,
hb_codepoint_t,
HB_MAP_VALUE_INVALID,
HB_MAP_VALUE_INVALID> {};
#endif /* HB_MAP_HH */ #endif /* HB_MAP_HH */

400
gfx/harfbuzz/src/hb-meta.hh Normal file
View File

@@ -0,0 +1,400 @@
/*
* Copyright © 2018 Google, Inc.
*
* This is part of HarfBuzz, a text shaping library.
*
* Permission is hereby granted, without written agreement and without
* license or royalty fees, to use, copy, modify, and distribute this
* software and its documentation for any purpose, provided that the
* above copyright notice and the following two paragraphs appear in
* all copies of this software.
*
* IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES
* ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN
* IF THE COPYRIGHT HOLDER HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
* THE COPYRIGHT HOLDER SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING,
* BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
* Google Author(s): Behdad Esfahbod
*/
#ifndef HB_META_HH
#define HB_META_HH
#include "hb.hh"
/*
 * C++ template meta-programming & fundamentals used with them.
 */

/* Void! For when we need a expression-type of void. */
struct hb_empty_t {};

/* https://en.cppreference.com/w/cpp/types/void_t */
template<typename... Ts> struct _hb_void_t { typedef void type; };
template<typename... Ts> using hb_void_t = typename _hb_void_t<Ts...>::type;

/* First type of a parameter pack. */
template<typename Head, typename... Ts> struct _hb_head_t { typedef Head type; };
template<typename... Ts> using hb_head_t = typename _hb_head_t<Ts...>::type;

/* Minimal stand-ins for std::integral_constant / bool_constant. */
template <typename T, T v> struct hb_integral_constant { static constexpr T value = v; };
template <bool b> using hb_bool_constant = hb_integral_constant<bool, b>;
using hb_true_type = hb_bool_constant<true>;
using hb_false_type = hb_bool_constant<false>;

/* Basic type SFINAE. */

template <bool B, typename T = void> struct hb_enable_if {};
template <typename T> struct hb_enable_if<true, T> { typedef T type; };
/* Used as an extra defaulted template parameter to disable overloads. */
#define hb_enable_if(Cond) typename hb_enable_if<(Cond)>::type* = nullptr
/* Concepts/Requires alias: */
#define hb_requires(Cond) hb_enable_if((Cond))

template <typename T, typename T2> struct hb_is_same : hb_false_type {};
template <typename T> struct hb_is_same<T, T> : hb_true_type {};
#define hb_is_same(T, T2) hb_is_same<T, T2>::value

/* Function overloading SFINAE and priority. */
#define HB_RETURN(Ret, E) -> hb_head_t<Ret, decltype ((E))> { return (E); }
#define HB_AUTO_RETURN(E) -> decltype ((E)) { return (E); }
#define HB_VOID_RETURN(E) -> hb_void_t<decltype ((E))> { (E); }

/* Priority tag: an overload taking hb_priority<N> with larger N is
 * preferred by overload resolution over one taking smaller N. */
template <unsigned Pri> struct hb_priority : hb_priority<Pri - 1> {};
template <> struct hb_priority<0> {};
#define hb_prioritize hb_priority<16> ()

#define HB_FUNCOBJ(x) static_const x HB_UNUSED

/* std::type_identity equivalent. */
template <typename T> struct hb_type_identity_t { typedef T type; };
template <typename T> using hb_type_identity = typename hb_type_identity_t<T>::type;
/* std::addressof equivalent: yields the true address of an object even if
 * its class overloads unary operator&. */
struct
{
  template <typename T>
  T* operator () (T& arg) const
  {
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-align"
    /* https://en.cppreference.com/w/cpp/memory/addressof */
    return reinterpret_cast<T*> (
	     &const_cast<char&> (
		reinterpret_cast<const volatile char&> (arg)));
#pragma GCC diagnostic pop
  }
}
HB_FUNCOBJ (hb_addressof);
/* std::declval equivalent: unevaluated-context value of type T. */
template <typename T> static inline T hb_declval ();
#define hb_declval(T) (hb_declval<T> ())

/* Const detection/removal: ::type strips const, ::value tells if it was there. */
template <typename T> struct hb_match_const : hb_type_identity_t<T>, hb_bool_constant<false>{};
template <typename T> struct hb_match_const<const T> : hb_type_identity_t<T>, hb_bool_constant<true> {};
template <typename T> using hb_remove_const = typename hb_match_const<T>::type;
template <typename T> using hb_add_const = const T;
#define hb_is_const(T) hb_match_const<T>::value

/* Reference detection/removal; matches both lvalue and rvalue references. */
template <typename T> struct hb_match_reference : hb_type_identity_t<T>, hb_bool_constant<false>{};
template <typename T> struct hb_match_reference<T &> : hb_type_identity_t<T>, hb_bool_constant<true> {};
template <typename T> struct hb_match_reference<T &&> : hb_type_identity_t<T>, hb_bool_constant<true> {};
template <typename T> using hb_remove_reference = typename hb_match_reference<T>::type;

/* std::add_lvalue_reference: the priority<1> overload is taken when T& is
 * formable; otherwise (e.g. T = void) falls back to T unchanged. */
template <typename T> auto _hb_try_add_lvalue_reference (hb_priority<1>) -> hb_type_identity<T&>;
template <typename T> auto _hb_try_add_lvalue_reference (hb_priority<0>) -> hb_type_identity<T>;
template <typename T> using hb_add_lvalue_reference = decltype (_hb_try_add_lvalue_reference<T> (hb_prioritize));

/* std::add_rvalue_reference, same scheme. */
template <typename T> auto _hb_try_add_rvalue_reference (hb_priority<1>) -> hb_type_identity<T&&>;
template <typename T> auto _hb_try_add_rvalue_reference (hb_priority<0>) -> hb_type_identity<T>;
template <typename T> using hb_add_rvalue_reference = decltype (_hb_try_add_rvalue_reference<T> (hb_prioritize));
#define hb_is_reference(T) hb_match_reference<T>::value
/* Pointer detection/removal: ::type strips one `*`, ::value tells if present. */
template <typename T> struct hb_match_pointer : hb_type_identity_t<T>, hb_bool_constant<false>{};
template <typename T> struct hb_match_pointer<T *> : hb_type_identity_t<T>, hb_bool_constant<true> {};
template <typename T> using hb_remove_pointer = typename hb_match_pointer<T>::type;

/* std::add_pointer equivalent, using the same priority-tag SFINAE scheme as
 * _hb_try_add_lvalue_reference above: the preferred overload forms
 * hb_remove_reference<T>*; the fallback keeps T unchanged.
 *
 * BUGFIX: the fallback previously also took hb_priority<1>, making the call
 * ambiguous whenever both overloads were viable; it must be the lower
 * priority hb_priority<0>, matching the lvalue/rvalue-reference helpers. */
template <typename T> auto _hb_try_add_pointer (hb_priority<1>) -> hb_type_identity<hb_remove_reference<T>*>;
template <typename T> auto _hb_try_add_pointer (hb_priority<0>) -> hb_type_identity<T>;
template <typename T> using hb_add_pointer = decltype (_hb_try_add_pointer<T> (hb_prioritize));

#define hb_is_pointer(T) hb_match_pointer<T>::value
/* TODO Add feature-parity to std::decay. */
template <typename T> using hb_decay = hb_remove_const<hb_remove_reference<T>>;
/* hb_conditional<B, T, F>: minimal std::conditional replacement.
 * Yields T when B is true, F when B is false. */
template <bool B, class T, class F>
struct _hb_conditional { typedef F type; };
/* Partial specialization selects the "true" branch. */
template <class T, class F>
struct _hb_conditional<true, T, F> { typedef T type; };
template <bool B, class T, class F>
using hb_conditional = typename _hb_conditional<B, T, F>::type;
/* hb_is_convertible (From, To): compile-time implicit-convertibility test,
 * modeled on std::is_convertible.  void-to-void counts as convertible;
 * void to non-void (and vice versa) does not. */
template <typename From, typename To>
struct hb_is_convertible
{
  private:
  /* hb_decay so that e.g. `const void` and `void&&`-like spellings also
   * register as void. */
  static constexpr bool from_void = hb_is_same (void, hb_decay<From>);
  static constexpr bool to_void   = hb_is_same (void, hb_decay<To>  );
  static constexpr bool either_void = from_void || to_void;
  static constexpr bool both_void   = from_void && to_void;

  /* impl2 is callable iff its argument converts to To (or to int when To is
   * void, a case excluded by either_void below). */
  static hb_true_type impl2 (hb_conditional<to_void, int, To>);

  /* Preferred overload participates only when impl2(declval<T>()) is
   * well-formed, i.e. when T implicitly converts to To. */
  template <typename T>
  static auto impl (hb_priority<1>) -> decltype (impl2 (hb_declval (T)));
  template <typename T>
  static hb_false_type impl (hb_priority<0>);
  public:
  static constexpr bool value = both_void ||
		       (!either_void &&
			decltype (impl<hb_conditional<from_void, int, From>> (hb_prioritize))::value);
};
#define hb_is_convertible(From,To) hb_is_convertible<From, To>::value

/* hb_is_base_of: Derived inherits (accessibly, unambiguously) from Base iff
 * Derived* converts to Base* after decay. */
template <typename Base, typename Derived>
using hb_is_base_of = hb_is_convertible<hb_decay<Derived> *, hb_decay<Base> *>;
#define hb_is_base_of(Base,Derived) hb_is_base_of<Base, Derived>::value
/* hb_is_cr_convertible (From, To): true when From and To are the same type
 * modulo const/reference qualifiers, and a const From does not lose its
 * const-ness when treated as a To.
 * NOTE(review): the last conjunct is a tautology — `!hb_is_reference (To) ||
 * ... || hb_is_reference (To)` is always true, so only the first two clauses
 * have any effect.  One of the operands was presumably meant to test From;
 * confirm against upstream before changing behavior. */
template <typename From, typename To>
using hb_is_cr_convertible = hb_bool_constant<
  hb_is_same (hb_decay<From>, hb_decay<To>) &&
  (!hb_is_const (From) || hb_is_const (To)) &&
  (!hb_is_reference (To) || hb_is_const (To) || hb_is_reference (To))
>;
#define hb_is_cr_convertible(From,To) hb_is_cr_convertible<From, To>::value
/* std::move and std::forward re-implemented without <utility>:
 * hb_move unconditionally casts to rvalue reference; the two hb_forward
 * overloads preserve the value category of a forwarded argument (lvalue in,
 * lvalue out; rvalue in, rvalue out). */
template <typename T>
static hb_remove_reference<T>&& hb_move (T&& t) { return (hb_remove_reference<T>&&) (t); }
template <typename T>
static T&& hb_forward (hb_remove_reference<T>& t) { return (T&&) t; }
template <typename T>
static T&& hb_forward (hb_remove_reference<T>&& t) { return (T&&) t; }
/* hb_deref: function object that dereferences a pointer argument (T*
 * overload returns *v) and forwards any other argument through unchanged.
 * HB_AUTO_RETURN / HB_FUNCOBJ are macros defined earlier in this header. */
struct
{
  template <typename T> auto
  operator () (T&& v) const HB_AUTO_RETURN (hb_forward<T> (v))

  template <typename T> auto
  operator () (T *v) const HB_AUTO_RETURN (*v)
}
HB_FUNCOBJ (hb_deref);
/* hb_ref: function object that takes the address of an lvalue argument
 * (via hb_addressof, declared elsewhere in this header) and forwards
 * rvalues through unchanged.  Inverse companion of hb_deref above. */
struct
{
  template <typename T> auto
  operator () (T&& v) const HB_AUTO_RETURN (hb_forward<T> (v))

  template <typename T> auto
  operator () (T& v) const HB_AUTO_RETURN (hb_addressof (v))
}
HB_FUNCOBJ (hb_ref);
/* hb_reference_wrapper: std::reference_wrapper stand-in.  The primary
 * template stores a copy of the value; equality compares stored values. */
template <typename T>
struct hb_reference_wrapper
{
  hb_reference_wrapper (T v) : v (v) {}
  bool operator == (const hb_reference_wrapper& o) const { return v == o.v; }
  bool operator != (const hb_reference_wrapper& o) const { return v != o.v; }
  operator T () const { return v; }
  T get () const { return v; }
  T v; /* Stored by value. */
};
/* Specialization for reference types: stores a pointer to the referent
 * (obtained via hb_addressof, so it works even if T overloads operator&);
 * equality compares the pointers, i.e. referent identity, not value. */
template <typename T>
struct hb_reference_wrapper<T&>
{
  hb_reference_wrapper (T& v) : v (hb_addressof (v)) {}
  bool operator == (const hb_reference_wrapper& o) const { return v == o.v; }
  bool operator != (const hb_reference_wrapper& o) const { return v != o.v; }
  operator T& () const { return *v; }
  T& get () const { return *v; }
  T* v; /* Non-owning pointer to the wrapped object. */
};
/* hb_is_integral: like std::is_integral but restricted to plain/signed/
 * unsigned char and the standard integer types.
 * NOTE(review): bool, wchar_t and char16_t/char32_t are not listed —
 * presumably intentional for this codebase; confirm before relying on it.
 * The trailing `|| false` lets every real operand line end uniformly
 * with `||`. */
template <typename T>
using hb_is_integral = hb_bool_constant<
  hb_is_same (hb_decay<T>, char) ||
  hb_is_same (hb_decay<T>, signed char) ||
  hb_is_same (hb_decay<T>, unsigned char) ||
  hb_is_same (hb_decay<T>, signed int) ||
  hb_is_same (hb_decay<T>, unsigned int) ||
  hb_is_same (hb_decay<T>, signed short) ||
  hb_is_same (hb_decay<T>, unsigned short) ||
  hb_is_same (hb_decay<T>, signed long) ||
  hb_is_same (hb_decay<T>, unsigned long) ||
  hb_is_same (hb_decay<T>, signed long long) ||
  hb_is_same (hb_decay<T>, unsigned long long) ||
  false
>;
#define hb_is_integral(T) hb_is_integral<T>::value
/* hb_is_floating_point: std::is_floating_point stand-in, covering the three
 * standard floating types (after hb_decay). */
template <typename T>
using hb_is_floating_point = hb_bool_constant<
  hb_is_same (hb_decay<T>, float) ||
  hb_is_same (hb_decay<T>, double) ||
  hb_is_same (hb_decay<T>, long double) ||
  false
>;
#define hb_is_floating_point(T) hb_is_floating_point<T>::value
/* hb_is_arithmetic: integral or floating-point (per the two traits above). */
template <typename T>
using hb_is_arithmetic = hb_bool_constant<
  hb_is_integral (T) ||
  hb_is_floating_point (T) ||
  false
>;
#define hb_is_arithmetic(T) hb_is_arithmetic<T>::value
/* hb_is_signed / hb_is_unsigned: determined by whether (T) -1 compares
 * below (T) 0.  Non-arithmetic types yield false for both (the comparison
 * would be ill-formed for them, hence the hb_conditional guard). */
template <typename T>
using hb_is_signed = hb_conditional<hb_is_arithmetic (T),
				    hb_bool_constant<(T) -1 < (T) 0>,
				    hb_false_type>;
#define hb_is_signed(T) hb_is_signed<T>::value
template <typename T>
using hb_is_unsigned = hb_conditional<hb_is_arithmetic (T),
				      hb_bool_constant<(T) 0 < (T) -1>,
				      hb_false_type>;
#define hb_is_unsigned(T) hb_is_unsigned<T>::value
/* hb_int_min: per-type minimum value as a compile-time constant, using the
 * <limits.h> macros instead of std::numeric_limits.  Only the listed
 * specializations exist; other types fail at instantiation. */
template <typename T> struct hb_int_min;
template <> struct hb_int_min<char>			: hb_integral_constant<char,			CHAR_MIN>	{};
template <> struct hb_int_min<signed char>		: hb_integral_constant<signed char,		SCHAR_MIN>	{};
template <> struct hb_int_min<unsigned char>		: hb_integral_constant<unsigned char,		0>		{};
template <> struct hb_int_min<signed short>		: hb_integral_constant<signed short,		SHRT_MIN>	{};
template <> struct hb_int_min<unsigned short>		: hb_integral_constant<unsigned short,		0>		{};
template <> struct hb_int_min<signed int>		: hb_integral_constant<signed int,		INT_MIN>	{};
template <> struct hb_int_min<unsigned int>		: hb_integral_constant<unsigned int,		0>		{};
template <> struct hb_int_min<signed long>		: hb_integral_constant<signed long,		LONG_MIN>	{};
template <> struct hb_int_min<unsigned long>		: hb_integral_constant<unsigned long,		0>		{};
template <> struct hb_int_min<signed long long>		: hb_integral_constant<signed long long,	LLONG_MIN>	{};
template <> struct hb_int_min<unsigned long long>	: hb_integral_constant<unsigned long long,	0>		{};
#define hb_int_min(T) hb_int_min<T>::value
/* hb_int_max: per-type maximum value, companion of hb_int_min above. */
template <typename T> struct hb_int_max;
template <> struct hb_int_max<char>			: hb_integral_constant<char,			CHAR_MAX>	{};
template <> struct hb_int_max<signed char>		: hb_integral_constant<signed char,		SCHAR_MAX>	{};
template <> struct hb_int_max<unsigned char>		: hb_integral_constant<unsigned char,		UCHAR_MAX>	{};
template <> struct hb_int_max<signed short>		: hb_integral_constant<signed short,		SHRT_MAX>	{};
template <> struct hb_int_max<unsigned short>		: hb_integral_constant<unsigned short,		USHRT_MAX>	{};
template <> struct hb_int_max<signed int>		: hb_integral_constant<signed int,		INT_MAX>	{};
template <> struct hb_int_max<unsigned int>		: hb_integral_constant<unsigned int,		UINT_MAX>	{};
template <> struct hb_int_max<signed long>		: hb_integral_constant<signed long,		LONG_MAX>	{};
template <> struct hb_int_max<unsigned long>		: hb_integral_constant<unsigned long,		ULONG_MAX>	{};
template <> struct hb_int_max<signed long long>		: hb_integral_constant<signed long long,	LLONG_MAX>	{};
template <> struct hb_int_max<unsigned long long>	: hb_integral_constant<unsigned long long,	ULLONG_MAX>	{};
#define hb_int_max(T) hb_int_max<T>::value
/* hb_is_destructible: detection-idiom test — the specialization matches
 * only when the expression `declval<T>().~T()` is well-formed (the second
 * parameter is the hb_void_t SFINAE hook). */
template <typename T, typename>
struct _hb_is_destructible : hb_false_type {};
template <typename T>
struct _hb_is_destructible<T, hb_void_t<decltype (hb_declval (T).~T ())>> : hb_true_type {};
template <typename T>
using hb_is_destructible = _hb_is_destructible<T, void>;
#define hb_is_destructible(T) hb_is_destructible<T>::value
/* hb_is_constructible (T, Ts...): same detection idiom, testing whether
 * T (declval<Ts>()...) is a well-formed expression.  The default/copy/move
 * variants below are the usual std:: equivalents expressed through it. */
template <typename T, typename, typename ...Ts>
struct _hb_is_constructible : hb_false_type {};
template <typename T, typename ...Ts>
struct _hb_is_constructible<T, hb_void_t<decltype (T (hb_declval (Ts)...))>, Ts...> : hb_true_type {};
template <typename T, typename ...Ts>
using hb_is_constructible = _hb_is_constructible<T, void, Ts...>;
#define hb_is_constructible(...) hb_is_constructible<__VA_ARGS__>::value

/* Constructible from no arguments. */
template <typename T>
using hb_is_default_constructible = hb_is_constructible<T>;
#define hb_is_default_constructible(T) hb_is_default_constructible<T>::value

/* Constructible from const T&. */
template <typename T>
using hb_is_copy_constructible = hb_is_constructible<T, hb_add_lvalue_reference<hb_add_const<T>>>;
#define hb_is_copy_constructible(T) hb_is_copy_constructible<T>::value

/* NOTE(review): this tests construction from `const T&&`, not `T&&` —
 * std::is_move_constructible uses a non-const rvalue reference; confirm
 * whether the extra const here is intentional. */
template <typename T>
using hb_is_move_constructible = hb_is_constructible<T, hb_add_rvalue_reference<hb_add_const<T>>>;
#define hb_is_move_constructible(T) hb_is_move_constructible<T>::value
/* hb_is_assignable (T, U): detection-idiom test for the well-formedness of
 * `declval<T>() = declval<U>()`.  Copy/move-assignable variants follow the
 * std:: definitions (assign const T& / T&& to a T&). */
template <typename T, typename U, typename>
struct _hb_is_assignable : hb_false_type {};
template <typename T, typename U>
struct _hb_is_assignable<T, U, hb_void_t<decltype (hb_declval (T) = hb_declval (U))>> : hb_true_type {};
template <typename T, typename U>
using hb_is_assignable = _hb_is_assignable<T, U, void>;
#define hb_is_assignable(T,U) hb_is_assignable<T, U>::value
template <typename T>
using hb_is_copy_assignable = hb_is_assignable<hb_add_lvalue_reference<T>,
					       hb_add_lvalue_reference<hb_add_const<T>>>;
#define hb_is_copy_assignable(T) hb_is_copy_assignable<T>::value
template <typename T>
using hb_is_move_assignable = hb_is_assignable<hb_add_lvalue_reference<T>,
					       hb_add_rvalue_reference<T>>;
#define hb_is_move_assignable(T) hb_is_move_assignable<T>::value
/* Trivial versions. */
/* Trick: wrapping T in a union suppresses the union's implicit special
 * member functions unless T's corresponding member is trivial.  So e.g.
 * hb_is_destructible<hb_trivial<T>> holds exactly when T is *trivially*
 * destructible, giving us the trivially_* traits without compiler
 * intrinsics. */
template <typename T> union hb_trivial { T value; };

/* Don't know how to do the following. */
template <typename T>
using hb_is_trivially_destructible= hb_is_destructible<hb_trivial<T>>;
#define hb_is_trivially_destructible(T) hb_is_trivially_destructible<T>::value

/* Don't know how to do the following. */
//template <typename T, typename ...Ts>
//using hb_is_trivially_constructible= hb_is_constructible<hb_trivial<T>, hb_trivial<Ts>...>;
//#define hb_is_trivially_constructible(...) hb_is_trivially_constructible<__VA_ARGS__>::value

template <typename T>
using hb_is_trivially_default_constructible= hb_is_default_constructible<hb_trivial<T>>;
#define hb_is_trivially_default_constructible(T) hb_is_trivially_default_constructible<T>::value

template <typename T>
using hb_is_trivially_copy_constructible= hb_is_copy_constructible<hb_trivial<T>>;
#define hb_is_trivially_copy_constructible(T) hb_is_trivially_copy_constructible<T>::value

template <typename T>
using hb_is_trivially_move_constructible= hb_is_move_constructible<hb_trivial<T>>;
#define hb_is_trivially_move_constructible(T) hb_is_trivially_move_constructible<T>::value

/* Don't know how to do the following. */
//template <typename T, typename U>
//using hb_is_trivially_assignable= hb_is_assignable<hb_trivial<T>, hb_trivial<U>>;
//#define hb_is_trivially_assignable(T,U) hb_is_trivially_assignable<T, U>::value

template <typename T>
using hb_is_trivially_copy_assignable= hb_is_copy_assignable<hb_trivial<T>>;
#define hb_is_trivially_copy_assignable(T) hb_is_trivially_copy_assignable<T>::value

template <typename T>
using hb_is_trivially_move_assignable= hb_is_move_assignable<hb_trivial<T>>;
#define hb_is_trivially_move_assignable(T) hb_is_trivially_move_assignable<T>::value
/* hb_is_trivially_copyable: every copy/move operation the type supports
 * must be trivial, and the destructor must be trivial.  The `!X || Y`
 * pattern means "if the operation exists at all, it is trivial".  The
 * trailing `&& true` keeps every real conjunct line ending in `&&`. */
template <typename T>
using hb_is_trivially_copyable= hb_bool_constant<
  hb_is_trivially_destructible (T) &&
  (!hb_is_move_assignable (T) || hb_is_trivially_move_assignable (T)) &&
  (!hb_is_move_constructible (T) || hb_is_trivially_move_constructible (T)) &&
  (!hb_is_copy_assignable (T) || hb_is_trivially_copy_assignable (T)) &&
  (!hb_is_copy_constructible (T) || hb_is_trivially_copy_constructible (T)) &&
  true
>;
#define hb_is_trivially_copyable(T) hb_is_trivially_copyable<T>::value

/* hb_is_trivial: trivially copyable plus trivially default-constructible,
 * per the standard definition of a trivial type. */
template <typename T>
using hb_is_trivial= hb_bool_constant<
  hb_is_trivially_copyable (T) &&
  hb_is_trivially_default_constructible (T)
>;
#define hb_is_trivial(T) hb_is_trivial<T>::value
#endif /* HB_META_HH */

View File

@@ -48,6 +48,17 @@
/* Defined externally, i.e. in config.h; must have typedef'ed hb_mutex_impl_t as well. */ /* Defined externally, i.e. in config.h; must have typedef'ed hb_mutex_impl_t as well. */
#elif !defined(HB_NO_MT) && (defined(HAVE_PTHREAD) || defined(__APPLE__))
#include <pthread.h>
typedef pthread_mutex_t hb_mutex_impl_t;
#define HB_MUTEX_IMPL_INIT PTHREAD_MUTEX_INITIALIZER
#define hb_mutex_impl_init(M) pthread_mutex_init (M, nullptr)
#define hb_mutex_impl_lock(M) pthread_mutex_lock (M)
#define hb_mutex_impl_unlock(M) pthread_mutex_unlock (M)
#define hb_mutex_impl_finish(M) pthread_mutex_destroy (M)
#elif !defined(HB_NO_MT) && defined(_WIN32) #elif !defined(HB_NO_MT) && defined(_WIN32)
#include <windows.h> #include <windows.h>
@@ -63,17 +74,6 @@ typedef CRITICAL_SECTION hb_mutex_impl_t;
#define hb_mutex_impl_finish(M) DeleteCriticalSection (M) #define hb_mutex_impl_finish(M) DeleteCriticalSection (M)
#elif !defined(HB_NO_MT) && (defined(HAVE_PTHREAD) || defined(__APPLE__))
#include <pthread.h>
typedef pthread_mutex_t hb_mutex_impl_t;
#define HB_MUTEX_IMPL_INIT PTHREAD_MUTEX_INITIALIZER
#define hb_mutex_impl_init(M) pthread_mutex_init (M, nullptr)
#define hb_mutex_impl_lock(M) pthread_mutex_lock (M)
#define hb_mutex_impl_unlock(M) pthread_mutex_unlock (M)
#define hb_mutex_impl_finish(M) pthread_mutex_destroy (M)
#elif !defined(HB_NO_MT) && defined(HAVE_INTEL_ATOMIC_PRIMITIVES) #elif !defined(HB_NO_MT) && defined(HAVE_INTEL_ATOMIC_PRIMITIVES)
#if defined(HAVE_SCHED_H) && defined(HAVE_SCHED_YIELD) #if defined(HAVE_SCHED_H) && defined(HAVE_SCHED_YIELD)
@@ -127,8 +127,6 @@ typedef int hb_mutex_impl_t;
struct hb_mutex_t struct hb_mutex_t
{ {
/* TODO Add tracing. */
hb_mutex_impl_t m; hb_mutex_impl_t m;
void init () { hb_mutex_impl_init (&m); } void init () { hb_mutex_impl_init (&m); }

View File

@@ -28,6 +28,7 @@
#define HB_NULL_HH #define HB_NULL_HH
#include "hb.hh" #include "hb.hh"
#include "hb-meta.hh"
/* /*
@@ -36,7 +37,7 @@
/* Global nul-content Null pool. Enlarge as necessary. */ /* Global nul-content Null pool. Enlarge as necessary. */
#define HB_NULL_POOL_SIZE 9880 #define HB_NULL_POOL_SIZE 384
/* Use SFINAE to sniff whether T has min_size; in which case return T::null_size, /* Use SFINAE to sniff whether T has min_size; in which case return T::null_size,
* otherwise return sizeof(T). */ * otherwise return sizeof(T). */
@@ -45,18 +46,13 @@
* https://stackoverflow.com/questions/7776448/sfinae-tried-with-bool-gives-compiler-error-template-argument-tvalue-invol * https://stackoverflow.com/questions/7776448/sfinae-tried-with-bool-gives-compiler-error-template-argument-tvalue-invol
*/ */
template<bool> struct _hb_bool_type {}; template <typename T, typename>
struct _hb_null_size : hb_integral_constant<unsigned, sizeof (T)> {};
template <typename T, typename B>
struct _hb_null_size
{ enum { value = sizeof (T) }; };
template <typename T> template <typename T>
struct _hb_null_size<T, _hb_bool_type<(bool) (1 + (unsigned int) T::min_size)> > struct _hb_null_size<T, hb_void_t<decltype (T::min_size)>> : hb_integral_constant<unsigned, T::null_size> {};
{ enum { value = T::null_size }; };
template <typename T> template <typename T>
struct hb_null_size using hb_null_size = _hb_null_size<T, void>;
{ enum { value = _hb_null_size<T, _hb_bool_type<true> >::value }; };
#define hb_null_size(T) hb_null_size<T>::value #define hb_null_size(T) hb_null_size<T>::value
/* These doesn't belong here, but since is copy/paste from above, put it here. */ /* These doesn't belong here, but since is copy/paste from above, put it here. */
@@ -64,38 +60,15 @@ struct hb_null_size
/* hb_static_size (T) /* hb_static_size (T)
* Returns T::static_size if T::min_size is defined, or sizeof (T) otherwise. */ * Returns T::static_size if T::min_size is defined, or sizeof (T) otherwise. */
template <typename T, typename B> template <typename T, typename>
struct _hb_static_size struct _hb_static_size : hb_integral_constant<unsigned, sizeof (T)> {};
{ enum { value = sizeof (T) }; };
template <typename T> template <typename T>
struct _hb_static_size<T, _hb_bool_type<(bool) (1 + (unsigned int) T::min_size)> > struct _hb_static_size<T, hb_void_t<decltype (T::min_size)>> : hb_integral_constant<unsigned, T::static_size> {};
{ enum { value = T::static_size }; };
template <typename T> template <typename T>
struct hb_static_size using hb_static_size = _hb_static_size<T, void>;
{ enum { value = _hb_static_size<T, _hb_bool_type<true> >::value }; };
#define hb_static_size(T) hb_static_size<T>::value #define hb_static_size(T) hb_static_size<T>::value
/* hb_assign (obj, value)
* Calls obj.set (value) if obj.min_size is defined and value has different type
* from obj, or obj = v otherwise. */
template <typename T, typename V, typename B>
struct _hb_assign
{ static inline void value (T &o, const V v) { o = v; } };
template <typename T, typename V>
struct _hb_assign<T, V, _hb_bool_type<(bool) (1 + (unsigned int) T::min_size)> >
{ static inline void value (T &o, const V v) { o.set (v); } };
template <typename T>
struct _hb_assign<T, T, _hb_bool_type<(bool) (1 + (unsigned int) T::min_size)> >
{ static inline void value (T &o, const T v) { o = v; } };
template <typename T, typename V>
static inline void hb_assign (T &o, const V v)
{ _hb_assign<T, V, _hb_bool_type<true> >::value (o, v); }
/* /*
* Null() * Null()
*/ */
@@ -115,7 +88,7 @@ struct Null {
template <typename QType> template <typename QType>
struct NullHelper struct NullHelper
{ {
typedef typename hb_remove_const (typename hb_remove_reference (QType)) Type; typedef hb_remove_const<hb_remove_reference<QType>> Type;
static const Type & get_null () { return Null<Type>::get_null (); } static const Type & get_null () { return Null<Type>::get_null (); }
}; };
#define Null(Type) NullHelper<Type>::get_null () #define Null(Type) NullHelper<Type>::get_null ()
@@ -168,7 +141,7 @@ static inline Type& Crap () {
template <typename QType> template <typename QType>
struct CrapHelper struct CrapHelper
{ {
typedef typename hb_remove_const (typename hb_remove_reference (QType)) Type; typedef hb_remove_const<hb_remove_reference<QType>> Type;
static Type & get_crap () { return Crap<Type> (); } static Type & get_crap () { return Crap<Type> (); }
}; };
#define Crap(Type) CrapHelper<Type>::get_crap () #define Crap(Type) CrapHelper<Type>::get_crap ()
@@ -191,7 +164,7 @@ struct CrapOrNullHelper<const Type> {
template <typename P> template <typename P>
struct hb_nonnull_ptr_t struct hb_nonnull_ptr_t
{ {
typedef typename hb_remove_pointer (P) T; typedef hb_remove_pointer<P> T;
hb_nonnull_ptr_t (T *v_ = nullptr) : v (v_) {} hb_nonnull_ptr_t (T *v_ = nullptr) : v (v_) {}
T * operator = (T *v_) { return v = v_; } T * operator = (T *v_) { return v = v_; }

View File

@@ -56,7 +56,7 @@ typedef struct TableRecord
{ {
int cmp (Tag t) const { return -t.cmp (tag); } int cmp (Tag t) const { return -t.cmp (tag); }
static int cmp (const void *pa, const void *pb) HB_INTERNAL static int cmp (const void *pa, const void *pb)
{ {
const TableRecord *a = (const TableRecord *) pa; const TableRecord *a = (const TableRecord *) pa;
const TableRecord *b = (const TableRecord *) pb; const TableRecord *b = (const TableRecord *) pb;
@@ -94,7 +94,7 @@ typedef struct OffsetTable
if (start_offset >= tables.len) if (start_offset >= tables.len)
*table_count = 0; *table_count = 0;
else else
*table_count = MIN<unsigned int> (*table_count, tables.len - start_offset); *table_count = hb_min (*table_count, tables.len - start_offset);
const TableRecord *sub_tables = tables.arrayZ + start_offset; const TableRecord *sub_tables = tables.arrayZ + start_offset;
unsigned int count = *table_count; unsigned int count = *table_count;
@@ -106,7 +106,7 @@ typedef struct OffsetTable
bool find_table_index (hb_tag_t tag, unsigned int *table_index) const bool find_table_index (hb_tag_t tag, unsigned int *table_index) const
{ {
Tag t; Tag t;
t.set (tag); t = tag;
return tables.bfind (t, table_index, HB_BFIND_NOT_FOUND_STORE, Index::NOT_FOUND_INDEX); return tables.bfind (t, table_index, HB_BFIND_NOT_FOUND_STORE, Index::NOT_FOUND_INDEX);
} }
const TableRecord& get_table_by_tag (hb_tag_t tag) const const TableRecord& get_table_by_tag (hb_tag_t tag) const
@@ -127,7 +127,7 @@ typedef struct OffsetTable
/* Alloc 12 for the OTHeader. */ /* Alloc 12 for the OTHeader. */
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
/* Write sfntVersion (bytes 0..3). */ /* Write sfntVersion (bytes 0..3). */
sfnt_version.set (sfnt_tag); sfnt_version = sfnt_tag;
/* Take space for numTables, searchRange, entrySelector, RangeShift /* Take space for numTables, searchRange, entrySelector, RangeShift
* and the TableRecords themselves. */ * and the TableRecords themselves. */
if (unlikely (!tables.serialize (c, items.length))) return_trace (false); if (unlikely (!tables.serialize (c, items.length))) return_trace (false);
@@ -140,8 +140,8 @@ typedef struct OffsetTable
{ {
TableRecord &rec = tables.arrayZ[i]; TableRecord &rec = tables.arrayZ[i];
hb_blob_t *blob = items[i].blob; hb_blob_t *blob = items[i].blob;
rec.tag.set (items[i].tag); rec.tag = items[i].tag;
rec.length.set (hb_blob_get_length (blob)); rec.length = hb_blob_get_length (blob);
rec.offset.serialize (c, this); rec.offset.serialize (c, this);
/* Allocate room for the table and copy it. */ /* Allocate room for the table and copy it. */
@@ -159,7 +159,7 @@ typedef struct OffsetTable
{ {
head *h = (head *) start; head *h = (head *) start;
checksum_adjustment = &h->checkSumAdjustment; checksum_adjustment = &h->checkSumAdjustment;
checksum_adjustment->set (0); *checksum_adjustment = 0;
} }
rec.checkSum.set_for_data (start, end - start); rec.checkSum.set_for_data (start, end - start);
@@ -177,10 +177,10 @@ typedef struct OffsetTable
for (unsigned int i = 0; i < items.length; i++) for (unsigned int i = 0; i < items.length; i++)
{ {
TableRecord &rec = tables.arrayZ[i]; TableRecord &rec = tables.arrayZ[i];
checksum.set (checksum + rec.checkSum); checksum = checksum + rec.checkSum;
} }
checksum_adjustment->set (0xB1B0AFBAu - checksum); *checksum_adjustment = 0xB1B0AFBAu - checksum;
} }
return_trace (true); return_trace (true);

View File

@@ -52,22 +52,19 @@ namespace OT {
* Int types * Int types
*/ */
template <bool is_signed> struct hb_signedness_int;
template <> struct hb_signedness_int<false> { typedef unsigned int value; };
template <> struct hb_signedness_int<true> { typedef signed int value; };
/* Integer types in big-endian order and no alignment requirement */ /* Integer types in big-endian order and no alignment requirement */
template <typename Type, unsigned int Size> template <typename Type, unsigned int Size>
struct IntType struct IntType
{ {
typedef Type type; typedef Type type;
typedef typename hb_signedness_int<hb_is_signed<Type>::value>::value wide_type; typedef hb_conditional<hb_is_signed (Type), signed, unsigned> wide_type;
void set (wide_type i) { v.set (i); } IntType<Type, Size>& operator = (wide_type i) { v = i; return *this; }
operator wide_type () const { return v; } operator wide_type () const { return v; }
bool operator == (const IntType<Type,Size> &o) const { return (Type) v == (Type) o.v; } bool operator == (const IntType<Type,Size> &o) const { return (Type) v == (Type) o.v; }
bool operator != (const IntType<Type,Size> &o) const { return !(*this == o); } bool operator != (const IntType<Type,Size> &o) const { return !(*this == o); }
static int cmp (const IntType<Type,Size> *a, const IntType<Type,Size> *b) { return b->cmp (*a); } HB_INTERNAL static int cmp (const IntType<Type,Size> *a, const IntType<Type,Size> *b)
{ return b->cmp (*a); }
template <typename Type2> template <typename Type2>
int cmp (Type2 a) const int cmp (Type2 a) const
{ {
@@ -110,9 +107,10 @@ typedef HBUINT16 UFWORD;
/* 16-bit signed fixed number with the low 14 bits of fraction (2.14). */ /* 16-bit signed fixed number with the low 14 bits of fraction (2.14). */
struct F2DOT14 : HBINT16 struct F2DOT14 : HBINT16
{ {
F2DOT14& operator = (uint16_t i ) { HBINT16::operator= (i); return *this; }
// 16384 means 1<<14 // 16384 means 1<<14
float to_float () const { return ((int32_t) v) / 16384.f; } float to_float () const { return ((int32_t) v) / 16384.f; }
void set_float (float f) { v.set (round (f * 16384.f)); } void set_float (float f) { v = roundf (f * 16384.f); }
public: public:
DEFINE_SIZE_STATIC (2); DEFINE_SIZE_STATIC (2);
}; };
@@ -120,9 +118,10 @@ struct F2DOT14 : HBINT16
/* 32-bit signed fixed-point number (16.16). */ /* 32-bit signed fixed-point number (16.16). */
struct Fixed : HBINT32 struct Fixed : HBINT32
{ {
Fixed& operator = (uint32_t i) { HBINT32::operator= (i); return *this; }
// 65536 means 1<<16 // 65536 means 1<<16
float to_float () const { return ((int32_t) v) / 65536.f; } float to_float () const { return ((int32_t) v) / 65536.f; }
void set_float (float f) { v.set (round (f * 65536.f)); } void set_float (float f) { v = roundf (f * 65536.f); }
public: public:
DEFINE_SIZE_STATIC (4); DEFINE_SIZE_STATIC (4);
}; };
@@ -147,6 +146,7 @@ struct LONGDATETIME
* system, feature, or baseline */ * system, feature, or baseline */
struct Tag : HBUINT32 struct Tag : HBUINT32
{ {
Tag& operator = (uint32_t i) { HBUINT32::operator= (i); return *this; }
/* What the char* converters return is NOT nul-terminated. Print using "%.4s" */ /* What the char* converters return is NOT nul-terminated. Print using "%.4s" */
operator const char* () const { return reinterpret_cast<const char *> (&this->v); } operator const char* () const { return reinterpret_cast<const char *> (&this->v); }
operator char* () { return reinterpret_cast<char *> (&this->v); } operator char* () { return reinterpret_cast<char *> (&this->v); }
@@ -155,11 +155,15 @@ struct Tag : HBUINT32
}; };
/* Glyph index number, same as uint16 (length = 16 bits) */ /* Glyph index number, same as uint16 (length = 16 bits) */
typedef HBUINT16 GlyphID; struct GlyphID : HBUINT16
{
GlyphID& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
};
/* Script/language-system/feature index */ /* Script/language-system/feature index */
struct Index : HBUINT16 { struct Index : HBUINT16 {
static constexpr unsigned NOT_FOUND_INDEX = 0xFFFFu; static constexpr unsigned NOT_FOUND_INDEX = 0xFFFFu;
Index& operator = (uint16_t i) { HBUINT16::operator= (i); return *this; }
}; };
DECLARE_NULL_NAMESPACE_BYTES (OT, Index); DECLARE_NULL_NAMESPACE_BYTES (OT, Index);
@@ -169,6 +173,8 @@ typedef Index NameID;
template <typename Type, bool has_null=true> template <typename Type, bool has_null=true>
struct Offset : Type struct Offset : Type
{ {
Offset& operator = (typename Type::type i) { Type::operator= (i); return *this; }
typedef Type type; typedef Type type;
bool is_null () const { return has_null && 0 == *this; } bool is_null () const { return has_null && 0 == *this; }
@@ -176,7 +182,7 @@ struct Offset : Type
void *serialize (hb_serialize_context_t *c, const void *base) void *serialize (hb_serialize_context_t *c, const void *base)
{ {
void *t = c->start_embed<void> (); void *t = c->start_embed<void> ();
this->set ((char *) t - (char *) base); /* TODO(serialize) Overflow? */ c->check_assign (*this, (unsigned) ((char *) t - (char *) base));
return t; return t;
} }
@@ -191,6 +197,8 @@ typedef Offset<HBUINT32> Offset32;
/* CheckSum */ /* CheckSum */
struct CheckSum : HBUINT32 struct CheckSum : HBUINT32
{ {
CheckSum& operator = (uint32_t i) { HBUINT32::operator= (i); return *this; }
/* This is reference implementation from the spec. */ /* This is reference implementation from the spec. */
static uint32_t CalcTableChecksum (const HBUINT32 *Table, uint32_t Length) static uint32_t CalcTableChecksum (const HBUINT32 *Table, uint32_t Length)
{ {
@@ -205,7 +213,7 @@ struct CheckSum : HBUINT32
/* Note: data should be 4byte aligned and have 4byte padding at the end. */ /* Note: data should be 4byte aligned and have 4byte padding at the end. */
void set_for_data (const void *data, unsigned int length) void set_for_data (const void *data, unsigned int length)
{ set (CalcTableChecksum ((const HBUINT32 *) data, length)); } { *this = CalcTableChecksum ((const HBUINT32 *) data, length); }
public: public:
DEFINE_SIZE_STATIC (4); DEFINE_SIZE_STATIC (4);
@@ -255,6 +263,11 @@ struct _hb_has_null<Type, true>
template <typename Type, typename OffsetType=HBUINT16, bool has_null=true> template <typename Type, typename OffsetType=HBUINT16, bool has_null=true>
struct OffsetTo : Offset<OffsetType, has_null> struct OffsetTo : Offset<OffsetType, has_null>
{ {
HB_DELETE_COPY_ASSIGN (OffsetTo);
OffsetTo () = default;
OffsetTo& operator = (typename OffsetType::type i) { OffsetType::operator= (i); return *this; }
const Type& operator () (const void *base) const const Type& operator () (const void *base) const
{ {
if (unlikely (this->is_null ())) return *_hb_has_null<Type, has_null>::get_null (); if (unlikely (this->is_null ())) return *_hb_has_null<Type, has_null>::get_null ();
@@ -266,22 +279,68 @@ struct OffsetTo : Offset<OffsetType, has_null>
return StructAtOffset<Type> (base, *this); return StructAtOffset<Type> (base, *this);
} }
template <typename Base,
hb_enable_if (hb_is_convertible (const Base, const void *))>
friend const Type& operator + (const Base &base, const OffsetTo &offset) { return offset ((const void *) base); }
template <typename Base,
hb_enable_if (hb_is_convertible (const Base, const void *))>
friend const Type& operator + (const OffsetTo &offset, const Base &base) { return offset ((const void *) base); }
template <typename Base,
hb_enable_if (hb_is_convertible (Base, void *))>
friend Type& operator + (Base &&base, OffsetTo &offset) { return offset ((void *) base); }
template <typename Base,
hb_enable_if (hb_is_convertible (Base, void *))>
friend Type& operator + (OffsetTo &offset, Base &&base) { return offset ((void *) base); }
Type& serialize (hb_serialize_context_t *c, const void *base) Type& serialize (hb_serialize_context_t *c, const void *base)
{ {
return * (Type *) Offset<OffsetType>::serialize (c, base); return * (Type *) Offset<OffsetType>::serialize (c, base);
} }
template <typename T> template <typename ...Ts>
void serialize_subset (hb_subset_context_t *c, const T &src, const void *base) bool serialize_subset (hb_subset_context_t *c,
const OffsetTo& src,
const void *src_base,
const void *dst_base,
Ts&&... ds)
{ {
if (&src == &Null (T)) *this = 0;
{ if (src.is_null ())
this->set (0); return false;
return;
auto *s = c->serializer;
s->push ();
bool ret = c->dispatch (src_base+src, hb_forward<Ts> (ds)...);
if (ret || !has_null)
s->add_link (*this, s->pop_pack (), dst_base);
else
s->pop_discard ();
return ret;
} }
serialize (c->serializer, base);
if (!src.subset (c)) /* TODO: Somehow merge this with previous function into a serialize_dispatch(). */
this->set (0); template <typename ...Ts>
bool serialize_copy (hb_serialize_context_t *c,
const OffsetTo& src,
const void *src_base,
const void *dst_base,
Ts&&... ds)
{
*this = 0;
if (src.is_null ())
return false;
c->push ();
bool ret = c->copy (src_base+src, hb_forward<Ts> (ds)...);
c->add_link (*this, c->pop_pack (), dst_base);
return ret;
} }
bool sanitize_shallow (hb_sanitize_context_t *c, const void *base) const bool sanitize_shallow (hb_sanitize_context_t *c, const void *base) const
@@ -293,39 +352,13 @@ struct OffsetTo : Offset<OffsetType, has_null>
return_trace (true); return_trace (true);
} }
bool sanitize (hb_sanitize_context_t *c, const void *base) const template <typename ...Ts>
bool sanitize (hb_sanitize_context_t *c, const void *base, Ts&&... ds) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
return_trace (sanitize_shallow (c, base) && return_trace (sanitize_shallow (c, base) &&
(this->is_null () || (this->is_null () ||
StructAtOffset<Type> (base, *this).sanitize (c) || c->dispatch (StructAtOffset<Type> (base, *this), hb_forward<Ts> (ds)...) ||
neuter (c)));
}
template <typename T1>
bool sanitize (hb_sanitize_context_t *c, const void *base, T1 d1) const
{
TRACE_SANITIZE (this);
return_trace (sanitize_shallow (c, base) &&
(this->is_null () ||
StructAtOffset<Type> (base, *this).sanitize (c, d1) ||
neuter (c)));
}
template <typename T1, typename T2>
bool sanitize (hb_sanitize_context_t *c, const void *base, T1 d1, T2 d2) const
{
TRACE_SANITIZE (this);
return_trace (sanitize_shallow (c, base) &&
(this->is_null () ||
StructAtOffset<Type> (base, *this).sanitize (c, d1, d2) ||
neuter (c)));
}
template <typename T1, typename T2, typename T3>
bool sanitize (hb_sanitize_context_t *c, const void *base, T1 d1, T2 d2, T3 d3) const
{
TRACE_SANITIZE (this);
return_trace (sanitize_shallow (c, base) &&
(this->is_null () ||
StructAtOffset<Type> (base, *this).sanitize (c, d1, d2, d3) ||
neuter (c))); neuter (c)));
} }
@@ -338,14 +371,12 @@ struct OffsetTo : Offset<OffsetType, has_null>
DEFINE_SIZE_STATIC (sizeof (OffsetType)); DEFINE_SIZE_STATIC (sizeof (OffsetType));
}; };
/* Partial specializations. */ /* Partial specializations. */
template <typename Type, bool has_null=true> struct LOffsetTo : OffsetTo<Type, HBUINT32, has_null> {}; template <typename Type, bool has_null=true>
template <typename Type, typename OffsetType=HBUINT16 > struct NNOffsetTo : OffsetTo<Type, OffsetType, false> {}; using LOffsetTo = OffsetTo<Type, HBUINT32, has_null>;
template <typename Type > struct LNNOffsetTo : OffsetTo<Type, HBUINT32, false> {}; template <typename Type, typename OffsetType=HBUINT16>
using NNOffsetTo = OffsetTo<Type, OffsetType, false>;
template <typename Base, typename OffsetType, bool has_null, typename Type> template <typename Type>
static inline const Type& operator + (const Base &base, const OffsetTo<Type, OffsetType, has_null> &offset) { return offset (base); } using LNNOffsetTo = LOffsetTo<Type, false>;
template <typename Base, typename OffsetType, bool has_null, typename Type>
static inline Type& operator + (Base &base, OffsetTo<Type, OffsetType, has_null> &offset) { return offset (base); }
/* /*
@@ -358,7 +389,7 @@ struct UnsizedArrayOf
typedef Type item_t; typedef Type item_t;
static constexpr unsigned item_size = hb_static_size (Type); static constexpr unsigned item_size = hb_static_size (Type);
HB_NO_CREATE_COPY_ASSIGN_TEMPLATE (UnsizedArrayOf, Type); HB_DELETE_CREATE_COPY_ASSIGN (UnsizedArrayOf);
const Type& operator [] (int i_) const const Type& operator [] (int i_) const
{ {
@@ -397,38 +428,42 @@ struct UnsizedArrayOf
void qsort (unsigned int len, unsigned int start = 0, unsigned int end = (unsigned int) -1) void qsort (unsigned int len, unsigned int start = 0, unsigned int end = (unsigned int) -1)
{ as_array (len).qsort (start, end); } { as_array (len).qsort (start, end); }
bool sanitize (hb_sanitize_context_t *c, unsigned int count) const bool serialize (hb_serialize_context_t *c, unsigned int items_len)
{ {
TRACE_SANITIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!sanitize_shallow (c, count))) return_trace (false); if (unlikely (!c->extend (*this, items_len))) return_trace (false);
/* Note: for structs that do not reference other structs,
* we do not need to call their sanitize() as we already did
* a bound check on the aggregate array size. We just include
* a small unreachable expression to make sure the structs
* pointed to do have a simple sanitize(), ie. they do not
* reference other structs via offsets.
*/
(void) (false && arrayZ[0].sanitize (c));
return_trace (true); return_trace (true);
} }
bool sanitize (hb_sanitize_context_t *c, unsigned int count, const void *base) const template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, Type))>
bool serialize (hb_serialize_context_t *c, Iterator items)
{ {
TRACE_SANITIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!sanitize_shallow (c, count))) return_trace (false); unsigned count = items.len ();
for (unsigned int i = 0; i < count; i++) if (unlikely (!serialize (c, count))) return_trace (false);
if (unlikely (!arrayZ[i].sanitize (c, base))) /* TODO Umm. Just exhaust the iterator instead? Being extra
return_trace (false); * cautious right now.. */
for (unsigned i = 0; i < count; i++, ++items)
arrayZ[i] = *items;
return_trace (true); return_trace (true);
} }
template <typename T>
bool sanitize (hb_sanitize_context_t *c, unsigned int count, const void *base, T user_data) const UnsizedArrayOf* copy (hb_serialize_context_t *c, unsigned count) const
{
TRACE_SERIALIZE (this);
auto *out = c->start_embed (this);
if (unlikely (!as_array (count).copy (c))) return_trace (nullptr);
return_trace (out);
}
template <typename ...Ts>
bool sanitize (hb_sanitize_context_t *c, unsigned int count, Ts&&... ds) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
if (unlikely (!sanitize_shallow (c, count))) return_trace (false); if (unlikely (!sanitize_shallow (c, count))) return_trace (false);
if (!sizeof... (Ts) && hb_is_trivially_copyable (Type)) return_trace (true);
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
if (unlikely (!arrayZ[i].sanitize (c, base, user_data))) if (unlikely (!c->dispatch (arrayZ[i], hb_forward<Ts> (ds)...)))
return_trace (false); return_trace (false);
return_trace (true); return_trace (true);
} }
@@ -447,7 +482,7 @@ struct UnsizedArrayOf
/* Unsized array of offset's */ /* Unsized array of offset's */
template <typename Type, typename OffsetType, bool has_null=true> template <typename Type, typename OffsetType, bool has_null=true>
struct UnsizedOffsetArrayOf : UnsizedArrayOf<OffsetTo<Type, OffsetType, has_null> > {}; using UnsizedOffsetArrayOf = UnsizedArrayOf<OffsetTo<Type, OffsetType, has_null>>;
/* Unsized array of offsets relative to the beginning of the array itself. */ /* Unsized array of offsets relative to the beginning of the array itself. */
template <typename Type, typename OffsetType, bool has_null=true> template <typename Type, typename OffsetType, bool has_null=true>
@@ -468,17 +503,12 @@ struct UnsizedOffsetListOf : UnsizedOffsetArrayOf<Type, OffsetType, has_null>
return this+*p; return this+*p;
} }
template <typename ...Ts>
bool sanitize (hb_sanitize_context_t *c, unsigned int count) const bool sanitize (hb_sanitize_context_t *c, unsigned int count, Ts&&... ds) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
return_trace ((UnsizedOffsetArrayOf<Type, OffsetType, has_null>::sanitize (c, count, this))); return_trace ((UnsizedOffsetArrayOf<Type, OffsetType, has_null>
} ::sanitize (c, count, this, hb_forward<Ts> (ds)...)));
template <typename T>
bool sanitize (hb_sanitize_context_t *c, unsigned int count, T user_data) const
{
TRACE_SANITIZE (this);
return_trace ((UnsizedOffsetArrayOf<Type, OffsetType, has_null>::sanitize (c, count, this, user_data)));
} }
}; };
@@ -514,7 +544,7 @@ struct ArrayOf
typedef Type item_t; typedef Type item_t;
static constexpr unsigned item_size = hb_static_size (Type); static constexpr unsigned item_size = hb_static_size (Type);
HB_NO_CREATE_COPY_ASSIGN_TEMPLATE2 (ArrayOf, Type, LenType); HB_DELETE_CREATE_COPY_ASSIGN (ArrayOf);
const Type& operator [] (int i_) const const Type& operator [] (int i_) const
{ {
@@ -532,12 +562,18 @@ struct ArrayOf
unsigned int get_size () const unsigned int get_size () const
{ return len.static_size + len * Type::static_size; } { return len.static_size + len * Type::static_size; }
hb_array_t<Type> as_array () explicit operator bool () const { return len; }
{ return hb_array (arrayZ, len); }
hb_array_t<const Type> as_array () const hb_array_t< Type> as_array () { return hb_array (arrayZ, len); }
{ return hb_array (arrayZ, len); } hb_array_t<const Type> as_array () const { return hb_array (arrayZ, len); }
operator hb_array_t<Type> (void) { return as_array (); }
operator hb_array_t<const Type> (void) const { return as_array (); } /* Iterator. */
typedef hb_array_t<const Type> iter_t;
typedef hb_array_t< Type> writer_t;
iter_t iter () const { return as_array (); }
writer_t writer () { return as_array (); }
operator iter_t () const { return iter (); }
operator writer_t () { return writer (); }
hb_array_t<const Type> sub_array (unsigned int start_offset, unsigned int count) const hb_array_t<const Type> sub_array (unsigned int start_offset, unsigned int count) const
{ return as_array ().sub_array (start_offset, count);} { return as_array ().sub_array (start_offset, count);}
@@ -552,54 +588,43 @@ struct ArrayOf
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
len.set (items_len); /* TODO(serialize) Overflow? */ c->check_assign (len, items_len);
if (unlikely (!c->extend (*this))) return_trace (false); if (unlikely (!c->extend (*this))) return_trace (false);
return_trace (true); return_trace (true);
} }
template <typename T> template <typename Iterator,
bool serialize (hb_serialize_context_t *c, hb_array_t<const T> items) hb_requires (hb_is_source_of (Iterator, Type))>
bool serialize (hb_serialize_context_t *c, Iterator items)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!serialize (c, items.length))) return_trace (false); unsigned count = items.len ();
for (unsigned int i = 0; i < items.length; i++) if (unlikely (!serialize (c, count))) return_trace (false);
hb_assign (arrayZ[i], items[i]); /* TODO Umm. Just exhaust the iterator instead? Being extra
* cautious right now.. */
for (unsigned i = 0; i < count; i++, ++items)
arrayZ[i] = *items;
return_trace (true); return_trace (true);
} }
bool sanitize (hb_sanitize_context_t *c) const ArrayOf* copy (hb_serialize_context_t *c) const
{ {
TRACE_SANITIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!sanitize_shallow (c))) return_trace (false); auto *out = c->start_embed (this);
if (unlikely (!c->extend_min (out))) return_trace (nullptr);
/* Note: for structs that do not reference other structs, c->check_assign (out->len, len);
* we do not need to call their sanitize() as we already did if (unlikely (!as_array ().copy (c))) return_trace (nullptr);
* a bound check on the aggregate array size. We just include return_trace (out);
* a small unreachable expression to make sure the structs
* pointed to do have a simple sanitize(), ie. they do not
* reference other structs via offsets.
*/
(void) (false && arrayZ[0].sanitize (c));
return_trace (true);
} }
bool sanitize (hb_sanitize_context_t *c, const void *base) const
template <typename ...Ts>
bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
if (unlikely (!sanitize_shallow (c))) return_trace (false); if (unlikely (!sanitize_shallow (c))) return_trace (false);
if (!sizeof... (Ts) && hb_is_trivially_copyable (Type)) return_trace (true);
unsigned int count = len; unsigned int count = len;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
if (unlikely (!arrayZ[i].sanitize (c, base))) if (unlikely (!c->dispatch (arrayZ[i], hb_forward<Ts> (ds)...)))
return_trace (false);
return_trace (true);
}
template <typename T>
bool sanitize (hb_sanitize_context_t *c, const void *base, T user_data) const
{
TRACE_SANITIZE (this);
if (unlikely (!sanitize_shallow (c))) return_trace (false);
unsigned int count = len;
for (unsigned int i = 0; i < count; i++)
if (unlikely (!arrayZ[i].sanitize (c, base, user_data)))
return_trace (false); return_trace (false);
return_trace (true); return_trace (true);
} }
@@ -626,16 +651,17 @@ struct ArrayOf
public: public:
DEFINE_SIZE_ARRAY (sizeof (LenType), arrayZ); DEFINE_SIZE_ARRAY (sizeof (LenType), arrayZ);
}; };
template <typename Type> struct LArrayOf : ArrayOf<Type, HBUINT32> {}; template <typename Type>
typedef ArrayOf<HBUINT8, HBUINT8> PString; using LArrayOf = ArrayOf<Type, HBUINT32>;
using PString = ArrayOf<HBUINT8, HBUINT8>;
/* Array of Offset's */ /* Array of Offset's */
template <typename Type> template <typename Type>
struct OffsetArrayOf : ArrayOf<OffsetTo<Type, HBUINT16> > {}; using OffsetArrayOf = ArrayOf<OffsetTo<Type, HBUINT16>>;
template <typename Type> template <typename Type>
struct LOffsetArrayOf : ArrayOf<OffsetTo<Type, HBUINT32> > {}; using LOffsetArrayOf = ArrayOf<OffsetTo<Type, HBUINT32>>;
template <typename Type> template <typename Type>
struct LOffsetLArrayOf : ArrayOf<OffsetTo<Type, HBUINT32>, HBUINT32> {}; using LOffsetLArrayOf = ArrayOf<OffsetTo<Type, HBUINT32>, HBUINT32>;
/* Array of offsets relative to the beginning of the array itself. */ /* Array of offsets relative to the beginning of the array itself. */
template <typename Type> template <typename Type>
@@ -661,20 +687,15 @@ struct OffsetListOf : OffsetArrayOf<Type>
if (unlikely (!out)) return_trace (false); if (unlikely (!out)) return_trace (false);
unsigned int count = this->len; unsigned int count = this->len;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
out->arrayZ[i].serialize_subset (c, (*this)[i], out); out->arrayZ[i].serialize_subset (c, this->arrayZ[i], this, out);
return_trace (true); return_trace (true);
} }
bool sanitize (hb_sanitize_context_t *c) const template <typename ...Ts>
bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
return_trace (OffsetArrayOf<Type>::sanitize (c, this)); return_trace (OffsetArrayOf<Type>::sanitize (c, this, hb_forward<Ts> (ds)...));
}
template <typename T>
bool sanitize (hb_sanitize_context_t *c, T user_data) const
{
TRACE_SANITIZE (this);
return_trace (OffsetArrayOf<Type>::sanitize (c, this, user_data));
} }
}; };
@@ -684,7 +705,7 @@ struct HeadlessArrayOf
{ {
static constexpr unsigned item_size = Type::static_size; static constexpr unsigned item_size = Type::static_size;
HB_NO_CREATE_COPY_ASSIGN_TEMPLATE2 (HeadlessArrayOf, Type, LenType); HB_DELETE_CREATE_COPY_ASSIGN (HeadlessArrayOf);
const Type& operator [] (int i_) const const Type& operator [] (int i_) const
{ {
@@ -706,27 +727,23 @@ struct HeadlessArrayOf
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
lenP1.set (items.length + 1); /* TODO(serialize) Overflow? */ c->check_assign (lenP1, items.length + 1);
if (unlikely (!c->extend (*this))) return_trace (false); if (unlikely (!c->extend (*this))) return_trace (false);
for (unsigned int i = 0; i < items.length; i++) for (unsigned int i = 0; i < items.length; i++)
arrayZ[i] = items[i]; arrayZ[i] = items[i];
return_trace (true); return_trace (true);
} }
bool sanitize (hb_sanitize_context_t *c) const template <typename ...Ts>
bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
if (unlikely (!sanitize_shallow (c))) return_trace (false); if (unlikely (!sanitize_shallow (c))) return_trace (false);
if (!sizeof... (Ts) && hb_is_trivially_copyable (Type)) return_trace (true);
/* Note: for structs that do not reference other structs, unsigned int count = lenP1 ? lenP1 - 1 : 0;
* we do not need to call their sanitize() as we already did for (unsigned int i = 0; i < count; i++)
* a bound check on the aggregate array size. We just include if (unlikely (!c->dispatch (arrayZ[i], hb_forward<Ts> (ds)...)))
* a small unreachable expression to make sure the structs return_trace (false);
* pointed to do have a simple sanitize(), ie. they do not
* reference other structs via offsets.
*/
(void) (false && arrayZ[0].sanitize (c));
return_trace (true); return_trace (true);
} }
@@ -749,7 +766,7 @@ struct HeadlessArrayOf
template <typename Type, typename LenType=HBUINT16> template <typename Type, typename LenType=HBUINT16>
struct ArrayOfM1 struct ArrayOfM1
{ {
HB_NO_CREATE_COPY_ASSIGN_TEMPLATE2 (ArrayOfM1, Type, LenType); HB_DELETE_CREATE_COPY_ASSIGN (ArrayOfM1);
const Type& operator [] (int i_) const const Type& operator [] (int i_) const
{ {
@@ -766,14 +783,14 @@ struct ArrayOfM1
unsigned int get_size () const unsigned int get_size () const
{ return lenM1.static_size + (lenM1 + 1) * Type::static_size; } { return lenM1.static_size + (lenM1 + 1) * Type::static_size; }
template <typename T> template <typename ...Ts>
bool sanitize (hb_sanitize_context_t *c, const void *base, T user_data) const bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
if (unlikely (!sanitize_shallow (c))) return_trace (false); if (unlikely (!sanitize_shallow (c))) return_trace (false);
unsigned int count = lenM1 + 1; unsigned int count = lenM1 + 1;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
if (unlikely (!arrayZ[i].sanitize (c, base, user_data))) if (unlikely (!c->dispatch (arrayZ[i], hb_forward<Ts> (ds)...)))
return_trace (false); return_trace (false);
return_trace (true); return_trace (true);
} }
@@ -797,22 +814,41 @@ struct ArrayOfM1
template <typename Type, typename LenType=HBUINT16> template <typename Type, typename LenType=HBUINT16>
struct SortedArrayOf : ArrayOf<Type, LenType> struct SortedArrayOf : ArrayOf<Type, LenType>
{ {
hb_sorted_array_t<Type> as_array () hb_sorted_array_t< Type> as_array () { return hb_sorted_array (this->arrayZ, this->len); }
{ return hb_sorted_array (this->arrayZ, this->len); } hb_sorted_array_t<const Type> as_array () const { return hb_sorted_array (this->arrayZ, this->len); }
hb_sorted_array_t<const Type> as_array () const
{ return hb_sorted_array (this->arrayZ, this->len); }
operator hb_sorted_array_t<Type> () { return as_array (); }
operator hb_sorted_array_t<const Type> () const { return as_array (); }
hb_array_t<const Type> sub_array (unsigned int start_offset, unsigned int count) const /* Iterator. */
typedef hb_sorted_array_t<const Type> iter_t;
typedef hb_sorted_array_t< Type> writer_t;
iter_t iter () const { return as_array (); }
writer_t writer () { return as_array (); }
operator iter_t () const { return iter (); }
operator writer_t () { return writer (); }
hb_sorted_array_t<const Type> sub_array (unsigned int start_offset, unsigned int count) const
{ return as_array ().sub_array (start_offset, count);} { return as_array ().sub_array (start_offset, count);}
hb_array_t<const Type> sub_array (unsigned int start_offset, unsigned int *count = nullptr /* IN/OUT */) const hb_sorted_array_t<const Type> sub_array (unsigned int start_offset, unsigned int *count = nullptr /* IN/OUT */) const
{ return as_array ().sub_array (start_offset, count);} { return as_array ().sub_array (start_offset, count);}
hb_array_t<Type> sub_array (unsigned int start_offset, unsigned int count) hb_sorted_array_t<Type> sub_array (unsigned int start_offset, unsigned int count)
{ return as_array ().sub_array (start_offset, count);} { return as_array ().sub_array (start_offset, count);}
hb_array_t<Type> sub_array (unsigned int start_offset, unsigned int *count = nullptr /* IN/OUT */) hb_sorted_array_t<Type> sub_array (unsigned int start_offset, unsigned int *count = nullptr /* IN/OUT */)
{ return as_array ().sub_array (start_offset, count);} { return as_array ().sub_array (start_offset, count);}
bool serialize (hb_serialize_context_t *c, unsigned int items_len)
{
TRACE_SERIALIZE (this);
bool ret = ArrayOf<Type, LenType>::serialize (c, items_len);
return_trace (ret);
}
template <typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, Type))>
bool serialize (hb_serialize_context_t *c, Iterator items)
{
TRACE_SERIALIZE (this);
bool ret = ArrayOf<Type, LenType>::serialize (c, items);
return_trace (ret);
}
template <typename T> template <typename T>
Type &bsearch (const T &x, Type &not_found = Crap (Type)) Type &bsearch (const T &x, Type &not_found = Crap (Type))
{ return *as_array ().bsearch (x, &not_found); } { return *as_array ().bsearch (x, &not_found); }
@@ -841,15 +877,16 @@ struct BinSearchHeader
return_trace (c->check_struct (this)); return_trace (c->check_struct (this));
} }
void set (unsigned int v) BinSearchHeader& operator = (unsigned int v)
{ {
len.set (v); len = v;
assert (len == v); assert (len == v);
entrySelector.set (MAX (1u, hb_bit_storage (v)) - 1); entrySelector = hb_max (1u, hb_bit_storage (v)) - 1;
searchRange.set (16 * (1u << entrySelector)); searchRange = 16 * (1u << entrySelector);
rangeShift.set (v * 16 > searchRange rangeShift = v * 16 > searchRange
? 16 * v - searchRange ? 16 * v - searchRange
: 0); : 0;
return *this;
} }
protected: protected:
@@ -863,7 +900,7 @@ struct BinSearchHeader
}; };
template <typename Type, typename LenType=HBUINT16> template <typename Type, typename LenType=HBUINT16>
struct BinSearchArrayOf : SortedArrayOf<Type, BinSearchHeader<LenType> > {}; using BinSearchArrayOf = SortedArrayOf<Type, BinSearchHeader<LenType>>;
struct VarSizedBinSearchHeader struct VarSizedBinSearchHeader
@@ -893,7 +930,7 @@ struct VarSizedBinSearchArrayOf
{ {
static constexpr unsigned item_size = Type::static_size; static constexpr unsigned item_size = Type::static_size;
HB_NO_CREATE_COPY_ASSIGN_TEMPLATE (VarSizedBinSearchArrayOf, Type); HB_DELETE_CREATE_COPY_ASSIGN (VarSizedBinSearchArrayOf);
bool last_is_terminator () const bool last_is_terminator () const
{ {
@@ -928,40 +965,15 @@ struct VarSizedBinSearchArrayOf
unsigned int get_size () const unsigned int get_size () const
{ return header.static_size + header.nUnits * header.unitSize; } { return header.static_size + header.nUnits * header.unitSize; }
bool sanitize (hb_sanitize_context_t *c) const template <typename ...Ts>
{ bool sanitize (hb_sanitize_context_t *c, Ts&&... ds) const
TRACE_SANITIZE (this);
if (unlikely (!sanitize_shallow (c))) return_trace (false);
/* Note: for structs that do not reference other structs,
* we do not need to call their sanitize() as we already did
* a bound check on the aggregate array size. We just include
* a small unreachable expression to make sure the structs
* pointed to do have a simple sanitize(), ie. they do not
* reference other structs via offsets.
*/
(void) (false && StructAtOffset<Type> (&bytesZ, 0).sanitize (c));
return_trace (true);
}
bool sanitize (hb_sanitize_context_t *c, const void *base) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
if (unlikely (!sanitize_shallow (c))) return_trace (false); if (unlikely (!sanitize_shallow (c))) return_trace (false);
if (!sizeof... (Ts) && hb_is_trivially_copyable (Type)) return_trace (true);
unsigned int count = get_length (); unsigned int count = get_length ();
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
if (unlikely (!(*this)[i].sanitize (c, base))) if (unlikely (!(*this)[i].sanitize (c, hb_forward<Ts> (ds)...)))
return_trace (false);
return_trace (true);
}
template <typename T>
bool sanitize (hb_sanitize_context_t *c, const void *base, T user_data) const
{
TRACE_SANITIZE (this);
if (unlikely (!sanitize_shallow (c))) return_trace (false);
unsigned int count = get_length ();
for (unsigned int i = 0; i < count; i++)
if (unlikely (!(*this)[i].sanitize (c, base, user_data)))
return_trace (false); return_trace (false);
return_trace (true); return_trace (true);
} }

View File

@@ -97,12 +97,12 @@ struct CFFIndex
unsigned int offset_array_size () const unsigned int offset_array_size () const
{ return calculate_offset_array_size (offSize, count); } { return calculate_offset_array_size (offSize, count); }
static unsigned int calculate_serialized_size (unsigned int offSize, unsigned int count, unsigned int dataSize) static unsigned int calculate_serialized_size (unsigned int offSize_, unsigned int count, unsigned int dataSize)
{ {
if (count == 0) if (count == 0)
return COUNT::static_size; return COUNT::static_size;
else else
return min_size + calculate_offset_array_size (offSize, count) + dataSize; return min_size + calculate_offset_array_size (offSize_, count) + dataSize;
} }
bool serialize (hb_serialize_context_t *c, const CFFIndex &src) bool serialize (hb_serialize_context_t *c, const CFFIndex &src)
@@ -124,15 +124,15 @@ struct CFFIndex
{ {
COUNT *dest = c->allocate_min<COUNT> (); COUNT *dest = c->allocate_min<COUNT> ();
if (unlikely (dest == nullptr)) return_trace (false); if (unlikely (dest == nullptr)) return_trace (false);
dest->set (0); *dest = 0;
} }
else else
{ {
/* serialize CFFIndex header */ /* serialize CFFIndex header */
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
this->count.set (byteArray.length); this->count = byteArray.length;
this->offSize.set (offSize_); this->offSize = offSize_;
if (!unlikely (c->allocate_size<HBUINT8> (offSize_ * (byteArray.length + 1)))) if (unlikely (!c->allocate_size<HBUINT8> (offSize_ * (byteArray.length + 1))))
return_trace (false); return_trace (false);
/* serialize indices */ /* serialize indices */
@@ -167,7 +167,7 @@ struct CFFIndex
byteArray.resize (buffArray.length); byteArray.resize (buffArray.length);
for (unsigned int i = 0; i < byteArray.length; i++) for (unsigned int i = 0; i < byteArray.length; i++)
{ {
byteArray[i] = byte_str_t (buffArray[i].arrayZ (), buffArray[i].length); byteArray[i] = byte_str_t (buffArray[i].arrayZ, buffArray[i].length);
} }
bool result = this->serialize (c, offSize_, byteArray); bool result = this->serialize (c, offSize_, byteArray);
byteArray.fini (); byteArray.fini ();
@@ -181,7 +181,7 @@ struct CFFIndex
for (; size; size--) for (; size; size--)
{ {
--p; --p;
p->set (offset & 0xFF); *p = offset & 0xFF;
offset >>= 8; offset >>= 8;
} }
} }
@@ -275,9 +275,9 @@ struct CFFIndexOf : CFFIndex<COUNT>
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
/* serialize CFFIndex header */ /* serialize CFFIndex header */
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
this->count.set (dataArrayLen); this->count = dataArrayLen;
this->offSize.set (offSize_); this->offSize = offSize_;
if (!unlikely (c->allocate_size<HBUINT8> (offSize_ * (dataArrayLen + 1)))) if (unlikely (!c->allocate_size<HBUINT8> (offSize_ * (dataArrayLen + 1))))
return_trace (false); return_trace (false);
/* serialize indices */ /* serialize indices */
@@ -376,11 +376,11 @@ struct Dict : UnsizedByteStr
if (unlikely (p == nullptr)) return_trace (false); if (unlikely (p == nullptr)) return_trace (false);
if (Is_OpCode_ESC (op)) if (Is_OpCode_ESC (op))
{ {
p->set (OpCode_escape); *p = OpCode_escape;
op = Unmake_OpCode_ESC (op); op = Unmake_OpCode_ESC (op);
p++; p++;
} }
p->set (op); *p = op;
return_trace (true); return_trace (true);
} }
@@ -477,9 +477,9 @@ struct FDArray : CFFIndexOf<COUNT, FontDict>
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
this->count.set (fontDicts.length); this->count = fontDicts.length;
this->offSize.set (offSize_); this->offSize = offSize_;
if (!unlikely (c->allocate_size<HBUINT8> (offSize_ * (fontDicts.length + 1)))) if (unlikely (!c->allocate_size<HBUINT8> (offSize_ * (fontDicts.length + 1))))
return_trace (false); return_trace (false);
/* serialize font dict offsets */ /* serialize font dict offsets */
@@ -514,9 +514,9 @@ struct FDArray : CFFIndexOf<COUNT, FontDict>
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
this->count.set (fdCount); this->count = fdCount;
this->offSize.set (offSize_); this->offSize = offSize_;
if (!unlikely (c->allocate_size<HBUINT8> (offSize_ * (fdCount + 1)))) if (unlikely (!c->allocate_size<HBUINT8> (offSize_ * (fdCount + 1))))
return_trace (false); return_trace (false);
/* serialize font dict offsets */ /* serialize font dict offsets */

View File

@@ -27,6 +27,8 @@
#include "hb-ot-cff1-table.hh" #include "hb-ot-cff1-table.hh"
#include "hb-cff1-interp-cs.hh" #include "hb-cff1-interp-cs.hh"
#ifndef HB_NO_CFF
using namespace CFF; using namespace CFF;
/* SID to code */ /* SID to code */
@@ -165,8 +167,8 @@ struct bounds_t
{ {
void init () void init ()
{ {
min.set_int (0x7FFFFFFF, 0x7FFFFFFF); min.set_int (INT_MAX, INT_MAX);
max.set_int (-0x80000000, -0x80000000); max.set_int (INT_MIN, INT_MIN);
} }
void update (const point_t &pt) void update (const point_t &pt)
@@ -305,6 +307,11 @@ bool _get_bounds (const OT::cff1::accelerator_t *cff, hb_codepoint_t glyph, boun
bool OT::cff1::accelerator_t::get_extents (hb_codepoint_t glyph, hb_glyph_extents_t *extents) const bool OT::cff1::accelerator_t::get_extents (hb_codepoint_t glyph, hb_glyph_extents_t *extents) const
{ {
#ifdef HB_NO_OT_FONT_CFF
/* XXX Remove check when this code moves to .hh file. */
return true;
#endif
bounds_t bounds; bounds_t bounds;
if (!_get_bounds (this, glyph, bounds)) if (!_get_bounds (this, glyph, bounds))
@@ -383,3 +390,5 @@ bool OT::cff1::accelerator_t::get_seac_components (hb_codepoint_t glyph, hb_code
} }
return false; return false;
} }
#endif

View File

@@ -110,7 +110,8 @@ struct Encoding1 {
{ {
if (glyph <= ranges[i].nLeft) if (glyph <= ranges[i].nLeft)
{ {
return (hb_codepoint_t)ranges[i].first + glyph; hb_codepoint_t code = (hb_codepoint_t) ranges[i].first + glyph;
return (likely (code < 0x100) ? code: CFF_UNDEF_CODE);
} }
glyph -= (ranges[i].nLeft + 1); glyph -= (ranges[i].nLeft + 1);
} }
@@ -196,18 +197,18 @@ struct Encoding {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
Encoding *dest = c->extend_min (*this); Encoding *dest = c->extend_min (*this);
if (unlikely (dest == nullptr)) return_trace (false); if (unlikely (dest == nullptr)) return_trace (false);
dest->format.set (format | ((supp_codes.length > 0)? 0x80: 0)); dest->format = format | ((supp_codes.length > 0)? 0x80: 0);
if (format == 0) if (format == 0)
{ {
Encoding0 *fmt0 = c->allocate_size<Encoding0> (Encoding0::min_size + HBUINT8::static_size * enc_count); Encoding0 *fmt0 = c->allocate_size<Encoding0> (Encoding0::min_size + HBUINT8::static_size * enc_count);
if (unlikely (fmt0 == nullptr)) return_trace (false); if (unlikely (fmt0 == nullptr)) return_trace (false);
fmt0->nCodes ().set (enc_count); fmt0->nCodes () = enc_count;
unsigned int glyph = 0; unsigned int glyph = 0;
for (unsigned int i = 0; i < code_ranges.length; i++) for (unsigned int i = 0; i < code_ranges.length; i++)
{ {
hb_codepoint_t code = code_ranges[i].code; hb_codepoint_t code = code_ranges[i].code;
for (int left = (int)code_ranges[i].glyph; left >= 0; left--) for (int left = (int)code_ranges[i].glyph; left >= 0; left--)
fmt0->codes[glyph++].set (code++); fmt0->codes[glyph++] = code++;
if (unlikely (!((glyph <= 0x100) && (code <= 0x100)))) if (unlikely (!((glyph <= 0x100) && (code <= 0x100))))
return_trace (false); return_trace (false);
} }
@@ -216,24 +217,24 @@ struct Encoding {
{ {
Encoding1 *fmt1 = c->allocate_size<Encoding1> (Encoding1::min_size + Encoding1_Range::static_size * code_ranges.length); Encoding1 *fmt1 = c->allocate_size<Encoding1> (Encoding1::min_size + Encoding1_Range::static_size * code_ranges.length);
if (unlikely (fmt1 == nullptr)) return_trace (false); if (unlikely (fmt1 == nullptr)) return_trace (false);
fmt1->nRanges ().set (code_ranges.length); fmt1->nRanges () = code_ranges.length;
for (unsigned int i = 0; i < code_ranges.length; i++) for (unsigned int i = 0; i < code_ranges.length; i++)
{ {
if (unlikely (!((code_ranges[i].code <= 0xFF) && (code_ranges[i].glyph <= 0xFF)))) if (unlikely (!((code_ranges[i].code <= 0xFF) && (code_ranges[i].glyph <= 0xFF))))
return_trace (false); return_trace (false);
fmt1->ranges[i].first.set (code_ranges[i].code); fmt1->ranges[i].first = code_ranges[i].code;
fmt1->ranges[i].nLeft.set (code_ranges[i].glyph); fmt1->ranges[i].nLeft = code_ranges[i].glyph;
} }
} }
if (supp_codes.length > 0) if (supp_codes.length > 0)
{ {
CFF1SuppEncData *suppData = c->allocate_size<CFF1SuppEncData> (CFF1SuppEncData::min_size + SuppEncoding::static_size * supp_codes.length); CFF1SuppEncData *suppData = c->allocate_size<CFF1SuppEncData> (CFF1SuppEncData::min_size + SuppEncoding::static_size * supp_codes.length);
if (unlikely (suppData == nullptr)) return_trace (false); if (unlikely (suppData == nullptr)) return_trace (false);
suppData->nSups ().set (supp_codes.length); suppData->nSups () = supp_codes.length;
for (unsigned int i = 0; i < supp_codes.length; i++) for (unsigned int i = 0; i < supp_codes.length; i++)
{ {
suppData->supps[i].code.set (supp_codes[i].code); suppData->supps[i].code = supp_codes[i].code;
suppData->supps[i].glyph.set (supp_codes[i].glyph); /* actually SID */ suppData->supps[i].glyph = supp_codes[i].glyph; /* actually SID */
} }
} }
return_trace (true); return_trace (true);
@@ -469,7 +470,7 @@ struct Charset {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
Charset *dest = c->extend_min (*this); Charset *dest = c->extend_min (*this);
if (unlikely (dest == nullptr)) return_trace (false); if (unlikely (dest == nullptr)) return_trace (false);
dest->format.set (format); dest->format = format;
if (format == 0) if (format == 0)
{ {
Charset0 *fmt0 = c->allocate_size<Charset0> (Charset0::min_size + HBUINT16::static_size * (num_glyphs - 1)); Charset0 *fmt0 = c->allocate_size<Charset0> (Charset0::min_size + HBUINT16::static_size * (num_glyphs - 1));
@@ -479,7 +480,7 @@ struct Charset {
{ {
hb_codepoint_t sid = sid_ranges[i].code; hb_codepoint_t sid = sid_ranges[i].code;
for (int left = (int)sid_ranges[i].glyph; left >= 0; left--) for (int left = (int)sid_ranges[i].glyph; left >= 0; left--)
fmt0->sids[glyph++].set (sid++); fmt0->sids[glyph++] = sid++;
} }
} }
else if (format == 1) else if (format == 1)
@@ -490,8 +491,8 @@ struct Charset {
{ {
if (unlikely (!(sid_ranges[i].glyph <= 0xFF))) if (unlikely (!(sid_ranges[i].glyph <= 0xFF)))
return_trace (false); return_trace (false);
fmt1->ranges[i].first.set (sid_ranges[i].code); fmt1->ranges[i].first = sid_ranges[i].code;
fmt1->ranges[i].nLeft.set (sid_ranges[i].glyph); fmt1->ranges[i].nLeft = sid_ranges[i].glyph;
} }
} }
else /* format 2 */ else /* format 2 */
@@ -502,8 +503,8 @@ struct Charset {
{ {
if (unlikely (!(sid_ranges[i].glyph <= 0xFFFF))) if (unlikely (!(sid_ranges[i].glyph <= 0xFFFF)))
return_trace (false); return_trace (false);
fmt2->ranges[i].first.set (sid_ranges[i].code); fmt2->ranges[i].first = sid_ranges[i].code;
fmt2->ranges[i].nLeft.set (sid_ranges[i].glyph); fmt2->ranges[i].nLeft = sid_ranges[i].glyph;
} }
} }
return_trace (true); return_trace (true);
@@ -575,9 +576,9 @@ struct CFF1StringIndex : CFF1Index
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely ((strings.count == 0) || (sidmap.get_count () == 0))) if (unlikely ((strings.count == 0) || (sidmap.get_count () == 0)))
{ {
if (!unlikely (c->extend_min (this->count))) if (unlikely (!c->extend_min (this->count)))
return_trace (false); return_trace (false);
count.set (0); count = 0;
return_trace (true); return_trace (true);
} }
@@ -598,9 +599,9 @@ struct CFF1StringIndex : CFF1Index
} }
/* in parallel to above */ /* in parallel to above */
unsigned int calculate_serialized_size (unsigned int &offSize /*OUT*/, const remap_t &sidmap) const unsigned int calculate_serialized_size (unsigned int &offSize_ /*OUT*/, const remap_t &sidmap) const
{ {
offSize = 0; offSize_ = 0;
if ((count == 0) || (sidmap.get_count () == 0)) if ((count == 0) || (sidmap.get_count () == 0))
return count.static_size; return count.static_size;
@@ -609,8 +610,8 @@ struct CFF1StringIndex : CFF1Index
if (sidmap[i] != CFF_UNDEF_CODE) if (sidmap[i] != CFF_UNDEF_CODE)
dataSize += length_at (i); dataSize += length_at (i);
offSize = calcOffSize(dataSize); offSize_ = calcOffSize(dataSize);
return CFF1Index::calculate_serialized_size (offSize, sidmap.get_count (), dataSize); return CFF1Index::calculate_serialized_size (offSize_, sidmap.get_count (), dataSize);
} }
}; };

View File

@@ -27,6 +27,8 @@
#include "hb-ot-cff2-table.hh" #include "hb-ot-cff2-table.hh"
#include "hb-cff2-interp-cs.hh" #include "hb-cff2-interp-cs.hh"
#ifndef HB_NO_OT_FONT_CFF
using namespace CFF; using namespace CFF;
struct extents_param_t struct extents_param_t
@@ -34,10 +36,10 @@ struct extents_param_t
void init () void init ()
{ {
path_open = false; path_open = false;
min_x.set_int (0x7FFFFFFF); min_x.set_int (INT_MAX);
min_y.set_int (0x7FFFFFFF); min_y.set_int (INT_MAX);
max_x.set_int (-0x80000000); max_x.set_int (INT_MIN);
max_y.set_int (-0x80000000); max_y.set_int (INT_MIN);
} }
void start_path () { path_open = true; } void start_path () { path_open = true; }
@@ -99,6 +101,11 @@ bool OT::cff2::accelerator_t::get_extents (hb_font_t *font,
hb_codepoint_t glyph, hb_codepoint_t glyph,
hb_glyph_extents_t *extents) const hb_glyph_extents_t *extents) const
{ {
#ifdef HB_NO_OT_FONT_CFF
/* XXX Remove check when this code moves to .hh file. */
return true;
#endif
if (unlikely (!is_valid () || (glyph >= num_glyphs))) return false; if (unlikely (!is_valid () || (glyph >= num_glyphs))) return false;
unsigned int num_coords; unsigned int num_coords;
@@ -134,3 +141,5 @@ bool OT::cff2::accelerator_t::get_extents (hb_font_t *font,
return true; return true;
} }
#endif

View File

@@ -83,21 +83,21 @@ struct CmapSubtableFormat4
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
const hb_subset_plan_t *plan, const hb_subset_plan_t *plan,
const hb_vector_t<segment_plan> &segments) const hb_sorted_vector_t<segment_plan> &segments)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
this->format.set (4); this->format = 4;
this->length.set (get_sub_table_size (segments)); this->length = get_sub_table_size (segments);
this->segCountX2.set (segments.length * 2); this->segCountX2 = segments.length * 2;
this->entrySelector.set (MAX (1u, hb_bit_storage (segments.length)) - 1); this->entrySelector = hb_max (1u, hb_bit_storage (segments.length)) - 1;
this->searchRange.set (2 * (1u << this->entrySelector)); this->searchRange = 2 * (1u << this->entrySelector);
this->rangeShift.set (segments.length * 2 > this->searchRange this->rangeShift = segments.length * 2 > this->searchRange
? 2 * segments.length - this->searchRange ? 2 * segments.length - this->searchRange
: 0); : 0;
HBUINT16 *end_count = c->allocate_size<HBUINT16> (HBUINT16::static_size * segments.length); HBUINT16 *end_count = c->allocate_size<HBUINT16> (HBUINT16::static_size * segments.length);
c->allocate_size<HBUINT16> (HBUINT16::static_size); // 2 bytes of padding. c->allocate_size<HBUINT16> (HBUINT16::static_size); // 2 bytes of padding.
@@ -110,17 +110,17 @@ struct CmapSubtableFormat4
for (unsigned int i = 0; i < segments.length; i++) for (unsigned int i = 0; i < segments.length; i++)
{ {
end_count[i].set (segments[i].end_code); end_count[i] = segments[i].end_code;
start_count[i].set (segments[i].start_code); start_count[i] = segments[i].start_code;
if (segments[i].use_delta) if (segments[i].use_delta)
{ {
hb_codepoint_t cp = segments[i].start_code; hb_codepoint_t cp = segments[i].start_code;
hb_codepoint_t start_gid = 0; hb_codepoint_t start_gid = 0;
if (unlikely (!plan->new_gid_for_codepoint (cp, &start_gid) && cp != 0xFFFF)) if (unlikely (!plan->new_gid_for_codepoint (cp, &start_gid) && cp != 0xFFFF))
return_trace (false); return_trace (false);
id_delta[i].set (start_gid - segments[i].start_code); id_delta[i] = start_gid - segments[i].start_code;
} else { } else {
id_delta[i].set (0); id_delta[i] = 0;
unsigned int num_codepoints = segments[i].end_code - segments[i].start_code + 1; unsigned int num_codepoints = segments[i].end_code - segments[i].start_code + 1;
HBUINT16 *glyph_id_array = c->allocate_size<HBUINT16> (HBUINT16::static_size * num_codepoints); HBUINT16 *glyph_id_array = c->allocate_size<HBUINT16> (HBUINT16::static_size * num_codepoints);
if (glyph_id_array == nullptr) if (glyph_id_array == nullptr)
@@ -138,15 +138,14 @@ struct CmapSubtableFormat4
// id_range_offset[i] // id_range_offset[i]
// = // =
// 2 * (glyph_id_array - id_range_offset - i) // 2 * (glyph_id_array - id_range_offset - i)
id_range_offset[i].set (2 * ( id_range_offset[i] = 2 * (glyph_id_array - id_range_offset - i);
glyph_id_array - id_range_offset - i));
for (unsigned int j = 0; j < num_codepoints; j++) for (unsigned int j = 0; j < num_codepoints; j++)
{ {
hb_codepoint_t cp = segments[i].start_code + j; hb_codepoint_t cp = segments[i].start_code + j;
hb_codepoint_t new_gid; hb_codepoint_t new_gid = 0;
if (unlikely (!plan->new_gid_for_codepoint (cp, &new_gid))) if (unlikely (!plan->new_gid_for_codepoint (cp, &new_gid)))
return_trace (false); return_trace (false);
glyph_id_array[j].set (new_gid); glyph_id_array[j] = new_gid;
} }
} }
} }
@@ -154,7 +153,7 @@ struct CmapSubtableFormat4
return_trace (true); return_trace (true);
} }
static size_t get_sub_table_size (const hb_vector_t<segment_plan> &segments) static size_t get_sub_table_size (const hb_sorted_vector_t<segment_plan> &segments)
{ {
size_t segment_size = 0; size_t segment_size = 0;
for (unsigned int i = 0; i < segments.length; i++) for (unsigned int i = 0; i < segments.length; i++)
@@ -177,14 +176,14 @@ struct CmapSubtableFormat4
} }
static bool create_sub_table_plan (const hb_subset_plan_t *plan, static bool create_sub_table_plan (const hb_subset_plan_t *plan,
hb_vector_t<segment_plan> *segments) hb_sorted_vector_t<segment_plan> *segments)
{ {
segment_plan *segment = nullptr; segment_plan *segment = nullptr;
hb_codepoint_t last_gid = 0; hb_codepoint_t last_gid = 0;
hb_codepoint_t cp = HB_SET_VALUE_INVALID; hb_codepoint_t cp = HB_SET_VALUE_INVALID;
while (plan->unicodes->next (&cp)) { while (plan->unicodes->next (&cp)) {
hb_codepoint_t new_gid; hb_codepoint_t new_gid = 0;
if (unlikely (!plan->new_gid_for_codepoint (cp, &new_gid))) if (unlikely (!plan->new_gid_for_codepoint (cp, &new_gid)))
{ {
DEBUG_MSG(SUBSET, nullptr, "Unable to find new gid for %04x", cp); DEBUG_MSG(SUBSET, nullptr, "Unable to find new gid for %04x", cp);
@@ -198,11 +197,11 @@ struct CmapSubtableFormat4
cp != segment->end_code + 1u) cp != segment->end_code + 1u)
{ {
segment = segments->push (); segment = segments->push ();
segment->start_code.set (cp); segment->start_code = cp;
segment->end_code.set (cp); segment->end_code = cp;
segment->use_delta = true; segment->use_delta = true;
} else { } else {
segment->end_code.set (cp); segment->end_code = cp;
if (last_gid + 1u != new_gid) if (last_gid + 1u != new_gid)
// gid's are not consecutive in this segment so delta // gid's are not consecutive in this segment so delta
// cannot be used. // cannot be used.
@@ -216,8 +215,8 @@ struct CmapSubtableFormat4
if (segment == nullptr || segment->end_code != 0xFFFF) if (segment == nullptr || segment->end_code != 0xFFFF)
{ {
segment = segments->push (); segment = segments->push ();
segment->start_code.set (0xFFFF); segment->start_code = 0xFFFF;
segment->end_code.set (0xFFFF); segment->end_code = 0xFFFF;
segment->use_delta = true; segment->use_delta = true;
} }
@@ -286,7 +285,7 @@ struct CmapSubtableFormat4
*glyph = gid; *glyph = gid;
return true; return true;
} }
static bool get_glyph_func (const void *obj, hb_codepoint_t codepoint, hb_codepoint_t *glyph) HB_INTERNAL static bool get_glyph_func (const void *obj, hb_codepoint_t codepoint, hb_codepoint_t *glyph)
{ {
return ((const accelerator_t *) obj)->get_glyph (codepoint, glyph); return ((const accelerator_t *) obj)->get_glyph (codepoint, glyph);
} }
@@ -349,7 +348,7 @@ struct CmapSubtableFormat4
/* Some broken fonts have too long of a "length" value. /* Some broken fonts have too long of a "length" value.
* If that is the case, just change the value to truncate * If that is the case, just change the value to truncate
* the subtable at the end of the blob. */ * the subtable at the end of the blob. */
uint16_t new_length = (uint16_t) MIN ((uintptr_t) 65535, uint16_t new_length = (uint16_t) hb_min ((uintptr_t) 65535,
(uintptr_t) (c->end - (uintptr_t) (c->end -
(char *) this)); (char *) this));
if (!c->try_set (&length, new_length)) if (!c->try_set (&length, new_length))
@@ -479,7 +478,7 @@ struct CmapSubtableLongSegmented
{ {
for (unsigned int i = 0; i < this->groups.len; i++) { for (unsigned int i = 0; i < this->groups.len; i++) {
out->add_range (this->groups[i].startCharCode, out->add_range (this->groups[i].startCharCode,
MIN ((hb_codepoint_t) this->groups[i].endCharCode, hb_min ((hb_codepoint_t) this->groups[i].endCharCode,
(hb_codepoint_t) HB_UNICODE_MAX)); (hb_codepoint_t) HB_UNICODE_MAX));
} }
} }
@@ -491,12 +490,12 @@ struct CmapSubtableLongSegmented
} }
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
const hb_vector_t<CmapSubtableLongGroup> &group_data) const hb_sorted_vector_t<CmapSubtableLongGroup> &group_data)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
if (unlikely (!groups.serialize (c, group_data.as_array ()))) return_trace (false); if (unlikely (!groups.serialize (c, group_data.as_array ()))) return_trace (false);
return true; return_trace (true);
} }
protected: protected:
@@ -519,30 +518,31 @@ struct CmapSubtableFormat12 : CmapSubtableLongSegmented<CmapSubtableFormat12>
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
const hb_vector_t<CmapSubtableLongGroup> &groups) const hb_sorted_vector_t<CmapSubtableLongGroup> &groups_data)
{ {
if (unlikely (!c->extend_min (*this))) return false; TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false);
this->format.set (12); this->format = 12;
this->reserved.set (0); this->reserved = 0;
this->length.set (get_sub_table_size (groups)); this->length = get_sub_table_size (groups_data);
return CmapSubtableLongSegmented<CmapSubtableFormat12>::serialize (c, groups); return_trace (CmapSubtableLongSegmented<CmapSubtableFormat12>::serialize (c, groups_data));
} }
static size_t get_sub_table_size (const hb_vector_t<CmapSubtableLongGroup> &groups) static size_t get_sub_table_size (const hb_sorted_vector_t<CmapSubtableLongGroup> &groups_data)
{ {
return 16 + 12 * groups.length; return 16 + 12 * groups_data.length;
} }
static bool create_sub_table_plan (const hb_subset_plan_t *plan, static bool create_sub_table_plan (const hb_subset_plan_t *plan,
hb_vector_t<CmapSubtableLongGroup> *groups) hb_sorted_vector_t<CmapSubtableLongGroup> *groups_out)
{ {
CmapSubtableLongGroup *group = nullptr; CmapSubtableLongGroup *group = nullptr;
hb_codepoint_t cp = HB_SET_VALUE_INVALID; hb_codepoint_t cp = HB_SET_VALUE_INVALID;
while (plan->unicodes->next (&cp)) { while (plan->unicodes->next (&cp)) {
hb_codepoint_t new_gid; hb_codepoint_t new_gid = 0;
if (unlikely (!plan->new_gid_for_codepoint (cp, &new_gid))) if (unlikely (!plan->new_gid_for_codepoint (cp, &new_gid)))
{ {
DEBUG_MSG(SUBSET, nullptr, "Unable to find new gid for %04x", cp); DEBUG_MSG(SUBSET, nullptr, "Unable to find new gid for %04x", cp);
@@ -551,17 +551,17 @@ struct CmapSubtableFormat12 : CmapSubtableLongSegmented<CmapSubtableFormat12>
if (!group || !_is_gid_consecutive (group, cp, new_gid)) if (!group || !_is_gid_consecutive (group, cp, new_gid))
{ {
group = groups->push (); group = groups_out->push ();
group->startCharCode.set (cp); group->startCharCode = cp;
group->endCharCode.set (cp); group->endCharCode = cp;
group->glyphID.set (new_gid); group->glyphID = new_gid;
} }
else group->endCharCode.set (cp); else group->endCharCode = cp;
} }
DEBUG_MSG(SUBSET, nullptr, "cmap"); DEBUG_MSG(SUBSET, nullptr, "cmap");
for (unsigned int i = 0; i < groups->length; i++) { for (unsigned int i = 0; i < groups_out->length; i++) {
CmapSubtableLongGroup& group = (*groups)[i]; CmapSubtableLongGroup& group = (*groups_out)[i];
DEBUG_MSG(SUBSET, nullptr, " %d: U+%04X-U+%04X, gid %d-%d", i, (uint32_t) group.startCharCode, (uint32_t) group.endCharCode, (uint32_t) group.glyphID, (uint32_t) group.glyphID + ((uint32_t) group.endCharCode - (uint32_t) group.startCharCode)); DEBUG_MSG(SUBSET, nullptr, " %d: U+%04X-U+%04X, gid %d-%d", i, (uint32_t) group.startCharCode, (uint32_t) group.endCharCode, (uint32_t) group.glyphID, (uint32_t) group.glyphID + ((uint32_t) group.endCharCode - (uint32_t) group.startCharCode));
} }
@@ -623,7 +623,7 @@ struct DefaultUVS : SortedArrayOf<UnicodeValueRange, HBUINT32>
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
{ {
hb_codepoint_t first = arrayZ[i].startUnicodeValue; hb_codepoint_t first = arrayZ[i].startUnicodeValue;
hb_codepoint_t last = MIN ((hb_codepoint_t) (first + arrayZ[i].additionalCount), hb_codepoint_t last = hb_min ((hb_codepoint_t) (first + arrayZ[i].additionalCount),
(hb_codepoint_t) HB_UNICODE_MAX); (hb_codepoint_t) HB_UNICODE_MAX);
out->add_range (first, last); out->add_range (first, last);
} }
@@ -853,8 +853,8 @@ struct cmap
+ CmapSubtableFormat12::get_sub_table_size (this->format12_groups); + CmapSubtableFormat12::get_sub_table_size (this->format12_groups);
} }
hb_vector_t<CmapSubtableFormat4::segment_plan> format4_segments; hb_sorted_vector_t<CmapSubtableFormat4::segment_plan> format4_segments;
hb_vector_t<CmapSubtableLongGroup> format12_groups; hb_sorted_vector_t<CmapSubtableLongGroup> format12_groups;
}; };
bool _create_plan (const hb_subset_plan_t *plan, bool _create_plan (const hb_subset_plan_t *plan,
@@ -863,6 +863,7 @@ struct cmap
if (unlikely (!CmapSubtableFormat4::create_sub_table_plan (plan, &cmap_plan->format4_segments))) if (unlikely (!CmapSubtableFormat4::create_sub_table_plan (plan, &cmap_plan->format4_segments)))
return false; return false;
if (!find_subtable (12)) return true;
return CmapSubtableFormat12::create_sub_table_plan (plan, &cmap_plan->format12_groups); return CmapSubtableFormat12::create_sub_table_plan (plan, &cmap_plan->format12_groups);
} }
@@ -879,33 +880,35 @@ struct cmap
return false; return false;
} }
table->version.set (0); table->version = 0;
if (unlikely (!table->encodingRecord.serialize (&c, /* numTables */ 3))) if (unlikely (!table->encodingRecord.serialize (&c, /* numTables */ cmap_subset_plan.format12_groups ? 3 : 2))) return false;
return false;
// TODO(grieger): Convert the below to a for loop // TODO(grieger): Convert the below to a for loop
// Format 4, Plat 0 Encoding Record // Format 4, Plat 0 Encoding Record
EncodingRecord &format4_plat0_rec = table->encodingRecord[0]; EncodingRecord &format4_plat0_rec = table->encodingRecord[0];
format4_plat0_rec.platformID.set (0); // Unicode format4_plat0_rec.platformID = 0; // Unicode
format4_plat0_rec.encodingID.set (3); format4_plat0_rec.encodingID = 3;
// Format 4, Plat 3 Encoding Record // Format 4, Plat 3 Encoding Record
EncodingRecord &format4_plat3_rec = table->encodingRecord[1]; EncodingRecord &format4_plat3_rec = table->encodingRecord[1];
format4_plat3_rec.platformID.set (3); // Windows format4_plat3_rec.platformID = 3; // Windows
format4_plat3_rec.encodingID.set (1); // Unicode BMP format4_plat3_rec.encodingID = 1; // Unicode BMP
// Format 12 Encoding Record // Format 12 Encoding Record
if (cmap_subset_plan.format12_groups)
{
EncodingRecord &format12_rec = table->encodingRecord[2]; EncodingRecord &format12_rec = table->encodingRecord[2];
format12_rec.platformID.set (3); // Windows format12_rec.platformID = 3; // Windows
format12_rec.encodingID.set (10); // Unicode UCS-4 format12_rec.encodingID = 10; // Unicode UCS-4
}
// Write out format 4 sub table // Write out format 4 sub table
{ {
CmapSubtable &subtable = format4_plat0_rec.subtable.serialize (&c, table); CmapSubtable &subtable = format4_plat0_rec.subtable.serialize (&c, table);
format4_plat3_rec.subtable.set (format4_plat0_rec.subtable); format4_plat3_rec.subtable = (unsigned int) format4_plat0_rec.subtable;
subtable.u.format.set (4); subtable.u.format = 4;
CmapSubtableFormat4 &format4 = subtable.u.format4; CmapSubtableFormat4 &format4 = subtable.u.format4;
if (unlikely (!format4.serialize (&c, plan, cmap_subset_plan.format4_segments))) if (unlikely (!format4.serialize (&c, plan, cmap_subset_plan.format4_segments)))
@@ -913,9 +916,11 @@ struct cmap
} }
// Write out format 12 sub table. // Write out format 12 sub table.
if (cmap_subset_plan.format12_groups)
{ {
EncodingRecord &format12_rec = table->encodingRecord[2];
CmapSubtable &subtable = format12_rec.subtable.serialize (&c, table); CmapSubtable &subtable = format12_rec.subtable.serialize (&c, table);
subtable.u.format.set (12); subtable.u.format = 12;
CmapSubtableFormat12 &format12 = subtable.u.format12; CmapSubtableFormat12 &format12 = subtable.u.format12;
if (unlikely (!format12.serialize (&c, cmap_subset_plan.format12_groups))) if (unlikely (!format12.serialize (&c, cmap_subset_plan.format12_groups)))
@@ -1096,7 +1101,7 @@ struct cmap
hb_codepoint_t *glyph); hb_codepoint_t *glyph);
template <typename Type> template <typename Type>
static bool get_glyph_from (const void *obj, HB_INTERNAL static bool get_glyph_from (const void *obj,
hb_codepoint_t codepoint, hb_codepoint_t codepoint,
hb_codepoint_t *glyph) hb_codepoint_t *glyph)
{ {
@@ -1105,7 +1110,7 @@ struct cmap
} }
template <typename Type> template <typename Type>
static bool get_glyph_from_symbol (const void *obj, HB_INTERNAL static bool get_glyph_from_symbol (const void *obj,
hb_codepoint_t codepoint, hb_codepoint_t codepoint,
hb_codepoint_t *glyph) hb_codepoint_t *glyph)
{ {
@@ -1144,8 +1149,8 @@ struct cmap
unsigned int encoding_id) const unsigned int encoding_id) const
{ {
EncodingRecord key; EncodingRecord key;
key.platformID.set (platform_id); key.platformID = platform_id;
key.encodingID.set (encoding_id); key.encodingID = encoding_id;
const EncodingRecord &result = encodingRecord.bsearch (key); const EncodingRecord &result = encodingRecord.bsearch (key);
if (!result.subtable) if (!result.subtable)
@@ -1154,6 +1159,18 @@ struct cmap
return &(this+result.subtable); return &(this+result.subtable);
} }
bool find_subtable (unsigned format) const
{
auto it =
+ hb_iter (encodingRecord)
| hb_map (&EncodingRecord::subtable)
| hb_map (hb_add (this))
| hb_filter ([&] (const CmapSubtable& _) { return _.u.format == format; })
;
return it.len ();
}
public: public:
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const

View File

@@ -349,15 +349,15 @@ struct CBLC
if (unlikely (!count)) if (unlikely (!count))
return Null(BitmapSizeTable); return Null(BitmapSizeTable);
unsigned int requested_ppem = MAX (font->x_ppem, font->y_ppem); unsigned int requested_ppem = hb_max (font->x_ppem, font->y_ppem);
if (!requested_ppem) if (!requested_ppem)
requested_ppem = 1<<30; /* Choose largest strike. */ requested_ppem = 1<<30; /* Choose largest strike. */
unsigned int best_i = 0; unsigned int best_i = 0;
unsigned int best_ppem = MAX (sizeTables[0].ppemX, sizeTables[0].ppemY); unsigned int best_ppem = hb_max (sizeTables[0].ppemX, sizeTables[0].ppemY);
for (unsigned int i = 1; i < count; i++) for (unsigned int i = 1; i < count; i++)
{ {
unsigned int ppem = MAX (sizeTables[i].ppemX, sizeTables[i].ppemY); unsigned int ppem = hb_max (sizeTables[i].ppemX, sizeTables[i].ppemY);
if ((requested_ppem <= ppem && ppem < best_ppem) || if ((requested_ppem <= ppem && ppem < best_ppem) ||
(requested_ppem > best_ppem && ppem > best_ppem)) (requested_ppem > best_ppem && ppem > best_ppem))
{ {
@@ -442,12 +442,12 @@ struct CBDT
} }
/* Convert to font units. */ /* Convert to font units. */
double x_scale = upem / (double) strike.ppemX; float x_scale = upem / (float) strike.ppemX;
double y_scale = upem / (double) strike.ppemY; float y_scale = upem / (float) strike.ppemY;
extents->x_bearing = round (extents->x_bearing * x_scale); extents->x_bearing = roundf (extents->x_bearing * x_scale);
extents->y_bearing = round (extents->y_bearing * y_scale); extents->y_bearing = roundf (extents->y_bearing * y_scale);
extents->width = round (extents->width * x_scale); extents->width = roundf (extents->width * x_scale);
extents->height = round (extents->height * y_scale); extents->height = roundf (extents->height * y_scale);
return true; return true;
} }

View File

@@ -144,7 +144,7 @@ struct CPAL
{ {
hb_array_t<const BGRAColor> segment_colors = palette_colors.sub_array (start_offset, *color_count); hb_array_t<const BGRAColor> segment_colors = palette_colors.sub_array (start_offset, *color_count);
/* Always return numColors colors per palette even if it has out-of-bounds start index. */ /* Always return numColors colors per palette even if it has out-of-bounds start index. */
unsigned int count = MIN<unsigned int> (MAX<int> (numColors - start_offset, 0), *color_count); unsigned int count = hb_min ((unsigned) hb_max ((int) (numColors - start_offset), 0), *color_count);
*color_count = count; *color_count = count;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
colors[i] = segment_colors[i]; /* Bound-checked read. */ colors[i] = segment_colors[i]; /* Bound-checked read. */

View File

@@ -175,7 +175,7 @@ struct sbix
if (unlikely (!count)) if (unlikely (!count))
return Null(SBIXStrike); return Null(SBIXStrike);
unsigned int requested_ppem = MAX (font->x_ppem, font->y_ppem); unsigned int requested_ppem = hb_max (font->x_ppem, font->y_ppem);
if (!requested_ppem) if (!requested_ppem)
requested_ppem = 1<<30; /* Choose largest strike. */ requested_ppem = 1<<30; /* Choose largest strike. */
/* TODO Add DPI sensitivity as well? */ /* TODO Add DPI sensitivity as well? */
@@ -242,11 +242,11 @@ struct sbix
/* Convert to font units. */ /* Convert to font units. */
if (strike_ppem) if (strike_ppem)
{ {
double scale = font->face->get_upem () / (double) strike_ppem; float scale = font->face->get_upem () / (float) strike_ppem;
extents->x_bearing = round (extents->x_bearing * scale); extents->x_bearing = roundf (extents->x_bearing * scale);
extents->y_bearing = round (extents->y_bearing * scale); extents->y_bearing = roundf (extents->y_bearing * scale);
extents->width = round (extents->width * scale); extents->width = roundf (extents->width * scale);
extents->height = round (extents->height * scale); extents->height = roundf (extents->height * scale);
} }
hb_blob_destroy (blob); hb_blob_destroy (blob);

View File

@@ -47,6 +47,8 @@
* @include: hb-ot.h * @include: hb-ot.h
* *
* Functions for fetching color-font information from OpenType font faces. * Functions for fetching color-font information from OpenType font faces.
*
* HarfBuzz supports `COLR`/`CPAL`, `sbix`, `CBDT`, and `SVG` color fonts.
**/ **/
@@ -57,42 +59,54 @@
/** /**
* hb_ot_color_has_palettes: * hb_ot_color_has_palettes:
* @face: a font face. * @face: #hb_face_t to work upon
* *
* Returns: whether CPAL table is available. * Tests whether a face includes a `CPAL` color-palette table.
*
* Return value: true if data found, false otherwise
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
hb_bool_t hb_bool_t
hb_ot_color_has_palettes (hb_face_t *face) hb_ot_color_has_palettes (hb_face_t *face)
{ {
#ifdef HB_NO_COLOR
return false;
#endif
return face->table.CPAL->has_data (); return face->table.CPAL->has_data ();
} }
/** /**
* hb_ot_color_palette_get_count: * hb_ot_color_palette_get_count:
* @face: a font face. * @face: #hb_face_t to work upon
* *
* Returns: the number of color palettes in @face, or zero if @face has * Fetches the number of color palettes in a face.
* no colors. *
* Return value: the number of palettes found
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
unsigned int unsigned int
hb_ot_color_palette_get_count (hb_face_t *face) hb_ot_color_palette_get_count (hb_face_t *face)
{ {
#ifdef HB_NO_COLOR
return 0;
#endif
return face->table.CPAL->get_palette_count (); return face->table.CPAL->get_palette_count ();
} }
/** /**
* hb_ot_color_palette_get_name_id: * hb_ot_color_palette_get_name_id:
* @face: a font face. * @face: #hb_face_t to work upon
* @palette_index: the index of the color palette whose name is being requested. * @palette_index: The index of the color palette
* *
* Retrieves the name id of a color palette. For example, a color font can * Fetches the `name` table Name ID that provides display names for
* have themed palettes like "Spring", "Summer", "Fall", and "Winter". * a `CPAL` color palette.
* *
* Returns: an identifier within @face's `name` table. * Palette display names can be generic (e.g., "Default") or provide
* specific, themed names (e.g., "Spring", "Summer", "Fall", and "Winter").
*
* Return value: the Named ID found for the palette.
* If the requested palette has no name the result is #HB_OT_NAME_ID_INVALID. * If the requested palette has no name the result is #HB_OT_NAME_ID_INVALID.
* *
* Since: 2.1.0 * Since: 2.1.0
@@ -101,15 +115,24 @@ hb_ot_name_id_t
hb_ot_color_palette_get_name_id (hb_face_t *face, hb_ot_color_palette_get_name_id (hb_face_t *face,
unsigned int palette_index) unsigned int palette_index)
{ {
#ifdef HB_NO_COLOR
return HB_OT_NAME_ID_INVALID;
#endif
return face->table.CPAL->get_palette_name_id (palette_index); return face->table.CPAL->get_palette_name_id (palette_index);
} }
/** /**
* hb_ot_color_palette_color_get_name_id: * hb_ot_color_palette_color_get_name_id:
* @face: a font face. * @face: #hb_face_t to work upon
* @color_index: palette entry index. * @color_index: The index of the color
* *
* Returns: Name ID associated with a palette entry, e.g. eye color * Fetches the `name` table Name ID that provides display names for
* the specificed color in a face's `CPAL` color palette.
*
* Display names can be generic (e.g., "Background") or specific
* (e.g., "Eye color").
*
* Return value: the Name ID found for the color.
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
@@ -117,15 +140,20 @@ hb_ot_name_id_t
hb_ot_color_palette_color_get_name_id (hb_face_t *face, hb_ot_color_palette_color_get_name_id (hb_face_t *face,
unsigned int color_index) unsigned int color_index)
{ {
#ifdef HB_NO_COLOR
return HB_OT_NAME_ID_INVALID;
#endif
return face->table.CPAL->get_color_name_id (color_index); return face->table.CPAL->get_color_name_id (color_index);
} }
/** /**
* hb_ot_color_palette_get_flags: * hb_ot_color_palette_get_flags:
* @face: a font face * @face: #hb_face_t to work upon
* @palette_index: the index of the color palette whose flags are being requested * @palette_index: The index of the color palette
* *
* Returns: the flags for the requested color palette. * Fetches the flags defined for a color palette.
*
* Return value: the #hb_ot_color_palette_flags_t of the requested color palette
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
@@ -133,30 +161,30 @@ hb_ot_color_palette_flags_t
hb_ot_color_palette_get_flags (hb_face_t *face, hb_ot_color_palette_get_flags (hb_face_t *face,
unsigned int palette_index) unsigned int palette_index)
{ {
#ifdef HB_NO_COLOR
return HB_OT_COLOR_PALETTE_FLAG_DEFAULT;
#endif
return face->table.CPAL->get_palette_flags (palette_index); return face->table.CPAL->get_palette_flags (palette_index);
} }
/** /**
* hb_ot_color_palette_get_colors: * hb_ot_color_palette_get_colors:
* @face: a font face. * @face: #hb_face_t to work upon
* @palette_index:the index of the color palette whose colors * @palette_index: the index of the color palette to query
* are being requested. * @start_offset: offset of the first color to retrieve
* @start_offset: the index of the first color being requested. * @color_count: (inout) (optional): Input = the maximum number of colors to return;
* @color_count: (inout) (optional): on input, how many colors * Output = the actual number of colors returned (may be zero)
* can be maximally stored into the @colors array; * @colors: (out) (array length=color_count) (nullable): The array of #hb_color_t records found
* on output, how many colors were actually stored. *
* @colors: (array length=color_count) (out) (optional): * Fetches a list of the colors in a color palette.
* an array of #hb_color_t records. After calling *
* this function, @colors will be filled with * After calling this function, @colors will be filled with the palette
* the palette colors. If @colors is NULL, the function * colors. If @colors is NULL, the function will just return the number
* will just return the number of total colors * of total colors without storing any actual colors; this can be used
* without storing any actual colors; this can be used
* for allocating a buffer of suitable size before calling * for allocating a buffer of suitable size before calling
* hb_ot_color_palette_get_colors() a second time. * hb_ot_color_palette_get_colors() a second time.
* *
* Retrieves the colors in a color palette. * Return value: the total number of colors in the palette
*
* Returns: the total number of colors in the palette.
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
@@ -167,6 +195,11 @@ hb_ot_color_palette_get_colors (hb_face_t *face,
unsigned int *colors_count /* IN/OUT. May be NULL. */, unsigned int *colors_count /* IN/OUT. May be NULL. */,
hb_color_t *colors /* OUT. May be NULL. */) hb_color_t *colors /* OUT. May be NULL. */)
{ {
#ifdef HB_NO_COLOR
if (colors_count)
*colors_count = 0;
return 0;
#endif
return face->table.CPAL->get_palette_colors (palette_index, start_offset, colors_count, colors); return face->table.CPAL->get_palette_colors (palette_index, start_offset, colors_count, colors);
} }
@@ -177,28 +210,36 @@ hb_ot_color_palette_get_colors (hb_face_t *face,
/** /**
* hb_ot_color_has_layers: * hb_ot_color_has_layers:
* @face: a font face. * @face: #hb_face_t to work upon
* *
* Returns: whether COLR table is available. * Tests whether a face includes any `COLR` color layers.
*
* Return value: true if data found, false otherwise
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
hb_bool_t hb_bool_t
hb_ot_color_has_layers (hb_face_t *face) hb_ot_color_has_layers (hb_face_t *face)
{ {
#ifdef HB_NO_COLOR
return false;
#endif
return face->table.COLR->has_data (); return face->table.COLR->has_data ();
} }
/** /**
* hb_ot_color_glyph_get_layers: * hb_ot_color_glyph_get_layers:
* @face: a font face. * @face: #hb_face_t to work upon
* @glyph: a layered color glyph id. * @glyph: The glyph index to query
* @start_offset: starting offset of layers. * @start_offset: offset of the first layer to retrieve
* @count: (inout) (optional): gets number of layers available to be written on buffer * @layer_count: (inout) (optional): Input = the maximum number of layers to return;
* and returns number of written layers. * Output = the actual number of layers returned (may be zero)
* @layers: (array length=count) (out) (optional): layers buffer to buffer. * @layers: (out) (array length=layer_count) (nullable): The array of layers found
* *
* Returns: Total number of layers a layered color glyph have. * Fetches a list of all color layers for the specified glyph index in the specified
* face. The list returned will begin at the offset provided.
*
* Return value: Total number of layers available for the glyph index queried
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
@@ -206,10 +247,15 @@ unsigned int
hb_ot_color_glyph_get_layers (hb_face_t *face, hb_ot_color_glyph_get_layers (hb_face_t *face,
hb_codepoint_t glyph, hb_codepoint_t glyph,
unsigned int start_offset, unsigned int start_offset,
unsigned int *count, /* IN/OUT. May be NULL. */ unsigned int *layer_count, /* IN/OUT. May be NULL. */
hb_ot_color_layer_t *layers /* OUT. May be NULL. */) hb_ot_color_layer_t *layers /* OUT. May be NULL. */)
{ {
return face->table.COLR->get_glyph_layers (glyph, start_offset, count, layers); #ifdef HB_NO_COLOR
if (layer_count)
*layer_count = 0;
return 0;
#endif
return face->table.COLR->get_glyph_layers (glyph, start_offset, layer_count, layers);
} }
@@ -219,34 +265,40 @@ hb_ot_color_glyph_get_layers (hb_face_t *face,
/** /**
* hb_ot_color_has_svg: * hb_ot_color_has_svg:
* @face: a font face. * @face: #hb_face_t to work upon.
* *
* Check whether @face has SVG glyph images. * Tests whether a face includes any `SVG` glyph images.
* *
* Returns true if available, false otherwise. * Return value: true if data found, false otherwise.
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
hb_bool_t hb_bool_t
hb_ot_color_has_svg (hb_face_t *face) hb_ot_color_has_svg (hb_face_t *face)
{ {
#ifdef HB_NO_COLOR
return false;
#endif
return face->table.SVG->has_data (); return face->table.SVG->has_data ();
} }
/** /**
* hb_ot_color_glyph_reference_svg: * hb_ot_color_glyph_reference_svg:
* @face: a font face. * @face: #hb_face_t to work upon
* @glyph: a svg glyph index. * @glyph: a svg glyph index
* *
* Get SVG document for a glyph. The blob may be either plain text or gzip-encoded. * Fetches the SVG document for a glyph. The blob may be either plain text or gzip-encoded.
* *
* Returns: (transfer full): respective svg blob of the glyph, if available. * Return value: (transfer full): An #hb_blob_t containing the SVG document of the glyph, if available
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
hb_blob_t * hb_blob_t *
hb_ot_color_glyph_reference_svg (hb_face_t *face, hb_codepoint_t glyph) hb_ot_color_glyph_reference_svg (hb_face_t *face, hb_codepoint_t glyph)
{ {
#ifdef HB_NO_COLOR
return hb_blob_get_empty ();
#endif
return face->table.SVG->reference_blob_for_glyph (glyph); return face->table.SVG->reference_blob_for_glyph (glyph);
} }
@@ -257,36 +309,43 @@ hb_ot_color_glyph_reference_svg (hb_face_t *face, hb_codepoint_t glyph)
/** /**
* hb_ot_color_has_png: * hb_ot_color_has_png:
* @face: a font face. * @face: #hb_face_t to work upon
* *
* Check whether @face has PNG glyph images (either CBDT or sbix tables). * Tests whether a face has PNG glyph images (either in `CBDT` or `sbix` tables).
* *
* Returns true if available, false otherwise. * Return value: true if data found, false otherwise
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
hb_bool_t hb_bool_t
hb_ot_color_has_png (hb_face_t *face) hb_ot_color_has_png (hb_face_t *face)
{ {
#ifdef HB_NO_COLOR
return false;
#endif
return face->table.CBDT->has_data () || face->table.sbix->has_data (); return face->table.CBDT->has_data () || face->table.sbix->has_data ();
} }
/** /**
* hb_ot_color_glyph_reference_png: * hb_ot_color_glyph_reference_png:
* @font: a font object, not face. upem should be set on * @font: #hb_font_t to work upon
* that font object if one wants to get optimal png blob, otherwise * @glyph: a glyph index
* return the biggest one
* @glyph: a glyph index.
* *
* Get PNG image for a glyph. * Fetches the PNG image for a glyph. This function takes a font object, not a face object,
* as input. To get an optimally sized PNG blob, the UPEM value must be set on the @font
* object. If UPEM is unset, the blob returned will be the largest PNG available.
* *
* Returns: (transfer full): respective PNG blob of the glyph, if available. * Return value: (transfer full): An #hb_blob_t containing the PNG image for the glyph, if available
* *
* Since: 2.1.0 * Since: 2.1.0
*/ */
hb_blob_t * hb_blob_t *
hb_ot_color_glyph_reference_png (hb_font_t *font, hb_codepoint_t glyph) hb_ot_color_glyph_reference_png (hb_font_t *font, hb_codepoint_t glyph)
{ {
#ifdef HB_NO_COLOR
return hb_blob_get_empty ();
#endif
hb_blob_t *blob = hb_blob_get_empty (); hb_blob_t *blob = hb_blob_get_empty ();
if (font->face->table.sbix->has_data ()) if (font->face->table.sbix->has_data ())

View File

@@ -59,11 +59,11 @@ hb_ot_color_palette_color_get_name_id (hb_face_t *face,
/** /**
* hb_ot_color_palette_flags_t: * hb_ot_color_palette_flags_t:
* @HB_OT_COLOR_PALETTE_FLAG_DEFAULT: default indicating that there is nothing special * @HB_OT_COLOR_PALETTE_FLAG_DEFAULT: Default indicating that there is nothing special
* to note about a color palette. * to note about a color palette.
* @HB_OT_COLOR_PALETTE_FLAG_USABLE_WITH_LIGHT_BACKGROUND: flag indicating that the color * @HB_OT_COLOR_PALETTE_FLAG_USABLE_WITH_LIGHT_BACKGROUND: Flag indicating that the color
* palette is appropriate to use when displaying the font on a light background such as white. * palette is appropriate to use when displaying the font on a light background such as white.
* @HB_OT_COLOR_PALETTE_FLAG_USABLE_WITH_DARK_BACKGROUND: flag indicating that the color * @HB_OT_COLOR_PALETTE_FLAG_USABLE_WITH_DARK_BACKGROUND: Flag indicating that the color
* palette is appropriate to use when displaying the font on a dark background such as black. * palette is appropriate to use when displaying the font on a dark background such as black.
* *
* Since: 2.1.0 * Since: 2.1.0
@@ -110,7 +110,7 @@ HB_EXTERN unsigned int
hb_ot_color_glyph_get_layers (hb_face_t *face, hb_ot_color_glyph_get_layers (hb_face_t *face,
hb_codepoint_t glyph, hb_codepoint_t glyph,
unsigned int start_offset, unsigned int start_offset,
unsigned int *count, /* IN/OUT. May be NULL. */ unsigned int *layer_count, /* IN/OUT. May be NULL. */
hb_ot_color_layer_t *layers /* OUT. May be NULL. */); hb_ot_color_layer_t *layers /* OUT. May be NULL. */);
/* /*

View File

@@ -40,6 +40,10 @@ HB_BEGIN_DECLS
#ifndef HB_DISABLE_DEPRECATED #ifndef HB_DISABLE_DEPRECATED
/* https://github.com/harfbuzz/harfbuzz/issues/1734 */
#define HB_MATH_GLYPH_PART_FLAG_EXTENDER HB_OT_MATH_GLYPH_PART_FLAG_EXTENDER
/* Like hb_ot_layout_table_find_script, but takes zero-terminated array of scripts to test */ /* Like hb_ot_layout_table_find_script, but takes zero-terminated array of scripts to test */
HB_EXTERN HB_DEPRECATED_FOR (hb_ot_layout_table_select_script) hb_bool_t HB_EXTERN HB_DEPRECATED_FOR (hb_ot_layout_table_select_script) hb_bool_t
hb_ot_layout_table_choose_script (hb_face_t *face, hb_ot_layout_table_choose_script (hb_face_t *face,

View File

@@ -180,15 +180,20 @@ hb_ot_get_glyph_extents (hb_font_t *font,
void *user_data HB_UNUSED) void *user_data HB_UNUSED)
{ {
const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data; const hb_ot_face_t *ot_face = (const hb_ot_face_t *) font_data;
bool ret = ot_face->sbix->get_extents (font, glyph, extents); bool ret = false;
if (!ret)
ret = ot_face->glyf->get_extents (glyph, extents); #if !defined(HB_NO_OT_FONT_BITMAP) && !defined(HB_NO_COLOR)
if (!ret) if (!ret) ret = ot_face->sbix->get_extents (font, glyph, extents);
ret = ot_face->cff1->get_extents (glyph, extents); #endif
if (!ret) if (!ret) ret = ot_face->glyf->get_extents (glyph, extents);
ret = ot_face->cff2->get_extents (font, glyph, extents); #ifndef HB_NO_OT_FONT_CFF
if (!ret) if (!ret) ret = ot_face->cff1->get_extents (glyph, extents);
ret = ot_face->CBDT->get_extents (font, glyph, extents); if (!ret) ret = ot_face->cff2->get_extents (font, glyph, extents);
#endif
#if !defined(HB_NO_OT_FONT_BITMAP) && !defined(HB_NO_COLOR)
if (!ret) ret = ot_face->CBDT->get_extents (font, glyph, extents);
#endif
// TODO Hook up side-bearings variations. // TODO Hook up side-bearings variations.
extents->x_bearing = font->em_scale_x (extents->x_bearing); extents->x_bearing = font->em_scale_x (extents->x_bearing);
extents->y_bearing = font->em_scale_y (extents->y_bearing); extents->y_bearing = font->em_scale_y (extents->y_bearing);

View File

@@ -21,7 +21,7 @@
* ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO * ON AN "AS IS" BASIS, AND THE COPYRIGHT HOLDER HAS NO OBLIGATION TO
* PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. * PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
* *
* Google Author(s): Behdad Esfahbod * Google Author(s): Behdad Esfahbod, Garret Rieger, Roderick Sheeter
*/ */
#ifndef HB_OT_GLYF_TABLE_HH #ifndef HB_OT_GLYF_TABLE_HH
@@ -29,7 +29,6 @@
#include "hb-open-type.hh" #include "hb-open-type.hh"
#include "hb-ot-head-table.hh" #include "hb-ot-head-table.hh"
#include "hb-subset-glyf.hh"
namespace OT { namespace OT {
@@ -58,7 +57,7 @@ struct loca
public: public:
DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always
* check the size externally, allow Null() object of it by * check the size externally, allow Null() object of it by
* defining it MIN() instead. */ * defining it _MIN instead. */
}; };
@@ -81,24 +80,170 @@ struct glyf
return_trace (true); return_trace (true);
} }
bool subset (hb_subset_plan_t *plan) const template<typename Iterator,
hb_requires (hb_is_source_of (Iterator, unsigned int))>
static bool
_add_loca_and_head (hb_subset_plan_t * plan, Iterator padded_offsets)
{ {
hb_blob_t *glyf_prime = nullptr; unsigned max_offset = + padded_offsets | hb_reduce(hb_add, 0);
hb_blob_t *loca_prime = nullptr; unsigned num_offsets = padded_offsets.len () + 1;
bool use_short_loca = max_offset < 0x1FFFF;
unsigned entry_size = use_short_loca ? 2 : 4;
char *loca_prime_data = (char *) calloc (entry_size, num_offsets);
bool success = true; if (unlikely (!loca_prime_data)) return false;
bool use_short_loca = false;
if (hb_subset_glyf_and_loca (plan, &use_short_loca, &glyf_prime, &loca_prime)) { DEBUG_MSG(SUBSET, nullptr, "loca entry_size %d num_offsets %d max_offset %d size %d", entry_size, num_offsets, max_offset, entry_size * num_offsets);
success = success && plan->add_table (HB_OT_TAG_glyf, glyf_prime);
success = success && plan->add_table (HB_OT_TAG_loca, loca_prime); if (use_short_loca)
success = success && _add_head_and_set_loca_version (plan, use_short_loca); _write_loca (padded_offsets, 1, hb_array ((HBUINT16*) loca_prime_data, num_offsets));
} else { else
success = false; _write_loca (padded_offsets, 0, hb_array ((HBUINT32*) loca_prime_data, num_offsets));
hb_blob_t * loca_blob = hb_blob_create (loca_prime_data,
entry_size * num_offsets,
HB_MEMORY_MODE_WRITABLE,
loca_prime_data,
free);
bool result = plan->add_table (HB_OT_TAG_loca, loca_blob)
&& _add_head_and_set_loca_version(plan, use_short_loca);
hb_blob_destroy (loca_blob);
return result;
} }
hb_blob_destroy (loca_prime);
hb_blob_destroy (glyf_prime);
return success; template<typename IteratorIn, typename IteratorOut,
hb_requires (hb_is_source_of (IteratorIn, unsigned int)),
hb_requires (hb_is_sink_of (IteratorOut, unsigned))>
static void
_write_loca (IteratorIn it, unsigned right_shift, IteratorOut dest)
{
unsigned int offset = 0;
dest << 0;
+ it
| hb_map ([=, &offset] (unsigned int padded_size) {
offset += padded_size;
DEBUG_MSG(SUBSET, nullptr, "loca entry offset %d", offset);
return offset >> right_shift;
})
| hb_sink (dest)
;
}
// requires source of SubsetGlyph complains the identifier isn't declared
template <typename Iterator>
bool serialize(hb_serialize_context_t *c,
Iterator it,
const hb_subset_plan_t *plan)
{
TRACE_SERIALIZE (this);
+ it
| hb_apply ([=] (const SubsetGlyph& _) { _.serialize (c, plan); })
;
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
glyf *glyf_prime = c->serializer->start_embed <glyf> ();
if (unlikely (!c->serializer->check_success (glyf_prime))) return_trace (false);
// Byte region(s) per glyph to output
// unpadded, hints removed if so requested
// If we fail to process a glyph we produce an empty (0-length) glyph
hb_vector_t<SubsetGlyph> glyphs;
_populate_subset_glyphs (c->plan, &glyphs);
glyf_prime->serialize (c->serializer, hb_iter (glyphs), c->plan);
auto padded_offsets =
+ hb_iter (glyphs)
| hb_map (&SubsetGlyph::padded_size)
;
if (c->serializer->in_error ()) return_trace (false);
return_trace (c->serializer->check_success (_add_loca_and_head (c->plan, padded_offsets)));
}
template <typename SubsetGlyph>
void
_populate_subset_glyphs (const hb_subset_plan_t * plan,
hb_vector_t<SubsetGlyph> * glyphs /* OUT */) const
{
OT::glyf::accelerator_t glyf;
glyf.init (plan->source);
+ hb_range (plan->num_output_glyphs ())
| hb_map ([&] (hb_codepoint_t new_gid) {
SubsetGlyph subset_glyph;
subset_glyph.new_gid = new_gid;
// should never fail: all old gids should be mapped
if (!plan->old_gid_for_new_gid (new_gid, &subset_glyph.old_gid)) return subset_glyph;
subset_glyph.source_glyph = glyf.bytes_for_glyph ((const char *) this, subset_glyph.old_gid);
if (plan->drop_hints) subset_glyph.drop_hints (glyf);
else subset_glyph.dest_start = subset_glyph.source_glyph;
return subset_glyph;
})
| hb_sink (glyphs)
;
glyf.fini();
}
static void
_fix_component_gids (const hb_subset_plan_t *plan,
hb_bytes_t glyph)
{
OT::glyf::CompositeGlyphHeader::Iterator iterator;
if (OT::glyf::CompositeGlyphHeader::get_iterator (&glyph,
glyph.length,
&iterator))
{
do
{
hb_codepoint_t new_gid;
if (!plan->new_gid_for_old_gid (iterator.current->glyphIndex,
&new_gid))
continue;
((OT::glyf::CompositeGlyphHeader *) iterator.current)->glyphIndex = new_gid;
} while (iterator.move_to_next ());
}
}
static void
_zero_instruction_length (hb_bytes_t glyph)
{
const GlyphHeader &glyph_header = StructAtOffset<GlyphHeader> (&glyph, 0);
int16_t num_contours = (int16_t) glyph_header.numberOfContours;
if (num_contours <= 0) return; // only for simple glyphs
const HBUINT16 &instruction_length = StructAtOffset<HBUINT16> (&glyph, GlyphHeader::static_size + 2 * num_contours);
(HBUINT16 &) instruction_length = 0;
}
static bool _remove_composite_instruction_flag (hb_bytes_t glyph)
{
const GlyphHeader &glyph_header = StructAtOffset<GlyphHeader> (&glyph, 0);
if (glyph_header.numberOfContours >= 0) return true; // only for composites
/* remove WE_HAVE_INSTRUCTIONS from flags in dest */
OT::glyf::CompositeGlyphHeader::Iterator composite_it;
if (unlikely (!OT::glyf::CompositeGlyphHeader::get_iterator (&glyph, glyph.length, &composite_it))) return false;
const OT::glyf::CompositeGlyphHeader *composite_header;
do {
composite_header = composite_it.current;
OT::HBUINT16 *flags = const_cast<OT::HBUINT16 *> (&composite_header->flags);
*flags = (uint16_t) *flags & ~OT::glyf::CompositeGlyphHeader::WE_HAVE_INSTRUCTIONS;
} while (composite_it.move_to_next ());
return true;
} }
static bool static bool
@@ -112,7 +257,7 @@ struct glyf
return false; return false;
head *head_prime = (head *) hb_blob_get_data_writable (head_prime_blob, nullptr); head *head_prime = (head *) hb_blob_get_data_writable (head_prime_blob, nullptr);
head_prime->indexToLocFormat.set (use_short_loca ? 0 : 1); head_prime->indexToLocFormat = use_short_loca ? 0 : 1;
bool success = plan->add_table (HB_OT_TAG_head, head_prime_blob); bool success = plan->add_table (HB_OT_TAG_head, head_prime_blob);
hb_blob_destroy (head_prime_blob); hb_blob_destroy (head_prime_blob);
@@ -171,6 +316,7 @@ struct glyf
return size; return size;
} }
// TODO rewrite using new iterator framework if possible
struct Iterator struct Iterator
{ {
const char *glyph_start; const char *glyph_start;
@@ -241,7 +387,7 @@ struct glyf
loca_table = hb_sanitize_context_t ().reference_table<loca> (face); loca_table = hb_sanitize_context_t ().reference_table<loca> (face);
glyf_table = hb_sanitize_context_t ().reference_table<glyf> (face); glyf_table = hb_sanitize_context_t ().reference_table<glyf> (face);
num_glyphs = MAX (1u, loca_table.get_length () / (short_offset ? 2 : 4)) - 1; num_glyphs = hb_max (1u, loca_table.get_length () / (short_offset ? 2 : 4)) - 1;
} }
void fini () void fini ()
@@ -381,61 +527,55 @@ struct glyf
return true; return true;
} }
bool get_instruction_offsets (unsigned int start_offset, bool get_instruction_length (hb_bytes_t glyph,
unsigned int end_offset, unsigned int * length /* OUT */) const
unsigned int *instruction_start /* OUT */,
unsigned int *instruction_end /* OUT */) const
{ {
if (end_offset - start_offset < GlyphHeader::static_size) /* Empty glyph; no instructions. */
if (glyph.length < GlyphHeader::static_size)
{ {
*instruction_start = 0; *length = 0;
*instruction_end = 0; // only 0 byte glyphs are healthy when missing GlyphHeader
return true; /* Empty glyph; no instructions. */ return glyph.length == 0;
} }
const GlyphHeader &glyph_header = StructAtOffset<GlyphHeader> (glyf_table, start_offset); const GlyphHeader &glyph_header = StructAtOffset<GlyphHeader> (&glyph, 0);
int16_t num_contours = (int16_t) glyph_header.numberOfContours; int16_t num_contours = (int16_t) glyph_header.numberOfContours;
if (num_contours < 0) if (num_contours < 0)
{ {
unsigned int start = glyph.length;
unsigned int end = glyph.length;
unsigned int glyph_offset = &glyph - glyf_table;
CompositeGlyphHeader::Iterator composite_it; CompositeGlyphHeader::Iterator composite_it;
if (unlikely (!CompositeGlyphHeader::get_iterator ( if (unlikely (!CompositeGlyphHeader::get_iterator (&glyph, glyph.length, &composite_it))) return false;
(const char*) this->glyf_table + start_offset,
end_offset - start_offset, &composite_it))) return false;
const CompositeGlyphHeader *last; const CompositeGlyphHeader *last;
do { do {
last = composite_it.current; last = composite_it.current;
} while (composite_it.move_to_next ()); } while (composite_it.move_to_next ());
if ((uint16_t) last->flags & CompositeGlyphHeader::WE_HAVE_INSTRUCTIONS) if ((uint16_t) last->flags & CompositeGlyphHeader::WE_HAVE_INSTRUCTIONS)
*instruction_start = ((char *) last - (char *) glyf_table->dataZ.arrayZ) + last->get_size (); start = ((char *) last - (char *) glyf_table->dataZ.arrayZ) + last->get_size () - glyph_offset;
else if (unlikely (start > end))
*instruction_start = end_offset;
*instruction_end = end_offset;
if (unlikely (*instruction_start > *instruction_end))
{ {
DEBUG_MSG(SUBSET, nullptr, "Invalid instruction offset, %d is outside [%d, %d]", *instruction_start, start_offset, end_offset); DEBUG_MSG(SUBSET, nullptr, "Invalid instruction offset, %d is outside %d byte buffer", start, glyph.length);
return false; return false;
} }
*length = end - start;
} }
else else
{ {
unsigned int instruction_length_offset = start_offset + GlyphHeader::static_size + 2 * num_contours; unsigned int instruction_length_offset = GlyphHeader::static_size + 2 * num_contours;
if (unlikely (instruction_length_offset + 2 > end_offset)) if (unlikely (instruction_length_offset + 2 > glyph.length))
{ {
DEBUG_MSG(SUBSET, nullptr, "Glyph size is too short, missing field instructionLength."); DEBUG_MSG(SUBSET, nullptr, "Glyph size is too short, missing field instructionLength.");
return false; return false;
} }
const HBUINT16 &instruction_length = StructAtOffset<HBUINT16> (glyf_table, instruction_length_offset); const HBUINT16 &instruction_length = StructAtOffset<HBUINT16> (&glyph, instruction_length_offset);
unsigned int start = instruction_length_offset + 2; if (unlikely (instruction_length_offset + instruction_length > glyph.length)) // Out of bounds of the current glyph
unsigned int end = start + (uint16_t) instruction_length;
if (unlikely (end > end_offset)) // Out of bounds of the current glyph
{ {
DEBUG_MSG(SUBSET, nullptr, "The instructions array overruns the glyph's boundaries."); DEBUG_MSG(SUBSET, nullptr, "The instructions array overruns the glyph's boundaries.");
return false; return false;
} }
*length = (uint16_t) instruction_length;
*instruction_start = start;
*instruction_end = end;
} }
return true; return true;
} }
@@ -451,14 +591,33 @@ struct glyf
const GlyphHeader &glyph_header = StructAtOffset<GlyphHeader> (glyf_table, start_offset); const GlyphHeader &glyph_header = StructAtOffset<GlyphHeader> (glyf_table, start_offset);
extents->x_bearing = MIN (glyph_header.xMin, glyph_header.xMax); extents->x_bearing = hb_min (glyph_header.xMin, glyph_header.xMax);
extents->y_bearing = MAX (glyph_header.yMin, glyph_header.yMax); extents->y_bearing = hb_max (glyph_header.yMin, glyph_header.yMax);
extents->width = MAX (glyph_header.xMin, glyph_header.xMax) - extents->x_bearing; extents->width = hb_max (glyph_header.xMin, glyph_header.xMax) - extents->x_bearing;
extents->height = MIN (glyph_header.yMin, glyph_header.yMax) - extents->y_bearing; extents->height = hb_min (glyph_header.yMin, glyph_header.yMax) - extents->y_bearing;
return true; return true;
} }
hb_bytes_t bytes_for_glyph (const char * glyf, hb_codepoint_t gid)
{
unsigned int start_offset, end_offset;
if (unlikely (!(get_offsets (gid, &start_offset, &end_offset) &&
remove_padding (start_offset, &end_offset))))
{
DEBUG_MSG(SUBSET, nullptr, "Unable to get offset or remove padding for %d", gid);
return hb_bytes_t ();
}
hb_bytes_t glyph = hb_bytes_t (glyf + start_offset, end_offset - start_offset);
if (glyph.length == 0) return glyph;
if (unlikely (glyph.length < GlyphHeader::static_size))
{
DEBUG_MSG(SUBSET, nullptr, "Glyph size smaller than minimum header %d", gid);
return hb_bytes_t ();
}
return glyph;
}
private: private:
bool short_offset; bool short_offset;
unsigned int num_glyphs; unsigned int num_glyphs;
@@ -466,12 +625,99 @@ struct glyf
hb_blob_ptr_t<glyf> glyf_table; hb_blob_ptr_t<glyf> glyf_table;
}; };
struct SubsetGlyph
{
hb_codepoint_t new_gid;
hb_codepoint_t old_gid;
hb_bytes_t source_glyph;
hb_bytes_t dest_start; // region of source_glyph to copy first
hb_bytes_t dest_end; // region of source_glyph to copy second
bool serialize (hb_serialize_context_t *c,
const hb_subset_plan_t *plan) const
{
TRACE_SERIALIZE (this);
hb_bytes_t dest_glyph = dest_start.copy(c);
dest_glyph = hb_bytes_t (&dest_glyph, dest_glyph.length + dest_end.copy(c).length);
unsigned int pad_length = padding ();
DEBUG_MSG(SUBSET, nullptr, "serialize %d byte glyph, width %d pad %d", dest_glyph.length, dest_glyph.length + pad_length, pad_length);
HBUINT8 pad;
pad = 0;
while (pad_length > 0)
{
c->embed(pad);
pad_length--;
}
if (dest_glyph.length)
{
_fix_component_gids (plan, dest_glyph);
if (plan->drop_hints)
{
_zero_instruction_length (dest_glyph);
c->check_success (_remove_composite_instruction_flag (dest_glyph));
}
}
return_trace (true);
}
void drop_hints (const OT::glyf::accelerator_t& glyf)
{
if (source_glyph.length == 0) return;
unsigned int instruction_length = 0;
if (!glyf.get_instruction_length (source_glyph, &instruction_length))
{
DEBUG_MSG(SUBSET, nullptr, "Unable to read instruction length for new_gid %d", new_gid);
return ;
}
const GlyphHeader& header = StructAtOffset<GlyphHeader> (&source_glyph, 0);
int16_t num_contours = (int16_t) header.numberOfContours;
DEBUG_MSG(SUBSET, nullptr, "new_gid %d (%d contours) drop %d instruction bytes from %d byte source glyph", new_gid, num_contours, instruction_length, source_glyph.length);
if (num_contours < 0)
{
// composite, just chop instructions off the end
dest_start = hb_bytes_t (&source_glyph, source_glyph.length - instruction_length);
}
else
{
// simple glyph
dest_start = hb_bytes_t (&source_glyph, GlyphHeader::static_size + 2 * header.numberOfContours + 2);
dest_end = hb_bytes_t (&source_glyph + dest_start.length + instruction_length,
source_glyph.length - dest_start.length - instruction_length);
DEBUG_MSG(SUBSET, nullptr, "source_len %d start len %d instruction_len %d end len %d", source_glyph.length, dest_start.length, instruction_length, dest_end.length);
}
}
unsigned int length () const
{
return dest_start.length + dest_end.length;
}
// pad to 2 to ensure 2-byte loca will be ok
unsigned int padding () const
{
return length () % 2;
}
unsigned int padded_size () const
{
return length () + padding ();
}
};
protected: protected:
UnsizedArrayOf<HBUINT8> dataZ; /* Glyphs data. */ UnsizedArrayOf<HBUINT8> dataZ; /* Glyphs data. */
public: public:
DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always DEFINE_SIZE_MIN (0); /* In reality, this is UNBOUNDED() type; but since we always
* check the size externally, allow Null() object of it by * check the size externally, allow Null() object of it by
* defining it MIN() instead. */ * defining it _MIN instead. */
}; };
struct glyf_accelerator_t : glyf::accelerator_t {}; struct glyf_accelerator_t : glyf::accelerator_t {};

View File

@@ -41,71 +41,31 @@ namespace OT {
struct DeviceRecord struct DeviceRecord
{ {
struct SubsetView static unsigned int get_size (unsigned count)
{
const DeviceRecord *source_device_record;
unsigned int sizeDeviceRecord;
hb_subset_plan_t *subset_plan;
void init (const DeviceRecord *source_device_record,
unsigned int sizeDeviceRecord,
hb_subset_plan_t *subset_plan)
{
this->source_device_record = source_device_record;
this->sizeDeviceRecord = sizeDeviceRecord;
this->subset_plan = subset_plan;
}
unsigned int len () const
{ return this->subset_plan->num_output_glyphs (); }
const HBUINT8* operator [] (unsigned int new_gid) const
{
if (unlikely (new_gid >= len ())) return nullptr;
hb_codepoint_t old_gid;
if (!this->subset_plan->old_gid_for_new_gid (new_gid, &old_gid))
return &Null(HBUINT8);
if (old_gid >= sizeDeviceRecord - DeviceRecord::min_size)
return nullptr;
return &(this->source_device_record->widthsZ[old_gid]);
}
};
static unsigned int get_size (unsigned int count)
{ return hb_ceil_to_4 (min_size + count * HBUINT8::static_size); } { return hb_ceil_to_4 (min_size + count * HBUINT8::static_size); }
bool serialize (hb_serialize_context_t *c, const SubsetView &subset_view) template<typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c, unsigned pixelSize, Iterator it)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
unsigned int size = get_size (subset_view.len ()); unsigned length = it.len ();
if (unlikely (!c->allocate_size<DeviceRecord> (size)))
{
DEBUG_MSG(SUBSET, nullptr, "Couldn't allocate enough space for DeviceRecord: %d.",
size);
return_trace (false);
}
this->pixelSize.set (subset_view.source_device_record->pixelSize); if (unlikely (!c->extend (*this, length))) return_trace (false);
this->maxWidth.set (subset_view.source_device_record->maxWidth);
for (unsigned int i = 0; i < subset_view.len (); i++) this->pixelSize = pixelSize;
{ this->maxWidth =
const HBUINT8 *width = subset_view[i]; + it
if (!width) | hb_reduce (hb_max, 0u);
{
DEBUG_MSG(SUBSET, nullptr, "HDMX width for new gid %d is missing.", i); + it
return_trace (false); | hb_sink (widthsZ.as_array (length));
}
widthsZ[i].set (*width);
}
return_trace (true); return_trace (true);
} }
bool sanitize (hb_sanitize_context_t *c, unsigned int sizeDeviceRecord) const bool sanitize (hb_sanitize_context_t *c, unsigned sizeDeviceRecord) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
return_trace (likely (c->check_struct (this) && return_trace (likely (c->check_struct (this) &&
@@ -135,62 +95,63 @@ struct hdmx
return StructAtOffset<DeviceRecord> (&this->firstDeviceRecord, i * sizeDeviceRecord); return StructAtOffset<DeviceRecord> (&this->firstDeviceRecord, i * sizeDeviceRecord);
} }
bool serialize (hb_serialize_context_t *c, const hdmx *source_hdmx, hb_subset_plan_t *plan) template<typename Iterator,
hb_requires (hb_is_iterator (Iterator))>
bool serialize (hb_serialize_context_t *c, unsigned version, Iterator it)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min ((*this)))) return_trace (false); if (unlikely (!c->extend_min ((*this)))) return_trace (false);
this->version.set (source_hdmx->version); this->version = version;
this->numRecords.set (source_hdmx->numRecords); this->numRecords = it.len ();
this->sizeDeviceRecord.set (DeviceRecord::get_size (plan->num_output_glyphs ())); this->sizeDeviceRecord = DeviceRecord::get_size (it ? (*it).second.len () : 0);
for (unsigned int i = 0; i < source_hdmx->numRecords; i++) + it
{ | hb_apply ([c] (const hb_item_type<Iterator>& _) {
DeviceRecord::SubsetView subset_view; c->start_embed<DeviceRecord> ()->serialize (c, _.first, _.second);
subset_view.init (&(*source_hdmx)[i], source_hdmx->sizeDeviceRecord, plan); })
;
if (!c->start_embed<DeviceRecord> ()->serialize (c, subset_view)) return_trace (c->successful);
return_trace (false);
} }
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
hdmx *hdmx_prime = c->serializer->start_embed <hdmx> ();
if (unlikely (!hdmx_prime)) return_trace (false);
auto it =
+ hb_range ((unsigned) numRecords)
| hb_map ([c, this] (unsigned _)
{
const DeviceRecord *device_record =
&StructAtOffset<DeviceRecord> (&firstDeviceRecord,
_ * sizeDeviceRecord);
auto row =
+ hb_range (c->plan->num_output_glyphs ())
| hb_map (c->plan->reverse_glyph_map)
| hb_map ([=] (hb_codepoint_t _)
{
if (c->plan->is_empty_glyph (_))
return Null(HBUINT8);
return device_record->widthsZ.as_array (get_num_glyphs ()) [_];
})
;
return hb_pair ((unsigned) device_record->pixelSize, +row);
})
;
hdmx_prime->serialize (c->serializer, version, it);
return_trace (true); return_trace (true);
} }
static size_t get_subsetted_size (const hdmx *source_hdmx, hb_subset_plan_t *plan) unsigned get_num_glyphs () const
{ {
return min_size + source_hdmx->numRecords * DeviceRecord::get_size (plan->num_output_glyphs ()); return sizeDeviceRecord - DeviceRecord::min_size;
}
bool subset (hb_subset_plan_t *plan) const
{
size_t dest_size = get_subsetted_size (this, plan);
hdmx *dest = (hdmx *) malloc (dest_size);
if (unlikely (!dest))
{
DEBUG_MSG(SUBSET, nullptr, "Unable to alloc %lu for hdmx subset output.", (unsigned long) dest_size);
return false;
}
hb_serialize_context_t c (dest, dest_size);
hdmx *hdmx_prime = c.start_serialize<hdmx> ();
if (!hdmx_prime || !hdmx_prime->serialize (&c, this, plan))
{
free (dest);
DEBUG_MSG(SUBSET, nullptr, "Failed to serialize write new hdmx.");
return false;
}
c.end_serialize ();
hb_blob_t *hdmx_prime_blob = hb_blob_create ((const char *) dest,
dest_size,
HB_MEMORY_MODE_READONLY,
dest,
free);
bool result = plan->add_table (HB_OT_TAG_hdmx, hdmx_prime_blob);
hb_blob_destroy (hdmx_prime_blob);
return result;
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const

View File

@@ -78,7 +78,7 @@ struct hmtxvmtx
unsigned int length; unsigned int length;
H *table = (H *) hb_blob_get_data (dest_blob, &length); H *table = (H *) hb_blob_get_data (dest_blob, &length);
table->numberOfLongMetrics.set (num_hmetrics); table->numberOfLongMetrics = num_hmetrics;
bool result = plan->add_table (H::tableTag, dest_blob); bool result = plan->add_table (H::tableTag, dest_blob);
hb_blob_destroy (dest_blob); hb_blob_destroy (dest_blob);
@@ -128,12 +128,12 @@ struct hmtxvmtx
bool has_advance = i < num_advances; bool has_advance = i < num_advances;
if (has_advance) if (has_advance)
{ {
((LongMetric *) dest_pos)->advance.set (advance); ((LongMetric *) dest_pos)->advance = advance;
((LongMetric *) dest_pos)->sb.set (side_bearing); ((LongMetric *) dest_pos)->sb = side_bearing;
} }
else else
{ {
((FWORD *) dest_pos)->set (side_bearing); *((FWORD *) dest_pos) = side_bearing;
} }
dest_pos += (has_advance ? 4 : 2); dest_pos += (has_advance ? 4 : 2);
} }
@@ -240,7 +240,7 @@ struct hmtxvmtx
return default_advance; return default_advance;
} }
return table->longMetricZ[MIN (glyph, (uint32_t) num_advances - 1)].advance; return table->longMetricZ[hb_min (glyph, (uint32_t) num_advances - 1)].advance;
} }
unsigned int get_advance (hb_codepoint_t glyph, unsigned int get_advance (hb_codepoint_t glyph,

View File

@@ -121,16 +121,20 @@ struct KernSubTable
} }
} }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
unsigned int subtable_type = get_type (); unsigned int subtable_type = get_type ();
TRACE_DISPATCH (this, subtable_type); TRACE_DISPATCH (this, subtable_type);
switch (subtable_type) { switch (subtable_type) {
case 0: return_trace (c->dispatch (u.format0)); case 0: return_trace (c->dispatch (u.format0));
case 1: return_trace (u.header.apple ? c->dispatch (u.format1) : c->default_return_value ()); #ifndef HB_NO_SHAPE_AAT
case 1: return_trace (u.header.apple ? c->dispatch (u.format1, hb_forward<Ts> (ds)...) : c->default_return_value ());
#endif
case 2: return_trace (c->dispatch (u.format2)); case 2: return_trace (c->dispatch (u.format2));
case 3: return_trace (u.header.apple ? c->dispatch (u.format3) : c->default_return_value ()); #ifndef HB_NO_SHAPE_AAT
case 3: return_trace (u.header.apple ? c->dispatch (u.format3, hb_forward<Ts> (ds)...) : c->default_return_value ());
#endif
default: return_trace (c->default_return_value ()); default: return_trace (c->default_return_value ());
} }
} }
@@ -278,7 +282,9 @@ struct kern
{ {
switch (get_type ()) { switch (get_type ()) {
case 0: return u.ot.has_state_machine (); case 0: return u.ot.has_state_machine ();
#ifndef HB_NO_SHAPE_AAT
case 1: return u.aat.has_state_machine (); case 1: return u.aat.has_state_machine ();
#endif
default:return false; default:return false;
} }
} }
@@ -287,7 +293,9 @@ struct kern
{ {
switch (get_type ()) { switch (get_type ()) {
case 0: return u.ot.has_cross_stream (); case 0: return u.ot.has_cross_stream ();
#ifndef HB_NO_SHAPE_AAT
case 1: return u.aat.has_cross_stream (); case 1: return u.aat.has_cross_stream ();
#endif
default:return false; default:return false;
} }
} }
@@ -296,7 +304,9 @@ struct kern
{ {
switch (get_type ()) { switch (get_type ()) {
case 0: return u.ot.get_h_kerning (left, right); case 0: return u.ot.get_h_kerning (left, right);
#ifndef HB_NO_SHAPE_AAT
case 1: return u.aat.get_h_kerning (left, right); case 1: return u.aat.get_h_kerning (left, right);
#endif
default:return 0; default:return 0;
} }
} }
@@ -304,14 +314,16 @@ struct kern
bool apply (AAT::hb_aat_apply_context_t *c) const bool apply (AAT::hb_aat_apply_context_t *c) const
{ return dispatch (c); } { return dispatch (c); }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
unsigned int subtable_type = get_type (); unsigned int subtable_type = get_type ();
TRACE_DISPATCH (this, subtable_type); TRACE_DISPATCH (this, subtable_type);
switch (subtable_type) { switch (subtable_type) {
case 0: return_trace (c->dispatch (u.ot)); case 0: return_trace (c->dispatch (u.ot, hb_forward<Ts> (ds)...));
case 1: return_trace (c->dispatch (u.aat)); #ifndef HB_NO_SHAPE_AAT
case 1: return_trace (c->dispatch (u.aat, hb_forward<Ts> (ds)...));
#endif
default: return_trace (c->default_return_value ()); default: return_trace (c->default_return_value ());
} }
} }
@@ -328,7 +340,9 @@ struct kern
HBUINT32 version32; HBUINT32 version32;
HBUINT16 major; HBUINT16 major;
KernOT ot; KernOT ot;
#ifndef HB_NO_SHAPE_AAT
KernAAT aat; KernAAT aat;
#endif
} u; } u;
public: public:
DEFINE_SIZE_UNION (4, version32); DEFINE_SIZE_UNION (4, version32);

View File

@@ -153,7 +153,7 @@ struct BaseCoord
struct FeatMinMaxRecord struct FeatMinMaxRecord
{ {
static int cmp (const void *key_, const void *entry_) HB_INTERNAL static int cmp (const void *key_, const void *entry_)
{ {
hb_tag_t key = * (hb_tag_t *) key_; hb_tag_t key = * (hb_tag_t *) key_;
const FeatMinMaxRecord &entry = * (const FeatMinMaxRecord *) entry_; const FeatMinMaxRecord &entry = * (const FeatMinMaxRecord *) entry_;
@@ -271,7 +271,7 @@ struct BaseValues
struct BaseLangSysRecord struct BaseLangSysRecord
{ {
static int cmp (const void *key_, const void *entry_) HB_INTERNAL static int cmp (const void *key_, const void *entry_)
{ {
hb_tag_t key = * (hb_tag_t *) key_; hb_tag_t key = * (hb_tag_t *) key_;
const BaseLangSysRecord &entry = * (const BaseLangSysRecord *) entry_; const BaseLangSysRecord &entry = * (const BaseLangSysRecord *) entry_;
@@ -345,7 +345,7 @@ struct BaseScript
struct BaseScriptList; struct BaseScriptList;
struct BaseScriptRecord struct BaseScriptRecord
{ {
static int cmp (const void *key_, const void *entry_) HB_INTERNAL static int cmp (const void *key_, const void *entry_)
{ {
hb_tag_t key = * (hb_tag_t *) key_; hb_tag_t key = * (hb_tag_t *) key_;
const BaseScriptRecord &entry = * (const BaseScriptRecord *) entry_; const BaseScriptRecord &entry = * (const BaseScriptRecord *) entry_;

View File

@@ -66,7 +66,6 @@ namespace OT {
#define NOT_COVERED ((unsigned int) -1) #define NOT_COVERED ((unsigned int) -1)
/* /*
* *
* OpenType Layout Common Table Formats * OpenType Layout Common Table Formats
@@ -139,11 +138,11 @@ struct RecordListOf : RecordArrayOf<Type>
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SUBSET (this);
struct RecordListOf<Type> *out = c->serializer->embed (*this); auto *out = c->serializer->embed (*this);
if (unlikely (!out)) return_trace (false); if (unlikely (!out)) return_trace (false);
unsigned int count = this->len; unsigned int count = this->len;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
out->get_offset (i).serialize_subset (c, (*this)[i], out); out->get_offset (i).serialize_subset (c, this->get_offset (i), this, out);
return_trace (true); return_trace (true);
} }
@@ -227,13 +226,13 @@ struct LangSys
{ {
if (reqFeatureIndex == 0xFFFFu) if (reqFeatureIndex == 0xFFFFu)
return Index::NOT_FOUND_INDEX; return Index::NOT_FOUND_INDEX;
return reqFeatureIndex;; return reqFeatureIndex;
} }
bool subset (hb_subset_context_t *c) const LangSys* copy (hb_serialize_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SERIALIZE (this);
return_trace (c->serializer->embed (*this)); return_trace (c->embed (*this));
} }
bool sanitize (hb_sanitize_context_t *c, bool sanitize (hb_sanitize_context_t *c,
@@ -278,12 +277,12 @@ struct Script
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SUBSET (this);
struct Script *out = c->serializer->embed (*this); auto *out = c->serializer->embed (*this);
if (unlikely (!out)) return_trace (false); if (unlikely (!out)) return_trace (false);
out->defaultLangSys.serialize_subset (c, this+defaultLangSys, out); out->defaultLangSys.serialize_copy (c->serializer, defaultLangSys, this, out);
unsigned int count = langSys.len; unsigned int count = langSys.len;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
out->langSys.arrayZ[i].offset.serialize_subset (c, this+langSys[i].offset, out); out->langSys.arrayZ[i].offset.serialize_copy (c->serializer, langSys[i].offset, this, out);
return_trace (true); return_trace (true);
} }
@@ -560,9 +559,9 @@ struct Feature
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SUBSET (this);
struct Feature *out = c->serializer->embed (*this); auto *out = c->serializer->embed (*this);
if (unlikely (!out)) return_trace (false); if (unlikely (!out)) return_trace (false);
out->featureParams.set (0); /* TODO(subset) FeatureParams. */ out->featureParams = 0; /* TODO(subset) FeatureParams. */
return_trace (true); return_trace (true);
} }
@@ -584,25 +583,25 @@ struct Feature
* Adobe tools, only the 'size' feature had FeatureParams defined. * Adobe tools, only the 'size' feature had FeatureParams defined.
*/ */
OffsetTo<FeatureParams> orig_offset = featureParams; if (likely (featureParams.is_null ()))
return_trace (true);
unsigned int orig_offset = featureParams;
if (unlikely (!featureParams.sanitize (c, this, closure ? closure->tag : HB_TAG_NONE))) if (unlikely (!featureParams.sanitize (c, this, closure ? closure->tag : HB_TAG_NONE)))
return_trace (false); return_trace (false);
if (likely (orig_offset.is_null ()))
return_trace (true);
if (featureParams == 0 && closure && if (featureParams == 0 && closure &&
closure->tag == HB_TAG ('s','i','z','e') && closure->tag == HB_TAG ('s','i','z','e') &&
closure->list_base && closure->list_base < this) closure->list_base && closure->list_base < this)
{ {
unsigned int new_offset_int = (unsigned int) orig_offset - unsigned int new_offset_int = orig_offset -
(((char *) this) - ((char *) closure->list_base)); (((char *) this) - ((char *) closure->list_base));
OffsetTo<FeatureParams> new_offset; OffsetTo<FeatureParams> new_offset;
/* Check that it did not overflow. */ /* Check that it would not overflow. */
new_offset.set (new_offset_int); new_offset = new_offset_int;
if (new_offset == new_offset_int && if (new_offset == new_offset_int &&
c->try_set (&featureParams, new_offset) && c->try_set (&featureParams, new_offset_int) &&
!featureParams.sanitize (c, this, closure ? closure->tag : HB_TAG_NONE)) !featureParams.sanitize (c, this, closure ? closure->tag : HB_TAG_NONE))
return_trace (false); return_trace (false);
} }
@@ -648,10 +647,6 @@ struct Lookup
{ {
unsigned int get_subtable_count () const { return subTable.len; } unsigned int get_subtable_count () const { return subTable.len; }
template <typename TSubTable>
const TSubTable& get_subtable (unsigned int i) const
{ return this+CastR<OffsetArrayOf<TSubTable> > (subTable)[i]; }
template <typename TSubTable> template <typename TSubTable>
const OffsetArrayOf<TSubTable>& get_subtables () const const OffsetArrayOf<TSubTable>& get_subtables () const
{ return CastR<OffsetArrayOf<TSubTable>> (subTable); } { return CastR<OffsetArrayOf<TSubTable>> (subTable); }
@@ -659,6 +654,13 @@ struct Lookup
OffsetArrayOf<TSubTable>& get_subtables () OffsetArrayOf<TSubTable>& get_subtables ()
{ return CastR<OffsetArrayOf<TSubTable>> (subTable); } { return CastR<OffsetArrayOf<TSubTable>> (subTable); }
template <typename TSubTable>
const TSubTable& get_subtable (unsigned int i) const
{ return this+get_subtables<TSubTable> ()[i]; }
template <typename TSubTable>
TSubTable& get_subtable (unsigned int i)
{ return this+get_subtables<TSubTable> ()[i]; }
unsigned int get_size () const unsigned int get_size () const
{ {
const HBUINT16 &markFilteringSet = StructAfter<const HBUINT16> (subTable); const HBUINT16 &markFilteringSet = StructAfter<const HBUINT16> (subTable);
@@ -683,14 +685,14 @@ struct Lookup
return flag; return flag;
} }
template <typename TSubTable, typename context_t> template <typename TSubTable, typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
unsigned int lookup_type = get_type (); unsigned int lookup_type = get_type ();
TRACE_DISPATCH (this, lookup_type); TRACE_DISPATCH (this, lookup_type);
unsigned int count = get_subtable_count (); unsigned int count = get_subtable_count ();
for (unsigned int i = 0; i < count; i++) { for (unsigned int i = 0; i < count; i++) {
typename context_t::return_t r = get_subtable<TSubTable> (i).dispatch (c, lookup_type); typename context_t::return_t r = get_subtable<TSubTable> (i).dispatch (c, lookup_type, hb_forward<Ts> (ds)...);
if (c->stop_sublookup_iteration (r)) if (c->stop_sublookup_iteration (r))
return_trace (r); return_trace (r);
} }
@@ -704,40 +706,23 @@ struct Lookup
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
lookupType.set (lookup_type); lookupType = lookup_type;
lookupFlag.set (lookup_props & 0xFFFFu); lookupFlag = lookup_props & 0xFFFFu;
if (unlikely (!subTable.serialize (c, num_subtables))) return_trace (false); if (unlikely (!subTable.serialize (c, num_subtables))) return_trace (false);
if (lookupFlag & LookupFlag::UseMarkFilteringSet) if (lookupFlag & LookupFlag::UseMarkFilteringSet)
{ {
if (unlikely (!c->extend (*this))) return_trace (false); if (unlikely (!c->extend (*this))) return_trace (false);
HBUINT16 &markFilteringSet = StructAfter<HBUINT16> (subTable); HBUINT16 &markFilteringSet = StructAfter<HBUINT16> (subTable);
markFilteringSet.set (lookup_props >> 16); markFilteringSet = lookup_props >> 16;
} }
return_trace (true); return_trace (true);
} }
/* Older compilers need this to NOT be locally defined in a function. */
template <typename TSubTable>
struct SubTableSubsetWrapper
{
SubTableSubsetWrapper (const TSubTable &subtable_,
unsigned int lookup_type_) :
subtable (subtable_),
lookup_type (lookup_type_) {}
bool subset (hb_subset_context_t *c) const
{ return subtable.dispatch (c, lookup_type); }
private:
const TSubTable &subtable;
unsigned int lookup_type;
};
template <typename TSubTable> template <typename TSubTable>
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SUBSET (this);
struct Lookup *out = c->serializer->embed (*this); auto *out = c->serializer->embed (*this);
if (unlikely (!out)) return_trace (false); if (unlikely (!out)) return_trace (false);
/* Subset the actual subtables. */ /* Subset the actual subtables. */
@@ -747,23 +732,11 @@ struct Lookup
OffsetArrayOf<TSubTable>& out_subtables = out->get_subtables<TSubTable> (); OffsetArrayOf<TSubTable>& out_subtables = out->get_subtables<TSubTable> ();
unsigned int count = subTable.len; unsigned int count = subTable.len;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
{ out_subtables[i].serialize_subset (c, subtables[i], this, out, get_type ());
SubTableSubsetWrapper<TSubTable> wrapper (this+subtables[i], get_type ());
out_subtables[i].serialize_subset (c, wrapper, out);
}
return_trace (true); return_trace (true);
} }
/* Older compilers need this to NOT be locally defined in a function. */
template <typename TSubTable>
struct SubTableSanitizeWrapper : TSubTable
{
bool sanitize (hb_sanitize_context_t *c, unsigned int lookup_type) const
{ return this->dispatch (c, lookup_type); }
};
template <typename TSubTable> template <typename TSubTable>
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
{ {
@@ -775,16 +748,21 @@ struct Lookup
if (!markFilteringSet.sanitize (c)) return_trace (false); if (!markFilteringSet.sanitize (c)) return_trace (false);
} }
if (unlikely (!CastR<OffsetArrayOf<SubTableSanitizeWrapper<TSubTable> > > (subTable) if (unlikely (!get_subtables<TSubTable> ().sanitize (c, this, get_type ())))
.sanitize (c, this, get_type ())))
return_trace (false); return_trace (false);
if (unlikely (get_type () == TSubTable::Extension)) if (unlikely (get_type () == TSubTable::Extension && !c->get_edit_count ()))
{ {
/* The spec says all subtables of an Extension lookup should /* The spec says all subtables of an Extension lookup should
* have the same type, which shall not be the Extension type * have the same type, which shall not be the Extension type
* itself (but we already checked for that). * itself (but we already checked for that).
* This is specially important if one has a reverse type! */ * This is specially important if one has a reverse type!
*
* We only do this if sanitizer edit_count is zero. Otherwise,
* some of the subtables might have become insane after they
* were sanity-checked by the edits of subsequent subtables.
* https://bugs.chromium.org/p/chromium/issues/detail?id=960331
*/
unsigned int type = get_subtable<TSubTable> (0).u.extension.get_type (); unsigned int type = get_subtable<TSubTable> (0).u.extension.get_type ();
unsigned int count = get_subtable_count (); unsigned int count = get_subtable_count ();
for (unsigned int i = 1; i < count; i++) for (unsigned int i = 1; i < count; i++)
@@ -792,7 +770,6 @@ struct Lookup
return_trace (false); return_trace (false);
} }
return_trace (true); return_trace (true);
return_trace (true);
} }
private: private:
@@ -826,8 +803,9 @@ struct CoverageFormat1
return i; return i;
} }
bool serialize (hb_serialize_context_t *c, template <typename Iterator,
hb_array_t<const GlyphID> glyphs) hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
return_trace (glyphArray.serialize (c, glyphs)); return_trace (glyphArray.serialize (c, glyphs));
@@ -853,19 +831,19 @@ struct CoverageFormat1
template <typename set_t> template <typename set_t>
bool add_coverage (set_t *glyphs) const bool add_coverage (set_t *glyphs) const
{ { return glyphs->add_sorted_array (glyphArray.arrayZ, glyphArray.len); }
return glyphs->add_sorted_array (glyphArray.arrayZ, glyphArray.len);
}
public: public:
/* Older compilers need this to be public. */ /* Older compilers need this to be public. */
struct Iter { struct iter_t
{
void init (const struct CoverageFormat1 &c_) { c = &c_; i = 0; } void init (const struct CoverageFormat1 &c_) { c = &c_; i = 0; }
void fini () {} void fini () {}
bool more () { return i < c->glyphArray.len; } bool more () const { return i < c->glyphArray.len; }
void next () { i++; } void next () { i++; }
hb_codepoint_t get_glyph () { return c->glyphArray[i]; } hb_codepoint_t get_glyph () const { return c->glyphArray[i]; }
unsigned int get_coverage () { return i; } bool operator != (const iter_t& o) const
{ return i != o.i || c != o.c; }
private: private:
const struct CoverageFormat1 *c; const struct CoverageFormat1 *c;
@@ -894,38 +872,48 @@ struct CoverageFormat2
NOT_COVERED; NOT_COVERED;
} }
bool serialize (hb_serialize_context_t *c, template <typename Iterator,
hb_array_t<const GlyphID> glyphs) hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
if (unlikely (!glyphs.length)) if (unlikely (!glyphs))
{ {
rangeRecord.len.set (0); rangeRecord.len = 0;
return_trace (true); return_trace (true);
} }
unsigned int num_ranges = 1; /* TODO(iter) Write more efficiently? */
for (unsigned int i = 1; i < glyphs.length; i++)
if (glyphs[i - 1] + 1 != glyphs[i])
num_ranges++;
rangeRecord.len.set (num_ranges);
if (unlikely (!c->extend (rangeRecord))) return_trace (false);
unsigned int range = 0; unsigned num_ranges = 0;
rangeRecord[range].start = glyphs[0]; hb_codepoint_t last = (hb_codepoint_t) -2;
rangeRecord[range].value.set (0); for (auto g: glyphs)
for (unsigned int i = 1; i < glyphs.length; i++)
{ {
if (glyphs[i - 1] + 1 != glyphs[i]) if (last + 1 != g)
num_ranges++;
last = g;
}
if (unlikely (!rangeRecord.serialize (c, num_ranges))) return_trace (false);
unsigned count = 0;
unsigned range = (unsigned) -1;
last = (hb_codepoint_t) -2;
for (auto g: glyphs)
{
if (last + 1 != g)
{ {
range++; range++;
rangeRecord[range].start = glyphs[i]; rangeRecord[range].start = g;
rangeRecord[range].value.set (i); rangeRecord[range].value = count;
} }
rangeRecord[range].end = glyphs[i]; rangeRecord[range].end = g;
last = g;
count++;
} }
return_trace (true); return_trace (true);
} }
@@ -972,7 +960,7 @@ struct CoverageFormat2
public: public:
/* Older compilers need this to be public. */ /* Older compilers need this to be public. */
struct Iter struct iter_t
{ {
void init (const CoverageFormat2 &c_) void init (const CoverageFormat2 &c_)
{ {
@@ -987,7 +975,7 @@ struct CoverageFormat2
} }
} }
void fini () {} void fini () {}
bool more () { return i < c->rangeRecord.len; } bool more () const { return i < c->rangeRecord.len; }
void next () void next ()
{ {
if (j >= c->rangeRecord[i].end) if (j >= c->rangeRecord[i].end)
@@ -995,23 +983,27 @@ struct CoverageFormat2
i++; i++;
if (more ()) if (more ())
{ {
hb_codepoint_t old = j; unsigned int old = coverage;
j = c->rangeRecord[i].start; j = c->rangeRecord[i].start;
if (unlikely (j <= old)) coverage = c->rangeRecord[i].value;
if (unlikely (coverage != old + 1))
{ {
/* Broken table. Skip. Important to avoid DoS. */ /* Broken table. Skip. Important to avoid DoS.
* Also, our callers depend on coverage being
* consecutive and monotonically increasing,
* ie. iota(). */
i = c->rangeRecord.len; i = c->rangeRecord.len;
return; return;
} }
coverage = c->rangeRecord[i].value;
} }
return; return;
} }
coverage++; coverage++;
j++; j++;
} }
hb_codepoint_t get_glyph () { return j; } hb_codepoint_t get_glyph () const { return j; }
unsigned int get_coverage () { return coverage; } bool operator != (const iter_t& o) const
{ return i != o.i || j != o.j || c != o.c; }
private: private:
const struct CoverageFormat2 *c; const struct CoverageFormat2 *c;
@@ -1032,6 +1024,15 @@ struct CoverageFormat2
struct Coverage struct Coverage
{ {
/* Has interface. */
static constexpr unsigned SENTINEL = NOT_COVERED;
typedef unsigned int value_t;
value_t operator [] (hb_codepoint_t k) const { return get (k); }
bool has (hb_codepoint_t k) const { return (*this)[k] != SENTINEL; }
/* Predicate. */
bool operator () (hb_codepoint_t k) const { return has (k); }
unsigned int get (hb_codepoint_t k) const { return get_coverage (k); }
unsigned int get_coverage (hb_codepoint_t glyph_id) const unsigned int get_coverage (hb_codepoint_t glyph_id) const
{ {
switch (u.format) { switch (u.format) {
@@ -1041,17 +1042,24 @@ struct Coverage
} }
} }
bool serialize (hb_serialize_context_t *c, template <typename Iterator,
hb_array_t<const GlyphID> glyphs) hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, Iterator glyphs)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
unsigned int num_ranges = 1; unsigned count = 0;
for (unsigned int i = 1; i < glyphs.length; i++) unsigned num_ranges = 0;
if (glyphs[i - 1] + 1 != glyphs[i]) hb_codepoint_t last = (hb_codepoint_t) -2;
for (auto g: glyphs)
{
if (last + 1 != g)
num_ranges++; num_ranges++;
u.format.set (glyphs.length * 2 < num_ranges * 3 ? 1 : 2); last = g;
count++;
}
u.format = count * 2 < num_ranges * 3 ? 1 : 2;
switch (u.format) switch (u.format)
{ {
@@ -1105,9 +1113,10 @@ struct Coverage
} }
} }
struct Iter struct iter_t : hb_iter_with_fallback_t<iter_t, hb_codepoint_t>
{ {
Iter (const Coverage &c_) static constexpr bool is_sorted_iterator = true;
iter_t (const Coverage &c_ = Null(Coverage))
{ {
memset (this, 0, sizeof (*this)); memset (this, 0, sizeof (*this));
format = c_.u.format; format = c_.u.format;
@@ -1118,7 +1127,7 @@ struct Coverage
default: return; default: return;
} }
} }
bool more () bool __more__ () const
{ {
switch (format) switch (format)
{ {
@@ -1127,7 +1136,7 @@ struct Coverage
default:return false; default:return false;
} }
} }
void next () void __next__ ()
{ {
switch (format) switch (format)
{ {
@@ -1136,7 +1145,10 @@ struct Coverage
default: break; default: break;
} }
} }
hb_codepoint_t get_glyph () typedef hb_codepoint_t __item_t__;
__item_t__ __item__ () const { return get_glyph (); }
hb_codepoint_t get_glyph () const
{ {
switch (format) switch (format)
{ {
@@ -1145,23 +1157,25 @@ struct Coverage
default:return 0; default:return 0;
} }
} }
unsigned int get_coverage () bool operator != (const iter_t& o) const
{ {
if (format != o.format) return true;
switch (format) switch (format)
{ {
case 1: return u.format1.get_coverage (); case 1: return u.format1 != o.u.format1;
case 2: return u.format2.get_coverage (); case 2: return u.format2 != o.u.format2;
default:return -1; default:return false;
} }
} }
private: private:
unsigned int format; unsigned int format;
union { union {
CoverageFormat2::Iter format2; /* Put this one first since it's larger; helps shut up compiler. */ CoverageFormat2::iter_t format2; /* Put this one first since it's larger; helps shut up compiler. */
CoverageFormat1::Iter format1; CoverageFormat1::iter_t format1;
} u; } u;
}; };
iter_t iter () const { return iter_t (*this); }
protected: protected:
union { union {
@@ -1193,24 +1207,24 @@ struct ClassDefFormat1
} }
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const HBUINT16> glyphs, hb_array_t<const GlyphID> glyphs,
hb_array_t<const HBUINT16> klasses) hb_array_t<const HBUINT16> klasses)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
if (unlikely (!glyphs.length)) if (unlikely (!glyphs))
{ {
startGlyph.set (0); startGlyph = 0;
classValue.len.set (0); classValue.len = 0;
return_trace (true); return_trace (true);
} }
hb_codepoint_t glyph_min = glyphs[0]; hb_codepoint_t glyph_min = +glyphs | hb_reduce (hb_min, 0xFFFFu);
hb_codepoint_t glyph_max = glyphs[glyphs.length - 1]; hb_codepoint_t glyph_max = +glyphs | hb_reduce (hb_max, 0u);
startGlyph.set (glyph_min); startGlyph = glyph_min;
classValue.len.set (glyph_max - glyph_min + 1); c->check_assign (classValue.len, glyph_max - glyph_min + 1);
if (unlikely (!c->extend (classValue))) return_trace (false); if (unlikely (!c->extend (classValue))) return_trace (false);
for (unsigned int i = 0; i < glyphs.length; i++) for (unsigned int i = 0; i < glyphs.length; i++)
@@ -1224,22 +1238,22 @@ struct ClassDefFormat1
TRACE_SUBSET (this); TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset (); const hb_set_t &glyphset = *c->plan->glyphset ();
const hb_map_t &glyph_map = *c->plan->glyph_map; const hb_map_t &glyph_map = *c->plan->glyph_map;
hb_vector_t<GlyphID> glyphs; hb_sorted_vector_t<GlyphID> glyphs;
hb_vector_t<HBUINT16> klasses; hb_vector_t<HBUINT16> klasses;
hb_codepoint_t start = startGlyph; hb_codepoint_t start = startGlyph;
hb_codepoint_t end = start + classValue.len; hb_codepoint_t end = start + classValue.len;
for (hb_codepoint_t g = start; g < end; g++) for (hb_codepoint_t g = start; g < end; g++)
{ {
if (!glyphset.has (g)) continue;
unsigned int value = classValue[g - start]; unsigned int value = classValue[g - start];
if (!value) continue; if (!value) continue;
if (!glyphset.has (g)) continue; glyphs.push(glyph_map[g]);
glyphs.push()->set (glyph_map[g]); klasses.push(value);
klasses.push()->set (value);
} }
c->serializer->propagate_error (glyphs, klasses); c->serializer->propagate_error (glyphs, klasses);
ClassDef_serialize (c->serializer, glyphs, klasses); ClassDef_serialize (c->serializer, glyphs, klasses);
return_trace (glyphs.length); return_trace ((bool) glyphs);
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -1329,40 +1343,42 @@ struct ClassDefFormat2
} }
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const HBUINT16> glyphs, hb_array_t<const GlyphID> glyphs,
hb_array_t<const HBUINT16> klasses) hb_array_t<const HBUINT16> klasses)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
if (unlikely (!glyphs.length)) if (unlikely (!glyphs))
{ {
rangeRecord.len.set (0); rangeRecord.len = 0;
return_trace (true); return_trace (true);
} }
unsigned int count = glyphs.len ();
unsigned int num_ranges = 1; unsigned int num_ranges = 1;
for (unsigned int i = 1; i < glyphs.length; i++) for (unsigned int i = 1; i < count; i++)
if (glyphs[i - 1] + 1 != glyphs[i] || if (glyphs[i - 1] + 1 != glyphs[i] ||
klasses[i - 1] != klasses[i]) klasses[i - 1] != klasses[i])
num_ranges++; num_ranges++;
rangeRecord.len.set (num_ranges); rangeRecord.len = num_ranges;
if (unlikely (!c->extend (rangeRecord))) return_trace (false); if (unlikely (!c->extend (rangeRecord))) return_trace (false);
unsigned int range = 0; unsigned int range = 0;
rangeRecord[range].start = glyphs[0]; rangeRecord[range].start = glyphs[0];
rangeRecord[range].value.set (klasses[0]); rangeRecord[range].value = klasses[0];
for (unsigned int i = 1; i < glyphs.length; i++) for (unsigned int i = 1; i < count; i++)
{ {
if (glyphs[i - 1] + 1 != glyphs[i] || if (glyphs[i - 1] + 1 != glyphs[i] ||
klasses[i - 1] != klasses[i]) klasses[i - 1] != klasses[i])
{ {
rangeRecord[range].end = glyphs[i - 1];
range++; range++;
rangeRecord[range].start = glyphs[i]; rangeRecord[range].start = glyphs[i];
rangeRecord[range].value = klasses[i]; rangeRecord[range].value = klasses[i];
} }
rangeRecord[range].end = glyphs[i];
} }
rangeRecord[range].end = glyphs[count - 1];
return_trace (true); return_trace (true);
} }
@@ -1384,13 +1400,13 @@ struct ClassDefFormat2
for (hb_codepoint_t g = start; g < end; g++) for (hb_codepoint_t g = start; g < end; g++)
{ {
if (!glyphset.has (g)) continue; if (!glyphset.has (g)) continue;
glyphs.push ()->set (glyph_map[g]); glyphs.push (glyph_map[g]);
klasses.push ()->set (value); klasses.push (value);
} }
} }
c->serializer->propagate_error (glyphs, klasses); c->serializer->propagate_error (glyphs, klasses);
ClassDef_serialize (c->serializer, glyphs, klasses); ClassDef_serialize (c->serializer, glyphs, klasses);
return_trace (glyphs.length); return_trace ((bool) glyphs);
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -1468,6 +1484,15 @@ struct ClassDefFormat2
struct ClassDef struct ClassDef
{ {
/* Has interface. */
static constexpr unsigned SENTINEL = 0;
typedef unsigned int value_t;
value_t operator [] (hb_codepoint_t k) const { return get (k); }
bool has (hb_codepoint_t k) const { return (*this)[k] != SENTINEL; }
/* Projection. */
hb_codepoint_t operator () (hb_codepoint_t k) const { return get (k); }
unsigned int get (hb_codepoint_t k) const { return get_class (k); }
unsigned int get_class (hb_codepoint_t glyph_id) const unsigned int get_class (hb_codepoint_t glyph_id) const
{ {
switch (u.format) { switch (u.format) {
@@ -1485,13 +1510,14 @@ struct ClassDef
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
unsigned int format = 2; unsigned int format = 2;
if (glyphs.length) if (likely (glyphs))
{ {
hb_codepoint_t glyph_min = glyphs[0]; hb_codepoint_t glyph_min = +glyphs | hb_reduce (hb_min, 0xFFFFu);
hb_codepoint_t glyph_max = glyphs[glyphs.length - 1]; hb_codepoint_t glyph_max = +glyphs | hb_reduce (hb_max, 0u);
unsigned int count = glyphs.len ();
unsigned int num_ranges = 1; unsigned int num_ranges = 1;
for (unsigned int i = 1; i < glyphs.length; i++) for (unsigned int i = 1; i < count; i++)
if (glyphs[i - 1] + 1 != glyphs[i] || if (glyphs[i - 1] + 1 != glyphs[i] ||
klasses[i - 1] != klasses[i]) klasses[i - 1] != klasses[i])
num_ranges++; num_ranges++;
@@ -1499,7 +1525,7 @@ struct ClassDef
if (1 + (glyph_max - glyph_min + 1) < num_ranges * 3) if (1 + (glyph_max - glyph_min + 1) < num_ranges * 3)
format = 1; format = 1;
} }
u.format.set (format); u.format = format;
switch (u.format) switch (u.format)
{ {
@@ -1975,10 +2001,10 @@ struct FeatureVariations
return (this+record.substitutions).find_substitute (feature_index); return (this+record.substitutions).find_substitute (feature_index);
} }
bool subset (hb_subset_context_t *c) const FeatureVariations* copy (hb_serialize_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SERIALIZE (this);
return_trace (c->serializer->embed (*this)); return_trace (c->embed (*this));
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const

View File

@@ -439,19 +439,19 @@ struct GDEF
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SUBSET (this);
struct GDEF *out = c->serializer->embed (*this); auto *out = c->serializer->embed (*this);
if (unlikely (!out)) return_trace (false); if (unlikely (!out)) return_trace (false);
out->glyphClassDef.serialize_subset (c, this+glyphClassDef, out); out->glyphClassDef.serialize_subset (c, glyphClassDef, this, out);
out->attachList.set (0);//TODO(subset) serialize_subset (c, this+attachList, out); out->attachList = 0;//TODO(subset) serialize_subset (c, attachList, this, out);
out->ligCaretList.set (0);//TODO(subset) serialize_subset (c, this+ligCaretList, out); out->ligCaretList = 0;//TODO(subset) serialize_subset (c, ligCaretList, this, out);
out->markAttachClassDef.serialize_subset (c, this+markAttachClassDef, out); out->markAttachClassDef.serialize_subset (c, markAttachClassDef, this, out);
if (version.to_int () >= 0x00010002u) if (version.to_int () >= 0x00010002u)
out->markGlyphSetsDef.set (0);// TODO(subset) serialize_subset (c, this+markGlyphSetsDef, out); out->markGlyphSetsDef = 0;// TODO(subset) serialize_subset (c, markGlyphSetsDef, this, out);
if (version.to_int () >= 0x00010003u) if (version.to_int () >= 0x00010003u)
out->varStore.set (0);// TODO(subset) serialize_subset (c, this+varStore, out); out->varStore = 0;// TODO(subset) serialize_subset (c, varStore, this, out);
return_trace (true); return_trace (true);
} }

View File

@@ -173,15 +173,15 @@ struct ValueFormat : HBUINT16
return true; return true;
} }
static OffsetTo<Device>& get_device (Value* value) HB_INTERNAL static OffsetTo<Device>& get_device (Value* value)
{ return *CastP<OffsetTo<Device>> (value); } { return *CastP<OffsetTo<Device>> (value); }
static const OffsetTo<Device>& get_device (const Value* value, bool *worked=nullptr) HB_INTERNAL static const OffsetTo<Device>& get_device (const Value* value, bool *worked=nullptr)
{ {
if (worked) *worked |= bool (*value); if (worked) *worked |= bool (*value);
return *CastP<OffsetTo<Device>> (value); return *CastP<OffsetTo<Device>> (value);
} }
static const HBINT16& get_short (const Value* value, bool *worked=nullptr) HB_INTERNAL static const HBINT16& get_short (const Value* value, bool *worked=nullptr)
{ {
if (worked) *worked |= bool (*value); if (worked) *worked |= bool (*value);
return *CastP<HBINT16> (value); return *CastP<HBINT16> (value);
@@ -446,8 +446,8 @@ struct MarkArray : ArrayOf<MarkRecord> /* Array of MarkRecords--in Coverage orde
glyph_anchor.get_anchor (c, buffer->info[glyph_pos].codepoint, &base_x, &base_y); glyph_anchor.get_anchor (c, buffer->info[glyph_pos].codepoint, &base_x, &base_y);
hb_glyph_position_t &o = buffer->cur_pos(); hb_glyph_position_t &o = buffer->cur_pos();
o.x_offset = round (base_x - mark_x); o.x_offset = roundf (base_x - mark_x);
o.y_offset = round (base_y - mark_y); o.y_offset = roundf (base_y - mark_y);
o.attach_type() = ATTACH_TYPE_MARK; o.attach_type() = ATTACH_TYPE_MARK;
o.attach_chain() = (int) glyph_pos - (int) buffer->idx; o.attach_chain() = (int) glyph_pos - (int) buffer->idx;
buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT; buffer->scratch_flags |= HB_BUFFER_SCRATCH_FLAG_HAS_GPOS_ATTACHMENT;
@@ -576,14 +576,14 @@ struct SinglePosFormat2
struct SinglePos struct SinglePos
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2)); case 2: return_trace (c->dispatch (u.format2, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -722,16 +722,14 @@ struct PairPosFormat1
{ {
bool intersects (const hb_set_t *glyphs) const bool intersects (const hb_set_t *glyphs) const
{ {
unsigned int count = pairSet.len; return
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) + hb_zip (this+coverage, pairSet)
{ | hb_filter (*glyphs, hb_first)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_second)
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_map ([=] (const OffsetTo<PairSet> &_)
if (glyphs->has (iter.get_glyph ()) && { return (this+_).intersects (glyphs, valueFormat); })
(this+pairSet[iter.get_coverage ()]).intersects (glyphs, valueFormat)) | hb_any
return true; ;
}
return false;
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -909,14 +907,14 @@ struct PairPosFormat2
struct PairPos struct PairPos
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2)); case 2: return_trace (c->dispatch (u.format2, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -995,32 +993,32 @@ struct CursivePosFormat1
/* Main-direction adjustment */ /* Main-direction adjustment */
switch (c->direction) { switch (c->direction) {
case HB_DIRECTION_LTR: case HB_DIRECTION_LTR:
pos[i].x_advance = round (exit_x) + pos[i].x_offset; pos[i].x_advance = roundf (exit_x) + pos[i].x_offset;
d = round (entry_x) + pos[j].x_offset; d = roundf (entry_x) + pos[j].x_offset;
pos[j].x_advance -= d; pos[j].x_advance -= d;
pos[j].x_offset -= d; pos[j].x_offset -= d;
break; break;
case HB_DIRECTION_RTL: case HB_DIRECTION_RTL:
d = round (exit_x) + pos[i].x_offset; d = roundf (exit_x) + pos[i].x_offset;
pos[i].x_advance -= d; pos[i].x_advance -= d;
pos[i].x_offset -= d; pos[i].x_offset -= d;
pos[j].x_advance = round (entry_x) + pos[j].x_offset; pos[j].x_advance = roundf (entry_x) + pos[j].x_offset;
break; break;
case HB_DIRECTION_TTB: case HB_DIRECTION_TTB:
pos[i].y_advance = round (exit_y) + pos[i].y_offset; pos[i].y_advance = roundf (exit_y) + pos[i].y_offset;
d = round (entry_y) + pos[j].y_offset; d = roundf (entry_y) + pos[j].y_offset;
pos[j].y_advance -= d; pos[j].y_advance -= d;
pos[j].y_offset -= d; pos[j].y_offset -= d;
break; break;
case HB_DIRECTION_BTT: case HB_DIRECTION_BTT:
d = round (exit_y) + pos[i].y_offset; d = roundf (exit_y) + pos[i].y_offset;
pos[i].y_advance -= d; pos[i].y_advance -= d;
pos[i].y_offset -= d; pos[i].y_offset -= d;
pos[j].y_advance = round (entry_y); pos[j].y_advance = roundf (entry_y);
break; break;
case HB_DIRECTION_INVALID: case HB_DIRECTION_INVALID:
default: default:
@@ -1094,13 +1092,13 @@ struct CursivePosFormat1
struct CursivePos struct CursivePos
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -1210,13 +1208,13 @@ struct MarkBasePosFormat1
struct MarkBasePos struct MarkBasePos
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -1289,7 +1287,7 @@ struct MarkLigPosFormat1
unsigned int mark_id = _hb_glyph_info_get_lig_id (&buffer->cur()); unsigned int mark_id = _hb_glyph_info_get_lig_id (&buffer->cur());
unsigned int mark_comp = _hb_glyph_info_get_lig_comp (&buffer->cur()); unsigned int mark_comp = _hb_glyph_info_get_lig_comp (&buffer->cur());
if (lig_id && lig_id == mark_id && mark_comp > 0) if (lig_id && lig_id == mark_id && mark_comp > 0)
comp_index = MIN (comp_count, _hb_glyph_info_get_lig_comp (&buffer->cur())) - 1; comp_index = hb_min (comp_count, _hb_glyph_info_get_lig_comp (&buffer->cur())) - 1;
else else
comp_index = comp_count - 1; comp_index = comp_count - 1;
@@ -1335,13 +1333,13 @@ struct MarkLigPosFormat1
struct MarkLigPos struct MarkLigPos
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -1457,13 +1455,13 @@ struct MarkMarkPosFormat1
struct MarkMarkPos struct MarkMarkPos
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -1509,20 +1507,20 @@ struct PosLookupSubTable
Extension = 9 Extension = 9
}; };
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, unsigned int lookup_type) const typename context_t::return_t dispatch (context_t *c, unsigned int lookup_type, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, lookup_type); TRACE_DISPATCH (this, lookup_type);
switch (lookup_type) { switch (lookup_type) {
case Single: return_trace (u.single.dispatch (c)); case Single: return_trace (u.single.dispatch (c, hb_forward<Ts> (ds)...));
case Pair: return_trace (u.pair.dispatch (c)); case Pair: return_trace (u.pair.dispatch (c, hb_forward<Ts> (ds)...));
case Cursive: return_trace (u.cursive.dispatch (c)); case Cursive: return_trace (u.cursive.dispatch (c, hb_forward<Ts> (ds)...));
case MarkBase: return_trace (u.markBase.dispatch (c)); case MarkBase: return_trace (u.markBase.dispatch (c, hb_forward<Ts> (ds)...));
case MarkLig: return_trace (u.markLig.dispatch (c)); case MarkLig: return_trace (u.markLig.dispatch (c, hb_forward<Ts> (ds)...));
case MarkMark: return_trace (u.markMark.dispatch (c)); case MarkMark: return_trace (u.markMark.dispatch (c, hb_forward<Ts> (ds)...));
case Context: return_trace (u.context.dispatch (c)); case Context: return_trace (u.context.dispatch (c, hb_forward<Ts> (ds)...));
case ChainContext: return_trace (u.chainContext.dispatch (c)); case ChainContext: return_trace (u.chainContext.dispatch (c, hb_forward<Ts> (ds)...));
case Extension: return_trace (u.extension.dispatch (c)); case Extension: return_trace (u.extension.dispatch (c, hb_forward<Ts> (ds)...));
default: return_trace (c->default_return_value ()); default: return_trace (c->default_return_value ());
} }
} }
@@ -1578,14 +1576,14 @@ struct PosLookup : Lookup
dispatch (&c); dispatch (&c);
} }
static bool apply_recurse_func (hb_ot_apply_context_t *c, unsigned int lookup_index); HB_INTERNAL static bool apply_recurse_func (hb_ot_apply_context_t *c, unsigned int lookup_index);
template <typename context_t> template <typename context_t>
static typename context_t::return_t dispatch_recurse_func (context_t *c, unsigned int lookup_index); static typename context_t::return_t dispatch_recurse_func (context_t *c, unsigned int lookup_index);
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ return Lookup::dispatch<SubTable> (c); } { return Lookup::dispatch<SubTable> (c, hb_forward<Ts> (ds)...); }
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ return Lookup::subset<SubTable> (c); } { return Lookup::subset<SubTable> (c); }

View File

@@ -34,10 +34,12 @@
namespace OT { namespace OT {
typedef hb_pair_t<hb_codepoint_t, hb_codepoint_t> hb_codepoint_pair_t;
template<typename Iterator>
static inline void SingleSubst_serialize (hb_serialize_context_t *c, static inline void SingleSubst_serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, Iterator it);
hb_array_t<const GlyphID> substitutes);
struct SingleSubstFormat1 struct SingleSubstFormat1
{ {
@@ -46,35 +48,28 @@ struct SingleSubstFormat1
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) unsigned d = deltaGlyphID;
{ + hb_iter (this+coverage)
/* TODO Switch to range-based API to work around malicious fonts. | hb_filter (*c->glyphs)
* https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_map ([d] (hb_codepoint_t g) { return (g + d) & 0xFFFFu; })
hb_codepoint_t glyph_id = iter.get_glyph (); | hb_sink (c->output)
if (c->glyphs->has (glyph_id)) ;
c->out->add ((glyph_id + deltaGlyphID) & 0xFFFFu);
}
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ {
if (unlikely (!(this+coverage).add_coverage (c->input))) return; if (unlikely (!(this+coverage).add_coverage (c->input))) return;
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) unsigned d = deltaGlyphID;
{ + hb_iter (this+coverage)
/* TODO Switch to range-based API to work around malicious fonts. | hb_map ([d] (hb_codepoint_t g) { return (g + d) & 0xFFFFu; })
* https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_sink (c->output)
hb_codepoint_t glyph_id = iter.get_glyph (); ;
c->output->add ((glyph_id + deltaGlyphID) & 0xFFFFu);
}
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ { return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
TRACE_WOULD_APPLY (this);
return_trace (c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED);
}
bool apply (hb_ot_apply_context_t *c) const bool apply (hb_ot_apply_context_t *c) const
{ {
@@ -91,34 +86,38 @@ struct SingleSubstFormat1
return_trace (true); return_trace (true);
} }
template<typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, Iterator glyphs,
int delta) unsigned delta)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
if (unlikely (!coverage.serialize (c, this).serialize (c, glyphs))) return_trace (false); if (unlikely (!coverage.serialize (c, this).serialize (c, glyphs))) return_trace (false);
deltaGlyphID.set (delta); /* TODO(serialize) overflow? */ c->check_assign (deltaGlyphID, delta);
return_trace (true); return_trace (true);
} }
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset (); const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map; const hb_map_t &glyph_map = *c->plan->glyph_map;
hb_vector_t<GlyphID> from;
hb_vector_t<GlyphID> to;
hb_codepoint_t delta = deltaGlyphID; hb_codepoint_t delta = deltaGlyphID;
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ())
{ auto it =
if (!glyphset.has (iter.get_glyph ())) continue; + hb_iter (this+coverage)
from.push ()->set (glyph_map[iter.get_glyph ()]); | hb_filter (glyphset)
to.push ()->set (glyph_map[(iter.get_glyph () + delta) & 0xFFFF]); | hb_map_retains_sorting ([&] (hb_codepoint_t g) {
} return hb_codepoint_pair_t (glyph_map[g],
c->serializer->propagate_error (from, to); glyph_map[(g + delta) & 0xFFFF]); })
SingleSubst_serialize (c->serializer, from, to); ;
return_trace (from.length);
bool ret = bool (it);
SingleSubst_serialize (c->serializer, it);
return_trace (ret);
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -132,8 +131,8 @@ struct SingleSubstFormat1
OffsetTo<Coverage> OffsetTo<Coverage>
coverage; /* Offset to Coverage table--from coverage; /* Offset to Coverage table--from
* beginning of Substitution table */ * beginning of Substitution table */
HBINT16 deltaGlyphID; /* Add to original GlyphID to get HBUINT16 deltaGlyphID; /* Add to original GlyphID to get
* substitute GlyphID */ * substitute GlyphID, modulo 0x10000 */
public: public:
DEFINE_SIZE_STATIC (6); DEFINE_SIZE_STATIC (6);
}; };
@@ -145,35 +144,26 @@ struct SingleSubstFormat2
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
unsigned int count = substitute.len; + hb_zip (this+coverage, substitute)
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) | hb_filter (*c->glyphs, hb_first)
{ | hb_map (hb_second)
if (unlikely (iter.get_coverage () >= count)) | hb_sink (c->output)
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ ;
if (c->glyphs->has (iter.get_glyph ()))
c->out->add (substitute[iter.get_coverage ()]);
}
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ {
if (unlikely (!(this+coverage).add_coverage (c->input))) return; if (unlikely (!(this+coverage).add_coverage (c->input))) return;
unsigned int count = substitute.len; + hb_zip (this+coverage, substitute)
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) | hb_map (hb_second)
{ | hb_sink (c->output)
if (unlikely (iter.get_coverage () >= count)) ;
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */
c->output->add (substitute[iter.get_coverage ()]);
}
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ { return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
TRACE_WOULD_APPLY (this);
return_trace (c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED);
}
bool apply (hb_ot_apply_context_t *c) const bool apply (hb_ot_apply_context_t *c) const
{ {
@@ -188,11 +178,21 @@ struct SingleSubstFormat2
return_trace (true); return_trace (true);
} }
template<typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator,
hb_codepoint_pair_t))>
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, Iterator it)
hb_array_t<const GlyphID> substitutes)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
auto substitutes =
+ it
| hb_map (hb_second)
;
auto glyphs =
+ it
| hb_map_retains_sorting (hb_first)
;
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
if (unlikely (!substitute.serialize (c, substitutes))) return_trace (false); if (unlikely (!substitute.serialize (c, substitutes))) return_trace (false);
if (unlikely (!coverage.serialize (c, this).serialize (c, glyphs))) return_trace (false); if (unlikely (!coverage.serialize (c, this).serialize (c, glyphs))) return_trace (false);
@@ -202,19 +202,19 @@ struct SingleSubstFormat2
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SUBSET (this);
const hb_set_t &glyphset = *c->plan->glyphset (); const hb_set_t &glyphset = *c->plan->glyphset_gsub ();
const hb_map_t &glyph_map = *c->plan->glyph_map; const hb_map_t &glyph_map = *c->plan->glyph_map;
hb_vector_t<GlyphID> from;
hb_vector_t<GlyphID> to; auto it =
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) + hb_zip (this+coverage, substitute)
{ | hb_filter (glyphset, hb_first)
if (!glyphset.has (iter.get_glyph ())) continue; | hb_map_retains_sorting ([&] (hb_pair_t<hb_codepoint_t, const GlyphID &> p) -> hb_codepoint_pair_t
from.push ()->set (glyph_map[iter.get_glyph ()]); { return hb_pair (glyph_map[p.first], glyph_map[p.second]); })
to.push ()->set (glyph_map[substitute[iter.get_coverage ()]]); ;
}
c->serializer->propagate_error (from, to); bool ret = bool (it);
SingleSubst_serialize (c->serializer, from, to); SingleSubst_serialize (c->serializer, it);
return_trace (from.length); return_trace (ret);
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -237,41 +237,45 @@ struct SingleSubstFormat2
struct SingleSubst struct SingleSubst
{ {
template<typename Iterator,
hb_requires (hb_is_sorted_source_of (Iterator,
const hb_codepoint_pair_t))>
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, Iterator glyphs)
hb_array_t<const GlyphID> substitutes)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false); if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned int format = 2; unsigned format = 2;
int delta = 0; unsigned delta = 0;
if (glyphs.length) if (glyphs.len ())
{ {
format = 1; format = 1;
/* TODO(serialize) check for wrap-around */ auto get_delta = [=] (hb_codepoint_pair_t _) {
delta = substitutes[0] - glyphs[0]; return (unsigned) (_.second - _.first) & 0xFFFF;
for (unsigned int i = 1; i < glyphs.length; i++) };
if (delta != (int) (substitutes[i] - glyphs[i])) { delta = get_delta (*glyphs);
format = 2; if (!hb_all (++(+glyphs), delta, get_delta)) format = 2;
break;
} }
} u.format = format;
u.format.set (format);
switch (u.format) { switch (u.format) {
case 1: return_trace (u.format1.serialize (c, glyphs, delta)); case 1: return_trace (u.format1.serialize (c,
case 2: return_trace (u.format2.serialize (c, glyphs, substitutes)); + glyphs
| hb_map_retains_sorting (hb_first),
delta));
case 2: return_trace (u.format2.serialize (c, glyphs));
default:return_trace (false); default:return_trace (false);
} }
} }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2)); case 2: return_trace (c->dispatch (u.format2, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -284,11 +288,11 @@ struct SingleSubst
} u; } u;
}; };
template<typename Iterator>
static inline void static inline void
SingleSubst_serialize (hb_serialize_context_t *c, SingleSubst_serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, Iterator it)
hb_array_t<const GlyphID> substitutes) { c->start_embed<SingleSubst> ()->serialize (c, it); }
{ c->start_embed<SingleSubst> ()->serialize (c, glyphs, substitutes); }
struct Sequence struct Sequence
{ {
@@ -296,7 +300,7 @@ struct Sequence
{ {
unsigned int count = substitute.len; unsigned int count = substitute.len;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
c->out->add (substitute[i]); c->output->add (substitute[i]);
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -334,11 +338,13 @@ struct Sequence
return_trace (true); return_trace (true);
} }
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs) Iterator subst)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
return_trace (substitute.serialize (c, glyphs)); return_trace (substitute.serialize (c, subst));
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -361,31 +367,28 @@ struct MultipleSubstFormat1
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
unsigned int count = sequence.len; + hb_zip (this+coverage, sequence)
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) | hb_filter (*c->glyphs, hb_first)
{ | hb_map (hb_second)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_add (this))
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_apply ([c] (const Sequence &_) { _.closure (c); })
if (c->glyphs->has (iter.get_glyph ())) ;
(this+sequence[iter.get_coverage ()]).closure (c);
}
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ {
if (unlikely (!(this+coverage).add_coverage (c->input))) return; if (unlikely (!(this+coverage).add_coverage (c->input))) return;
unsigned int count = sequence.len; + hb_zip (this+coverage, sequence)
for (unsigned int i = 0; i < count; i++) | hb_map (hb_second)
(this+sequence[i]).collect_glyphs (c); | hb_map (hb_add (this))
| hb_apply ([c] (const Sequence &_) { _.collect_glyphs (c); })
;
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ { return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
TRACE_WOULD_APPLY (this);
return_trace (c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED);
}
bool apply (hb_ot_apply_context_t *c) const bool apply (hb_ot_apply_context_t *c) const
{ {
@@ -398,7 +401,7 @@ struct MultipleSubstFormat1
} }
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, hb_sorted_array_t<const GlyphID> glyphs,
hb_array_t<const unsigned int> substitute_len_list, hb_array_t<const unsigned int> substitute_len_list,
hb_array_t<const GlyphID> substitute_glyphs_list) hb_array_t<const GlyphID> substitute_glyphs_list)
{ {
@@ -444,27 +447,27 @@ struct MultipleSubstFormat1
struct MultipleSubst struct MultipleSubst
{ {
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, hb_sorted_array_t<const GlyphID> glyphs,
hb_array_t<const unsigned int> substitute_len_list, hb_array_t<const unsigned int> substitute_len_list,
hb_array_t<const GlyphID> substitute_glyphs_list) hb_array_t<const GlyphID> substitute_glyphs_list)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false); if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned int format = 1; unsigned int format = 1;
u.format.set (format); u.format = format;
switch (u.format) { switch (u.format) {
case 1: return_trace (u.format1.serialize (c, glyphs, substitute_len_list, substitute_glyphs_list)); case 1: return_trace (u.format1.serialize (c, glyphs, substitute_len_list, substitute_glyphs_list));
default:return_trace (false); default:return_trace (false);
} }
} }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -482,7 +485,7 @@ struct AlternateSet
{ {
unsigned int count = alternates.len; unsigned int count = alternates.len;
for (unsigned int i = 0; i < count; i++) for (unsigned int i = 0; i < count; i++)
c->out->add (alternates[i]); c->output->add (alternates[i]);
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -502,7 +505,7 @@ struct AlternateSet
unsigned int shift = hb_ctz (lookup_mask); unsigned int shift = hb_ctz (lookup_mask);
unsigned int alt_index = ((lookup_mask & glyph_mask) >> shift); unsigned int alt_index = ((lookup_mask & glyph_mask) >> shift);
/* If alt_index is MAX, randomize feature if it is the rand feature. */ /* If alt_index is MAX_VALUE, randomize feature if it is the rand feature. */
if (alt_index == HB_OT_MAP_MAX_VALUE && c->random) if (alt_index == HB_OT_MAP_MAX_VALUE && c->random)
alt_index = c->random_number () % count + 1; alt_index = c->random_number () % count + 1;
@@ -513,11 +516,13 @@ struct AlternateSet
return_trace (true); return_trace (true);
} }
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs) Iterator alts)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
return_trace (alternates.serialize (c, glyphs)); return_trace (alternates.serialize (c, alts));
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -541,35 +546,27 @@ struct AlternateSubstFormat1
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
unsigned int count = alternateSet.len; + hb_zip (this+coverage, alternateSet)
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) | hb_map (hb_second)
{ | hb_map (hb_add (this))
if (unlikely (iter.get_coverage () >= count)) | hb_apply ([c] (const AlternateSet &_) { _.closure (c); })
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ ;
if (c->glyphs->has (iter.get_glyph ()))
(this+alternateSet[iter.get_coverage ()]).closure (c);
}
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ {
if (unlikely (!(this+coverage).add_coverage (c->input))) return; if (unlikely (!(this+coverage).add_coverage (c->input))) return;
unsigned int count = alternateSet.len; + hb_zip (this+coverage, alternateSet)
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) | hb_map (hb_second)
{ | hb_map (hb_add (this))
if (unlikely (iter.get_coverage () >= count)) | hb_apply ([c] (const AlternateSet &_) { _.collect_glyphs (c); })
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ ;
(this+alternateSet[iter.get_coverage ()]).collect_glyphs (c);
}
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ { return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
TRACE_WOULD_APPLY (this);
return_trace (c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED);
}
bool apply (hb_ot_apply_context_t *c) const bool apply (hb_ot_apply_context_t *c) const
{ {
@@ -582,7 +579,7 @@ struct AlternateSubstFormat1
} }
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, hb_sorted_array_t<const GlyphID> glyphs,
hb_array_t<const unsigned int> alternate_len_list, hb_array_t<const unsigned int> alternate_len_list,
hb_array_t<const GlyphID> alternate_glyphs_list) hb_array_t<const GlyphID> alternate_glyphs_list)
{ {
@@ -628,27 +625,27 @@ struct AlternateSubstFormat1
struct AlternateSubst struct AlternateSubst
{ {
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> glyphs, hb_sorted_array_t<const GlyphID> glyphs,
hb_array_t<const unsigned int> alternate_len_list, hb_array_t<const unsigned int> alternate_len_list,
hb_array_t<const GlyphID> alternate_glyphs_list) hb_array_t<const GlyphID> alternate_glyphs_list)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false); if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned int format = 1; unsigned int format = 1;
u.format.set (format); u.format = format;
switch (u.format) { switch (u.format) {
case 1: return_trace (u.format1.serialize (c, glyphs, alternate_len_list, alternate_glyphs_list)); case 1: return_trace (u.format1.serialize (c, glyphs, alternate_len_list, alternate_glyphs_list));
default:return_trace (false); default:return_trace (false);
} }
} }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -674,11 +671,8 @@ struct Ligature
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
unsigned int count = component.lenP1; if (!intersects (c->glyphs)) return;
for (unsigned int i = 1; i < count; i++) c->output->add (ligGlyph);
if (!c->glyphs->has (component[i]))
return;
c->out->add (ligGlyph);
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -689,15 +683,14 @@ struct Ligature
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this);
if (c->len != component.lenP1) if (c->len != component.lenP1)
return_trace (false); return false;
for (unsigned int i = 1; i < c->len; i++) for (unsigned int i = 1; i < c->len; i++)
if (likely (c->glyphs[i] != component[i])) if (likely (c->glyphs[i] != component[i]))
return_trace (false); return false;
return_trace (true); return true;
} }
bool apply (hb_ot_apply_context_t *c) const bool apply (hb_ot_apply_context_t *c) const
@@ -739,9 +732,11 @@ struct Ligature
return_trace (true); return_trace (true);
} }
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, hb_codepoint_t))>
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
GlyphID ligature, GlyphID ligature,
hb_array_t<const GlyphID> components /* Starting from second */) Iterator components /* Starting from second */)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (*this))) return_trace (false); if (unlikely (!c->extend_min (*this))) return_trace (false);
@@ -771,38 +766,38 @@ struct LigatureSet
{ {
bool intersects (const hb_set_t *glyphs) const bool intersects (const hb_set_t *glyphs) const
{ {
unsigned int num_ligs = ligature.len; return
for (unsigned int i = 0; i < num_ligs; i++) + hb_iter (ligature)
if ((this+ligature[i]).intersects (glyphs)) | hb_map (hb_add (this))
return true; | hb_map ([glyphs] (const Ligature &_) { return _.intersects (glyphs); })
return false; | hb_any
;
} }
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
unsigned int num_ligs = ligature.len; + hb_iter (ligature)
for (unsigned int i = 0; i < num_ligs; i++) | hb_map (hb_add (this))
(this+ligature[i]).closure (c); | hb_apply ([c] (const Ligature &_) { _.closure (c); })
;
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ {
unsigned int num_ligs = ligature.len; + hb_iter (ligature)
for (unsigned int i = 0; i < num_ligs; i++) | hb_map (hb_add (this))
(this+ligature[i]).collect_glyphs (c); | hb_apply ([c] (const Ligature &_) { _.collect_glyphs (c); })
;
} }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this); return
unsigned int num_ligs = ligature.len; + hb_iter (ligature)
for (unsigned int i = 0; i < num_ligs; i++) | hb_map (hb_add (this))
{ | hb_map ([c] (const Ligature &_) { return _.would_apply (c); })
const Ligature &lig = this+ligature[i]; | hb_any
if (lig.would_apply (c)) ;
return_trace (true);
}
return_trace (false);
} }
bool apply (hb_ot_apply_context_t *c) const bool apply (hb_ot_apply_context_t *c) const
@@ -828,7 +823,7 @@ struct LigatureSet
if (unlikely (!ligature.serialize (c, ligatures.length))) return_trace (false); if (unlikely (!ligature.serialize (c, ligatures.length))) return_trace (false);
for (unsigned int i = 0; i < ligatures.length; i++) for (unsigned int i = 0; i < ligatures.length; i++)
{ {
unsigned int component_count = MAX<int> (component_count_list[i] - 1, 0); unsigned int component_count = (unsigned) hb_max ((int) component_count_list[i] - 1, 0);
if (unlikely (!ligature[i].serialize (c, this) if (unlikely (!ligature[i].serialize (c, this)
.serialize (c, .serialize (c,
ligatures[i], ligatures[i],
@@ -857,52 +852,46 @@ struct LigatureSubstFormat1
{ {
bool intersects (const hb_set_t *glyphs) const bool intersects (const hb_set_t *glyphs) const
{ {
unsigned int count = ligatureSet.len; return
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) + hb_zip (this+coverage, ligatureSet)
{ | hb_filter (*glyphs, hb_first)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_second)
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_map ([this, glyphs] (const OffsetTo<LigatureSet> &_)
if (glyphs->has (iter.get_glyph ()) && { return (this+_).intersects (glyphs); })
(this+ligatureSet[iter.get_coverage ()]).intersects (glyphs)) | hb_any
return true; ;
}
return false;
} }
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
unsigned int count = ligatureSet.len; + hb_zip (this+coverage, ligatureSet)
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) | hb_filter (*c->glyphs, hb_first)
{ | hb_map (hb_second)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_add (this))
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_apply ([c] (const LigatureSet &_) { _.closure (c); })
if (c->glyphs->has (iter.get_glyph ())) ;
(this+ligatureSet[iter.get_coverage ()]).closure (c);
}
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
{ {
if (unlikely (!(this+coverage).add_coverage (c->input))) return; if (unlikely (!(this+coverage).add_coverage (c->input))) return;
unsigned int count = ligatureSet.len;
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) + hb_zip (this+coverage, ligatureSet)
{ | hb_map (hb_second)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_add (this))
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_apply ([c] (const LigatureSet &_) { _.collect_glyphs (c); })
(this+ligatureSet[iter.get_coverage ()]).collect_glyphs (c); ;
}
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this);
unsigned int index = (this+coverage).get_coverage (c->glyphs[0]); unsigned int index = (this+coverage).get_coverage (c->glyphs[0]);
if (likely (index == NOT_COVERED)) return_trace (false); if (likely (index == NOT_COVERED)) return false;
const LigatureSet &lig_set = this+ligatureSet[index]; const LigatureSet &lig_set = this+ligatureSet[index];
return_trace (lig_set.would_apply (c)); return lig_set.would_apply (c);
} }
bool apply (hb_ot_apply_context_t *c) const bool apply (hb_ot_apply_context_t *c) const
@@ -917,7 +906,7 @@ struct LigatureSubstFormat1
} }
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> first_glyphs, hb_sorted_array_t<const GlyphID> first_glyphs,
hb_array_t<const unsigned int> ligature_per_first_glyph_count_list, hb_array_t<const unsigned int> ligature_per_first_glyph_count_list,
hb_array_t<const GlyphID> ligatures_list, hb_array_t<const GlyphID> ligatures_list,
hb_array_t<const unsigned int> component_count_list, hb_array_t<const unsigned int> component_count_list,
@@ -968,7 +957,7 @@ struct LigatureSubstFormat1
struct LigatureSubst struct LigatureSubst
{ {
bool serialize (hb_serialize_context_t *c, bool serialize (hb_serialize_context_t *c,
hb_array_t<const GlyphID> first_glyphs, hb_sorted_array_t<const GlyphID> first_glyphs,
hb_array_t<const unsigned int> ligature_per_first_glyph_count_list, hb_array_t<const unsigned int> ligature_per_first_glyph_count_list,
hb_array_t<const GlyphID> ligatures_list, hb_array_t<const GlyphID> ligatures_list,
hb_array_t<const unsigned int> component_count_list, hb_array_t<const unsigned int> component_count_list,
@@ -977,7 +966,7 @@ struct LigatureSubst
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min (u.format))) return_trace (false); if (unlikely (!c->extend_min (u.format))) return_trace (false);
unsigned int format = 1; unsigned int format = 1;
u.format.set (format); u.format = format;
switch (u.format) { switch (u.format) {
case 1: return_trace (u.format1.serialize (c, case 1: return_trace (u.format1.serialize (c,
first_glyphs, first_glyphs,
@@ -989,13 +978,13 @@ struct LigatureSubst
} }
} }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -1046,29 +1035,16 @@ struct ReverseChainSingleSubstFormat1
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
if (!intersects (c->glyphs)) return;
const OffsetArrayOf<Coverage> &lookahead = StructAfter<OffsetArrayOf<Coverage>> (backtrack); const OffsetArrayOf<Coverage> &lookahead = StructAfter<OffsetArrayOf<Coverage>> (backtrack);
unsigned int count;
count = backtrack.len;
for (unsigned int i = 0; i < count; i++)
if (!(this+backtrack[i]).intersects (c->glyphs))
return;
count = lookahead.len;
for (unsigned int i = 0; i < count; i++)
if (!(this+lookahead[i]).intersects (c->glyphs))
return;
const ArrayOf<GlyphID> &substitute = StructAfter<ArrayOf<GlyphID>> (lookahead); const ArrayOf<GlyphID> &substitute = StructAfter<ArrayOf<GlyphID>> (lookahead);
count = substitute.len;
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) + hb_zip (this+coverage, substitute)
{ | hb_filter (*c->glyphs, hb_first)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_second)
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_sink (c->output)
if (c->glyphs->has (iter.get_glyph ())) ;
c->out->add (substitute[iter.get_coverage ()]);
}
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -1094,10 +1070,7 @@ struct ReverseChainSingleSubstFormat1
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ { return c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED; }
TRACE_WOULD_APPLY (this);
return_trace (c->len == 1 && (this+coverage).get_coverage (c->glyphs[0]) != NOT_COVERED);
}
bool apply (hb_ot_apply_context_t *c) const bool apply (hb_ot_apply_context_t *c) const
{ {
@@ -1173,13 +1146,13 @@ struct ReverseChainSingleSubstFormat1
struct ReverseChainSingleSubst struct ReverseChainSingleSubst
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -1213,19 +1186,19 @@ struct SubstLookupSubTable
ReverseChainSingle = 8 ReverseChainSingle = 8
}; };
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c, unsigned int lookup_type) const typename context_t::return_t dispatch (context_t *c, unsigned int lookup_type, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, lookup_type); TRACE_DISPATCH (this, lookup_type);
switch (lookup_type) { switch (lookup_type) {
case Single: return_trace (u.single.dispatch (c)); case Single: return_trace (u.single.dispatch (c, hb_forward<Ts> (ds)...));
case Multiple: return_trace (u.multiple.dispatch (c)); case Multiple: return_trace (u.multiple.dispatch (c, hb_forward<Ts> (ds)...));
case Alternate: return_trace (u.alternate.dispatch (c)); case Alternate: return_trace (u.alternate.dispatch (c, hb_forward<Ts> (ds)...));
case Ligature: return_trace (u.ligature.dispatch (c)); case Ligature: return_trace (u.ligature.dispatch (c, hb_forward<Ts> (ds)...));
case Context: return_trace (u.context.dispatch (c)); case Context: return_trace (u.context.dispatch (c, hb_forward<Ts> (ds)...));
case ChainContext: return_trace (u.chainContext.dispatch (c)); case ChainContext: return_trace (u.chainContext.dispatch (c, hb_forward<Ts> (ds)...));
case Extension: return_trace (u.extension.dispatch (c)); case Extension: return_trace (u.extension.dispatch (c, hb_forward<Ts> (ds)...));
case ReverseChainSingle: return_trace (u.reverseChainContextSingle.dispatch (c)); case ReverseChainSingle: return_trace (u.reverseChainContextSingle.dispatch (c, hb_forward<Ts> (ds)...));
default: return_trace (c->default_return_value ()); default: return_trace (c->default_return_value ());
} }
} }
@@ -1253,7 +1226,7 @@ struct SubstLookup : Lookup
const SubTable& get_subtable (unsigned int i) const const SubTable& get_subtable (unsigned int i) const
{ return Lookup::get_subtable<SubTable> (i); } { return Lookup::get_subtable<SubTable> (i); }
static bool lookup_type_is_reverse (unsigned int lookup_type) HB_INTERNAL static bool lookup_type_is_reverse (unsigned int lookup_type)
{ return lookup_type == SubTable::ReverseChainSingle; } { return lookup_type == SubTable::ReverseChainSingle; }
bool is_reverse () const bool is_reverse () const
@@ -1306,13 +1279,12 @@ struct SubstLookup : Lookup
bool would_apply (hb_would_apply_context_t *c, bool would_apply (hb_would_apply_context_t *c,
const hb_ot_layout_lookup_accelerator_t *accel) const const hb_ot_layout_lookup_accelerator_t *accel) const
{ {
TRACE_WOULD_APPLY (this); if (unlikely (!c->len)) return false;
if (unlikely (!c->len)) return_trace (false); if (!accel->may_have (c->glyphs[0])) return false;
if (!accel->may_have (c->glyphs[0])) return_trace (false); return dispatch (c);
return_trace (dispatch (c));
} }
static bool apply_recurse_func (hb_ot_apply_context_t *c, unsigned int lookup_index); HB_INTERNAL static bool apply_recurse_func (hb_ot_apply_context_t *c, unsigned int lookup_index);
SubTable& serialize_subtable (hb_serialize_context_t *c, SubTable& serialize_subtable (hb_serialize_context_t *c,
unsigned int i) unsigned int i)
@@ -1320,23 +1292,25 @@ struct SubstLookup : Lookup
bool serialize_single (hb_serialize_context_t *c, bool serialize_single (hb_serialize_context_t *c,
uint32_t lookup_props, uint32_t lookup_props,
hb_array_t<const GlyphID> glyphs, hb_sorted_array_t<const GlyphID> glyphs,
hb_array_t<const GlyphID> substitutes) hb_array_t<const GlyphID> substitutes)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Single, lookup_props, 1))) return_trace (false); if (unlikely (!Lookup::serialize (c, SubTable::Single, lookup_props, 1))) return_trace (false);
return_trace (serialize_subtable (c, 0).u.single.serialize (c, glyphs, substitutes)); return_trace (serialize_subtable (c, 0).u.single.
serialize (c, hb_zip (glyphs, substitutes)));
} }
bool serialize_multiple (hb_serialize_context_t *c, bool serialize_multiple (hb_serialize_context_t *c,
uint32_t lookup_props, uint32_t lookup_props,
hb_array_t<const GlyphID> glyphs, hb_sorted_array_t<const GlyphID> glyphs,
hb_array_t<const unsigned int> substitute_len_list, hb_array_t<const unsigned int> substitute_len_list,
hb_array_t<const GlyphID> substitute_glyphs_list) hb_array_t<const GlyphID> substitute_glyphs_list)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Multiple, lookup_props, 1))) return_trace (false); if (unlikely (!Lookup::serialize (c, SubTable::Multiple, lookup_props, 1))) return_trace (false);
return_trace (serialize_subtable (c, 0).u.multiple.serialize (c, return_trace (serialize_subtable (c, 0).u.multiple.
serialize (c,
glyphs, glyphs,
substitute_len_list, substitute_len_list,
substitute_glyphs_list)); substitute_glyphs_list));
@@ -1344,13 +1318,14 @@ struct SubstLookup : Lookup
bool serialize_alternate (hb_serialize_context_t *c, bool serialize_alternate (hb_serialize_context_t *c,
uint32_t lookup_props, uint32_t lookup_props,
hb_array_t<const GlyphID> glyphs, hb_sorted_array_t<const GlyphID> glyphs,
hb_array_t<const unsigned int> alternate_len_list, hb_array_t<const unsigned int> alternate_len_list,
hb_array_t<const GlyphID> alternate_glyphs_list) hb_array_t<const GlyphID> alternate_glyphs_list)
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Alternate, lookup_props, 1))) return_trace (false); if (unlikely (!Lookup::serialize (c, SubTable::Alternate, lookup_props, 1))) return_trace (false);
return_trace (serialize_subtable (c, 0).u.alternate.serialize (c, return_trace (serialize_subtable (c, 0).u.alternate.
serialize (c,
glyphs, glyphs,
alternate_len_list, alternate_len_list,
alternate_glyphs_list)); alternate_glyphs_list));
@@ -1358,7 +1333,7 @@ struct SubstLookup : Lookup
bool serialize_ligature (hb_serialize_context_t *c, bool serialize_ligature (hb_serialize_context_t *c,
uint32_t lookup_props, uint32_t lookup_props,
hb_array_t<const GlyphID> first_glyphs, hb_sorted_array_t<const GlyphID> first_glyphs,
hb_array_t<const unsigned int> ligature_per_first_glyph_count_list, hb_array_t<const unsigned int> ligature_per_first_glyph_count_list,
hb_array_t<const GlyphID> ligatures_list, hb_array_t<const GlyphID> ligatures_list,
hb_array_t<const unsigned int> component_count_list, hb_array_t<const unsigned int> component_count_list,
@@ -1366,7 +1341,8 @@ struct SubstLookup : Lookup
{ {
TRACE_SERIALIZE (this); TRACE_SERIALIZE (this);
if (unlikely (!Lookup::serialize (c, SubTable::Ligature, lookup_props, 1))) return_trace (false); if (unlikely (!Lookup::serialize (c, SubTable::Ligature, lookup_props, 1))) return_trace (false);
return_trace (serialize_subtable (c, 0).u.ligature.serialize (c, return_trace (serialize_subtable (c, 0).u.ligature.
serialize (c,
first_glyphs, first_glyphs,
ligature_per_first_glyph_count_list, ligature_per_first_glyph_count_list,
ligatures_list, ligatures_list,
@@ -1375,12 +1351,12 @@ struct SubstLookup : Lookup
} }
template <typename context_t> template <typename context_t>
static typename context_t::return_t dispatch_recurse_func (context_t *c, unsigned int lookup_index); HB_INTERNAL static typename context_t::return_t dispatch_recurse_func (context_t *c, unsigned int lookup_index);
static hb_closure_context_t::return_t dispatch_closure_recurse_func (hb_closure_context_t *c, unsigned int lookup_index) HB_INTERNAL static hb_closure_context_t::return_t dispatch_closure_recurse_func (hb_closure_context_t *c, unsigned int lookup_index)
{ {
if (!c->should_visit_lookup (lookup_index)) if (!c->should_visit_lookup (lookup_index))
return HB_VOID; return hb_empty_t ();
hb_closure_context_t::return_t ret = dispatch_recurse_func (c, lookup_index); hb_closure_context_t::return_t ret = dispatch_recurse_func (c, lookup_index);
@@ -1392,9 +1368,9 @@ struct SubstLookup : Lookup
return ret; return ret;
} }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ return Lookup::dispatch<SubTable> (c); } { return Lookup::dispatch<SubTable> (c, hb_forward<Ts> (ds)...); }
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ return Lookup::subset<SubTable> (c); } { return Lookup::subset<SubTable> (c); }

View File

@@ -59,13 +59,13 @@ struct hb_intersects_context_t :
}; };
struct hb_closure_context_t : struct hb_closure_context_t :
hb_dispatch_context_t<hb_closure_context_t, hb_void_t, 0> hb_dispatch_context_t<hb_closure_context_t, hb_empty_t, 0>
{ {
const char *get_name () { return "CLOSURE"; } const char *get_name () { return "CLOSURE"; }
typedef return_t (*recurse_func_t) (hb_closure_context_t *c, unsigned int lookup_index); typedef return_t (*recurse_func_t) (hb_closure_context_t *c, unsigned int lookup_index);
template <typename T> template <typename T>
return_t dispatch (const T &obj) { obj.closure (this); return HB_VOID; } return_t dispatch (const T &obj) { obj.closure (this); return hb_empty_t (); }
static return_t default_return_value () { return HB_VOID; } static return_t default_return_value () { return hb_empty_t (); }
void recurse (unsigned int lookup_index) void recurse (unsigned int lookup_index)
{ {
if (unlikely (nesting_level_left == 0 || !recurse_func)) if (unlikely (nesting_level_left == 0 || !recurse_func))
@@ -92,7 +92,7 @@ struct hb_closure_context_t :
hb_face_t *face; hb_face_t *face;
hb_set_t *glyphs; hb_set_t *glyphs;
hb_set_t out[1]; hb_set_t output[1];
recurse_func_t recurse_func; recurse_func_t recurse_func;
unsigned int nesting_level_left; unsigned int nesting_level_left;
unsigned int debug_depth; unsigned int debug_depth;
@@ -114,8 +114,8 @@ struct hb_closure_context_t :
void flush () void flush ()
{ {
hb_set_union (glyphs, out); hb_set_union (glyphs, output);
hb_set_clear (out); hb_set_clear (output);
} }
private: private:
@@ -124,7 +124,7 @@ struct hb_closure_context_t :
struct hb_would_apply_context_t : struct hb_would_apply_context_t :
hb_dispatch_context_t<hb_would_apply_context_t, bool, HB_DEBUG_WOULD_APPLY> hb_dispatch_context_t<hb_would_apply_context_t, bool, 0>
{ {
const char *get_name () { return "WOULD_APPLY"; } const char *get_name () { return "WOULD_APPLY"; }
template <typename T> template <typename T>
@@ -151,13 +151,13 @@ struct hb_would_apply_context_t :
struct hb_collect_glyphs_context_t : struct hb_collect_glyphs_context_t :
hb_dispatch_context_t<hb_collect_glyphs_context_t, hb_void_t, 0> hb_dispatch_context_t<hb_collect_glyphs_context_t, hb_empty_t, 0>
{ {
const char *get_name () { return "COLLECT_GLYPHS"; } const char *get_name () { return "COLLECT_GLYPHS"; }
typedef return_t (*recurse_func_t) (hb_collect_glyphs_context_t *c, unsigned int lookup_index); typedef return_t (*recurse_func_t) (hb_collect_glyphs_context_t *c, unsigned int lookup_index);
template <typename T> template <typename T>
return_t dispatch (const T &obj) { obj.collect_glyphs (this); return HB_VOID; } return_t dispatch (const T &obj) { obj.collect_glyphs (this); return hb_empty_t (); }
static return_t default_return_value () { return HB_VOID; } static return_t default_return_value () { return hb_empty_t (); }
void recurse (unsigned int lookup_index) void recurse (unsigned int lookup_index)
{ {
if (unlikely (nesting_level_left == 0 || !recurse_func)) if (unlikely (nesting_level_left == 0 || !recurse_func))
@@ -610,10 +610,10 @@ struct hb_ot_apply_context_t :
struct hb_get_subtables_context_t : struct hb_get_subtables_context_t :
hb_dispatch_context_t<hb_get_subtables_context_t, hb_void_t, HB_DEBUG_APPLY> hb_dispatch_context_t<hb_get_subtables_context_t, hb_empty_t, HB_DEBUG_APPLY>
{ {
template <typename Type> template <typename Type>
static bool apply_to (const void *obj, OT::hb_ot_apply_context_t *c) HB_INTERNAL static bool apply_to (const void *obj, OT::hb_ot_apply_context_t *c)
{ {
const Type *typed_obj = (const Type *) obj; const Type *typed_obj = (const Type *) obj;
return typed_obj->apply (c); return typed_obj->apply (c);
@@ -652,9 +652,9 @@ struct hb_get_subtables_context_t :
{ {
hb_applicable_t *entry = array.push(); hb_applicable_t *entry = array.push();
entry->init (obj, apply_to<T>); entry->init (obj, apply_to<T>);
return HB_VOID; return hb_empty_t ();
} }
static return_t default_return_value () { return HB_VOID; } static return_t default_return_value () { return hb_empty_t (); }
hb_get_subtables_context_t (array_t &array_) : hb_get_subtables_context_t (array_t &array_) :
array (array_), array (array_),
@@ -706,10 +706,11 @@ static inline bool intersects_array (const hb_set_t *glyphs,
intersects_func_t intersects_func, intersects_func_t intersects_func,
const void *intersects_data) const void *intersects_data)
{ {
for (unsigned int i = 0; i < count; i++) return
if (likely (!intersects_func (glyphs, values[i], intersects_data))) + hb_iter (values, count)
return false; | hb_map ([&] (const HBUINT16 &_) { return intersects_func (glyphs, _, intersects_data); })
return true; | hb_any
;
} }
@@ -734,8 +735,10 @@ static inline void collect_array (hb_collect_glyphs_context_t *c HB_UNUSED,
collect_glyphs_func_t collect_func, collect_glyphs_func_t collect_func,
const void *collect_data) const void *collect_data)
{ {
for (unsigned int i = 0; i < count; i++) return
collect_func (glyphs, values[i], collect_data); + hb_iter (values, count)
| hb_apply ([&] (const HBUINT16 &_) { collect_func (glyphs, _, collect_data); })
;
} }
@@ -846,7 +849,7 @@ static inline bool match_input (hb_ot_apply_context_t *c,
if (ligbase == LIGBASE_NOT_CHECKED) if (ligbase == LIGBASE_NOT_CHECKED)
{ {
bool found = false; bool found = false;
const hb_glyph_info_t *out = buffer->out_info; const auto *out = buffer->out_info;
unsigned int j = buffer->out_len; unsigned int j = buffer->out_len;
while (j && _hb_glyph_info_get_lig_id (&out[j - 1]) == first_lig_id) while (j && _hb_glyph_info_get_lig_id (&out[j - 1]) == first_lig_id)
{ {
@@ -970,7 +973,7 @@ static inline bool ligate_input (hb_ot_apply_context_t *c,
if (this_comp == 0) if (this_comp == 0)
this_comp = last_num_components; this_comp = last_num_components;
unsigned int new_lig_comp = components_so_far - last_num_components + unsigned int new_lig_comp = components_so_far - last_num_components +
MIN (this_comp, last_num_components); hb_min (this_comp, last_num_components);
_hb_glyph_info_set_lig_props_for_mark (&buffer->cur(), lig_id, new_lig_comp); _hb_glyph_info_set_lig_props_for_mark (&buffer->cur(), lig_id, new_lig_comp);
} }
buffer->next_glyph (); buffer->next_glyph ();
@@ -992,7 +995,7 @@ static inline bool ligate_input (hb_ot_apply_context_t *c,
if (!this_comp) if (!this_comp)
break; break;
unsigned int new_lig_comp = components_so_far - last_num_components + unsigned int new_lig_comp = components_so_far - last_num_components +
MIN (this_comp, last_num_components); hb_min (this_comp, last_num_components);
_hb_glyph_info_set_lig_props_for_mark (&buffer->info[i], lig_id, new_lig_comp); _hb_glyph_info_set_lig_props_for_mark (&buffer->info[i], lig_id, new_lig_comp);
} else } else
break; break;
@@ -1170,7 +1173,7 @@ static inline bool apply_lookup (hb_ot_apply_context_t *c,
else else
{ {
/* NOTE: delta is negative. */ /* NOTE: delta is negative. */
delta = MAX (delta, (int) next - (int) count); delta = hb_max (delta, (int) next - (int) count);
next -= delta; next -= delta;
} }
@@ -1318,10 +1321,12 @@ struct Rule
bool would_apply (hb_would_apply_context_t *c, bool would_apply (hb_would_apply_context_t *c,
ContextApplyLookupContext &lookup_context) const ContextApplyLookupContext &lookup_context) const
{ {
TRACE_WOULD_APPLY (this);
const UnsizedArrayOf<LookupRecord> &lookupRecord = StructAfter<UnsizedArrayOf<LookupRecord>> const UnsizedArrayOf<LookupRecord> &lookupRecord = StructAfter<UnsizedArrayOf<LookupRecord>>
(inputZ.as_array (inputCount ? inputCount - 1 : 0)); (inputZ.as_array (inputCount ? inputCount - 1 : 0));
return_trace (context_would_apply_lookup (c, inputCount, inputZ.arrayZ, lookupCount, lookupRecord.arrayZ, lookup_context)); return context_would_apply_lookup (c,
inputCount, inputZ.arrayZ,
lookupCount, lookupRecord.arrayZ,
lookup_context);
} }
bool apply (hb_ot_apply_context_t *c, bool apply (hb_ot_apply_context_t *c,
@@ -1364,53 +1369,56 @@ struct RuleSet
bool intersects (const hb_set_t *glyphs, bool intersects (const hb_set_t *glyphs,
ContextClosureLookupContext &lookup_context) const ContextClosureLookupContext &lookup_context) const
{ {
unsigned int num_rules = rule.len; return
for (unsigned int i = 0; i < num_rules; i++) + hb_iter (rule)
if ((this+rule[i]).intersects (glyphs, lookup_context)) | hb_map (hb_add (this))
return true; | hb_map ([&] (const Rule &_) { return _.intersects (glyphs, lookup_context); })
return false; | hb_any
;
} }
void closure (hb_closure_context_t *c, void closure (hb_closure_context_t *c,
ContextClosureLookupContext &lookup_context) const ContextClosureLookupContext &lookup_context) const
{ {
unsigned int num_rules = rule.len; return
for (unsigned int i = 0; i < num_rules; i++) + hb_iter (rule)
(this+rule[i]).closure (c, lookup_context); | hb_map (hb_add (this))
| hb_apply ([&] (const Rule &_) { _.closure (c, lookup_context); })
;
} }
void collect_glyphs (hb_collect_glyphs_context_t *c, void collect_glyphs (hb_collect_glyphs_context_t *c,
ContextCollectGlyphsLookupContext &lookup_context) const ContextCollectGlyphsLookupContext &lookup_context) const
{ {
unsigned int num_rules = rule.len; return
for (unsigned int i = 0; i < num_rules; i++) + hb_iter (rule)
(this+rule[i]).collect_glyphs (c, lookup_context); | hb_map (hb_add (this))
| hb_apply ([&] (const Rule &_) { _.collect_glyphs (c, lookup_context); })
;
} }
bool would_apply (hb_would_apply_context_t *c, bool would_apply (hb_would_apply_context_t *c,
ContextApplyLookupContext &lookup_context) const ContextApplyLookupContext &lookup_context) const
{ {
TRACE_WOULD_APPLY (this); return
unsigned int num_rules = rule.len; + hb_iter (rule)
for (unsigned int i = 0; i < num_rules; i++) | hb_map (hb_add (this))
{ | hb_map ([&] (const Rule &_) { return _.would_apply (c, lookup_context); })
if ((this+rule[i]).would_apply (c, lookup_context)) | hb_any
return_trace (true); ;
}
return_trace (false);
} }
bool apply (hb_ot_apply_context_t *c, bool apply (hb_ot_apply_context_t *c,
ContextApplyLookupContext &lookup_context) const ContextApplyLookupContext &lookup_context) const
{ {
TRACE_APPLY (this); TRACE_APPLY (this);
unsigned int num_rules = rule.len; return_trace (
for (unsigned int i = 0; i < num_rules; i++) + hb_iter (rule)
{ | hb_map (hb_add (this))
if ((this+rule[i]).apply (c, lookup_context)) | hb_map ([&] (const Rule &_) { return _.apply (c, lookup_context); })
return_trace (true); | hb_any
} )
return_trace (false); ;
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -1437,16 +1445,14 @@ struct ContextFormat1
nullptr nullptr
}; };
unsigned int count = ruleSet.len; return
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) + hb_zip (this+coverage, ruleSet)
{ | hb_filter (*glyphs, hb_first)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_second)
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_map (hb_add (this))
if (glyphs->has (iter.get_glyph ()) && | hb_map ([&] (const RuleSet &_) { return _.intersects (glyphs, lookup_context); })
(this+ruleSet[iter.get_coverage ()]).intersects (glyphs, lookup_context)) | hb_any
return true; ;
}
return false;
} }
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
@@ -1456,14 +1462,12 @@ struct ContextFormat1
nullptr nullptr
}; };
unsigned int count = ruleSet.len; + hb_zip (this+coverage, ruleSet)
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) | hb_filter (*c->glyphs, hb_first)
{ | hb_map (hb_second)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_add (this))
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_apply ([&] (const RuleSet &_) { _.closure (c, lookup_context); })
if (c->glyphs->has (iter.get_glyph ())) ;
(this+ruleSet[iter.get_coverage ()]).closure (c, lookup_context);
}
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -1475,21 +1479,20 @@ struct ContextFormat1
nullptr nullptr
}; };
unsigned int count = ruleSet.len; + hb_iter (ruleSet)
for (unsigned int i = 0; i < count; i++) | hb_map (hb_add (this))
(this+ruleSet[i]).collect_glyphs (c, lookup_context); | hb_apply ([&] (const RuleSet &_) { _.collect_glyphs (c, lookup_context); })
;
} }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this);
const RuleSet &rule_set = this+ruleSet[(this+coverage).get_coverage (c->glyphs[0])]; const RuleSet &rule_set = this+ruleSet[(this+coverage).get_coverage (c->glyphs[0])];
struct ContextApplyLookupContext lookup_context = { struct ContextApplyLookupContext lookup_context = {
{match_glyph}, {match_glyph},
nullptr nullptr
}; };
return_trace (rule_set.would_apply (c, lookup_context)); return rule_set.would_apply (c, lookup_context);
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
@@ -1549,13 +1552,13 @@ struct ContextFormat2
&class_def &class_def
}; };
unsigned int count = ruleSet.len; return
for (unsigned int i = 0; i < count; i++) + hb_enumerate (ruleSet)
if (class_def.intersects_class (glyphs, i) && | hb_map ([&] (const hb_pair_t<unsigned, const OffsetTo<RuleSet> &> p)
(this+ruleSet[i]).intersects (glyphs, lookup_context)) { return class_def.intersects_class (glyphs, p.first) &&
return true; (this+p.second).intersects (glyphs, lookup_context); })
| hb_any
return false; ;
} }
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
@@ -1570,12 +1573,15 @@ struct ContextFormat2
&class_def &class_def
}; };
unsigned int count = ruleSet.len; return
for (unsigned int i = 0; i < count; i++) + hb_enumerate (ruleSet)
if (class_def.intersects_class (c->glyphs, i)) { | hb_filter ([&] (unsigned _)
const RuleSet &rule_set = this+ruleSet[i]; { return class_def.intersects_class (c->glyphs, _); },
rule_set.closure (c, lookup_context); hb_first)
} | hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([&] (const RuleSet &_) { _.closure (c, lookup_context); })
;
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -1588,15 +1594,14 @@ struct ContextFormat2
&class_def &class_def
}; };
unsigned int count = ruleSet.len; + hb_iter (ruleSet)
for (unsigned int i = 0; i < count; i++) | hb_map (hb_add (this))
(this+ruleSet[i]).collect_glyphs (c, lookup_context); | hb_apply ([&] (const RuleSet &_) { _.collect_glyphs (c, lookup_context); })
;
} }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this);
const ClassDef &class_def = this+classDef; const ClassDef &class_def = this+classDef;
unsigned int index = class_def.get_class (c->glyphs[0]); unsigned int index = class_def.get_class (c->glyphs[0]);
const RuleSet &rule_set = this+ruleSet[index]; const RuleSet &rule_set = this+ruleSet[index];
@@ -1604,7 +1609,7 @@ struct ContextFormat2
{match_class}, {match_class},
&class_def &class_def
}; };
return_trace (rule_set.would_apply (c, lookup_context)); return rule_set.would_apply (c, lookup_context);
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
@@ -1704,14 +1709,15 @@ struct ContextFormat3
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this);
const LookupRecord *lookupRecord = &StructAfter<LookupRecord> (coverageZ.as_array (glyphCount)); const LookupRecord *lookupRecord = &StructAfter<LookupRecord> (coverageZ.as_array (glyphCount));
struct ContextApplyLookupContext lookup_context = { struct ContextApplyLookupContext lookup_context = {
{match_coverage}, {match_coverage},
this this
}; };
return_trace (context_would_apply_lookup (c, glyphCount, (const HBUINT16 *) (coverageZ.arrayZ + 1), lookupCount, lookupRecord, lookup_context)); return context_would_apply_lookup (c,
glyphCount, (const HBUINT16 *) (coverageZ.arrayZ + 1),
lookupCount, lookupRecord,
lookup_context);
} }
const Coverage &get_coverage () const { return this+coverageZ[0]; } const Coverage &get_coverage () const { return this+coverageZ[0]; }
@@ -1767,15 +1773,15 @@ struct ContextFormat3
struct Context struct Context
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2)); case 2: return_trace (c->dispatch (u.format2, hb_forward<Ts> (ds)...));
case 3: return_trace (c->dispatch (u.format3)); case 3: return_trace (c->dispatch (u.format3, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -1967,15 +1973,14 @@ struct ChainRule
bool would_apply (hb_would_apply_context_t *c, bool would_apply (hb_would_apply_context_t *c,
ChainContextApplyLookupContext &lookup_context) const ChainContextApplyLookupContext &lookup_context) const
{ {
TRACE_WOULD_APPLY (this);
const HeadlessArrayOf<HBUINT16> &input = StructAfter<HeadlessArrayOf<HBUINT16>> (backtrack); const HeadlessArrayOf<HBUINT16> &input = StructAfter<HeadlessArrayOf<HBUINT16>> (backtrack);
const ArrayOf<HBUINT16> &lookahead = StructAfter<ArrayOf<HBUINT16>> (input); const ArrayOf<HBUINT16> &lookahead = StructAfter<ArrayOf<HBUINT16>> (input);
const ArrayOf<LookupRecord> &lookup = StructAfter<ArrayOf<LookupRecord>> (lookahead); const ArrayOf<LookupRecord> &lookup = StructAfter<ArrayOf<LookupRecord>> (lookahead);
return_trace (chain_context_would_apply_lookup (c, return chain_context_would_apply_lookup (c,
backtrack.len, backtrack.arrayZ, backtrack.len, backtrack.arrayZ,
input.lenP1, input.arrayZ, input.lenP1, input.arrayZ,
lookahead.len, lookahead.arrayZ, lookup.len, lookahead.len, lookahead.arrayZ, lookup.len,
lookup.arrayZ, lookup_context)); lookup.arrayZ, lookup_context);
} }
bool apply (hb_ot_apply_context_t *c, ChainContextApplyLookupContext &lookup_context) const bool apply (hb_ot_apply_context_t *c, ChainContextApplyLookupContext &lookup_context) const
@@ -2025,46 +2030,51 @@ struct ChainRuleSet
{ {
bool intersects (const hb_set_t *glyphs, ChainContextClosureLookupContext &lookup_context) const bool intersects (const hb_set_t *glyphs, ChainContextClosureLookupContext &lookup_context) const
{ {
unsigned int num_rules = rule.len; return
for (unsigned int i = 0; i < num_rules; i++) + hb_iter (rule)
if ((this+rule[i]).intersects (glyphs, lookup_context)) | hb_map (hb_add (this))
return true; | hb_map ([&] (const ChainRule &_) { return _.intersects (glyphs, lookup_context); })
return false; | hb_any
;
} }
void closure (hb_closure_context_t *c, ChainContextClosureLookupContext &lookup_context) const void closure (hb_closure_context_t *c, ChainContextClosureLookupContext &lookup_context) const
{ {
unsigned int num_rules = rule.len; return
for (unsigned int i = 0; i < num_rules; i++) + hb_iter (rule)
(this+rule[i]).closure (c, lookup_context); | hb_map (hb_add (this))
| hb_apply ([&] (const ChainRule &_) { _.closure (c, lookup_context); })
;
} }
void collect_glyphs (hb_collect_glyphs_context_t *c, ChainContextCollectGlyphsLookupContext &lookup_context) const void collect_glyphs (hb_collect_glyphs_context_t *c, ChainContextCollectGlyphsLookupContext &lookup_context) const
{ {
unsigned int num_rules = rule.len; return
for (unsigned int i = 0; i < num_rules; i++) + hb_iter (rule)
(this+rule[i]).collect_glyphs (c, lookup_context); | hb_map (hb_add (this))
| hb_apply ([&] (const ChainRule &_) { _.collect_glyphs (c, lookup_context); })
;
} }
bool would_apply (hb_would_apply_context_t *c, ChainContextApplyLookupContext &lookup_context) const bool would_apply (hb_would_apply_context_t *c, ChainContextApplyLookupContext &lookup_context) const
{ {
TRACE_WOULD_APPLY (this); return
unsigned int num_rules = rule.len; + hb_iter (rule)
for (unsigned int i = 0; i < num_rules; i++) | hb_map (hb_add (this))
if ((this+rule[i]).would_apply (c, lookup_context)) | hb_map ([&] (const ChainRule &_) { return _.would_apply (c, lookup_context); })
return_trace (true); | hb_any
;
return_trace (false);
} }
bool apply (hb_ot_apply_context_t *c, ChainContextApplyLookupContext &lookup_context) const bool apply (hb_ot_apply_context_t *c, ChainContextApplyLookupContext &lookup_context) const
{ {
TRACE_APPLY (this); TRACE_APPLY (this);
unsigned int num_rules = rule.len; return_trace (
for (unsigned int i = 0; i < num_rules; i++) + hb_iter (rule)
if ((this+rule[i]).apply (c, lookup_context)) | hb_map (hb_add (this))
return_trace (true); | hb_map ([&] (const ChainRule &_) { return _.apply (c, lookup_context); })
| hb_any
return_trace (false); )
;
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -2090,16 +2100,14 @@ struct ChainContextFormat1
{nullptr, nullptr, nullptr} {nullptr, nullptr, nullptr}
}; };
unsigned int count = ruleSet.len; return
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) + hb_zip (this+coverage, ruleSet)
{ | hb_filter (*glyphs, hb_first)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_second)
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_map (hb_add (this))
if (glyphs->has (iter.get_glyph ()) && | hb_map ([&] (const ChainRuleSet &_) { return _.intersects (glyphs, lookup_context); })
(this+ruleSet[iter.get_coverage ()]).intersects (glyphs, lookup_context)) | hb_any
return true; ;
}
return false;
} }
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
@@ -2109,14 +2117,12 @@ struct ChainContextFormat1
{nullptr, nullptr, nullptr} {nullptr, nullptr, nullptr}
}; };
unsigned int count = ruleSet.len; + hb_zip (this+coverage, ruleSet)
for (Coverage::Iter iter (this+coverage); iter.more (); iter.next ()) | hb_filter (*c->glyphs, hb_first)
{ | hb_map (hb_second)
if (unlikely (iter.get_coverage () >= count)) | hb_map (hb_add (this))
break; /* Work around malicious fonts. https://github.com/harfbuzz/harfbuzz/issues/363 */ | hb_apply ([&] (const ChainRuleSet &_) { _.closure (c, lookup_context); })
if (c->glyphs->has (iter.get_glyph ())) ;
(this+ruleSet[iter.get_coverage ()]).closure (c, lookup_context);
}
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -2128,21 +2134,20 @@ struct ChainContextFormat1
{nullptr, nullptr, nullptr} {nullptr, nullptr, nullptr}
}; };
unsigned int count = ruleSet.len; + hb_iter (ruleSet)
for (unsigned int i = 0; i < count; i++) | hb_map (hb_add (this))
(this+ruleSet[i]).collect_glyphs (c, lookup_context); | hb_apply ([&] (const ChainRuleSet &_) { _.collect_glyphs (c, lookup_context); })
;
} }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this);
const ChainRuleSet &rule_set = this+ruleSet[(this+coverage).get_coverage (c->glyphs[0])]; const ChainRuleSet &rule_set = this+ruleSet[(this+coverage).get_coverage (c->glyphs[0])];
struct ChainContextApplyLookupContext lookup_context = { struct ChainContextApplyLookupContext lookup_context = {
{match_glyph}, {match_glyph},
{nullptr, nullptr, nullptr} {nullptr, nullptr, nullptr}
}; };
return_trace (rule_set.would_apply (c, lookup_context)); return rule_set.would_apply (c, lookup_context);
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
@@ -2204,13 +2209,13 @@ struct ChainContextFormat2
&lookahead_class_def} &lookahead_class_def}
}; };
unsigned int count = ruleSet.len; return
for (unsigned int i = 0; i < count; i++) + hb_enumerate (ruleSet)
if (input_class_def.intersects_class (glyphs, i) && | hb_map ([&] (const hb_pair_t<unsigned, const OffsetTo<ChainRuleSet> &> p)
(this+ruleSet[i]).intersects (glyphs, lookup_context)) { return input_class_def.intersects_class (glyphs, p.first) &&
return true; (this+p.second).intersects (glyphs, lookup_context); })
| hb_any
return false; ;
} }
void closure (hb_closure_context_t *c) const void closure (hb_closure_context_t *c) const
{ {
@@ -2228,12 +2233,15 @@ struct ChainContextFormat2
&lookahead_class_def} &lookahead_class_def}
}; };
unsigned int count = ruleSet.len; return
for (unsigned int i = 0; i < count; i++) + hb_enumerate (ruleSet)
if (input_class_def.intersects_class (c->glyphs, i)) { | hb_filter ([&] (unsigned _)
const ChainRuleSet &rule_set = this+ruleSet[i]; { return input_class_def.intersects_class (c->glyphs, _); },
rule_set.closure (c, lookup_context); hb_first)
} | hb_map (hb_second)
| hb_map (hb_add (this))
| hb_apply ([&] (const ChainRuleSet &_) { _.closure (c, lookup_context); })
;
} }
void collect_glyphs (hb_collect_glyphs_context_t *c) const void collect_glyphs (hb_collect_glyphs_context_t *c) const
@@ -2251,15 +2259,14 @@ struct ChainContextFormat2
&lookahead_class_def} &lookahead_class_def}
}; };
unsigned int count = ruleSet.len; + hb_iter (ruleSet)
for (unsigned int i = 0; i < count; i++) | hb_map (hb_add (this))
(this+ruleSet[i]).collect_glyphs (c, lookup_context); | hb_apply ([&] (const ChainRuleSet &_) { _.collect_glyphs (c, lookup_context); })
;
} }
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this);
const ClassDef &backtrack_class_def = this+backtrackClassDef; const ClassDef &backtrack_class_def = this+backtrackClassDef;
const ClassDef &input_class_def = this+inputClassDef; const ClassDef &input_class_def = this+inputClassDef;
const ClassDef &lookahead_class_def = this+lookaheadClassDef; const ClassDef &lookahead_class_def = this+lookaheadClassDef;
@@ -2272,7 +2279,7 @@ struct ChainContextFormat2
&input_class_def, &input_class_def,
&lookahead_class_def} &lookahead_class_def}
}; };
return_trace (rule_set.would_apply (c, lookup_context)); return rule_set.would_apply (c, lookup_context);
} }
const Coverage &get_coverage () const { return this+coverage; } const Coverage &get_coverage () const { return this+coverage; }
@@ -2403,8 +2410,6 @@ struct ChainContextFormat3
bool would_apply (hb_would_apply_context_t *c) const bool would_apply (hb_would_apply_context_t *c) const
{ {
TRACE_WOULD_APPLY (this);
const OffsetArrayOf<Coverage> &input = StructAfter<OffsetArrayOf<Coverage>> (backtrack); const OffsetArrayOf<Coverage> &input = StructAfter<OffsetArrayOf<Coverage>> (backtrack);
const OffsetArrayOf<Coverage> &lookahead = StructAfter<OffsetArrayOf<Coverage>> (input); const OffsetArrayOf<Coverage> &lookahead = StructAfter<OffsetArrayOf<Coverage>> (input);
const ArrayOf<LookupRecord> &lookup = StructAfter<ArrayOf<LookupRecord>> (lookahead); const ArrayOf<LookupRecord> &lookup = StructAfter<ArrayOf<LookupRecord>> (lookahead);
@@ -2412,11 +2417,11 @@ struct ChainContextFormat3
{match_coverage}, {match_coverage},
{this, this, this} {this, this, this}
}; };
return_trace (chain_context_would_apply_lookup (c, return chain_context_would_apply_lookup (c,
backtrack.len, (const HBUINT16 *) backtrack.arrayZ, backtrack.len, (const HBUINT16 *) backtrack.arrayZ,
input.len, (const HBUINT16 *) input.arrayZ + 1, input.len, (const HBUINT16 *) input.arrayZ + 1,
lookahead.len, (const HBUINT16 *) lookahead.arrayZ, lookahead.len, (const HBUINT16 *) lookahead.arrayZ,
lookup.len, lookup.arrayZ, lookup_context)); lookup.len, lookup.arrayZ, lookup_context);
} }
const Coverage &get_coverage () const const Coverage &get_coverage () const
@@ -2489,15 +2494,15 @@ struct ChainContextFormat3
struct ChainContext struct ChainContext
{ {
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (c->dispatch (u.format1)); case 1: return_trace (c->dispatch (u.format1, hb_forward<Ts> (ds)...));
case 2: return_trace (c->dispatch (u.format2)); case 2: return_trace (c->dispatch (u.format2, hb_forward<Ts> (ds)...));
case 3: return_trace (c->dispatch (u.format3)); case 3: return_trace (c->dispatch (u.format3, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -2519,18 +2524,14 @@ struct ExtensionFormat1
template <typename X> template <typename X>
const X& get_subtable () const const X& get_subtable () const
{ { return this + CastR<LOffsetTo<typename T::SubTable>> (extensionOffset); }
unsigned int offset = extensionOffset;
if (unlikely (!offset)) return Null(typename T::SubTable);
return StructAtOffset<typename T::SubTable> (this, offset);
}
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, format); TRACE_DISPATCH (this, format);
if (unlikely (!c->may_dispatch (this, this))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, this))) return_trace (c->no_dispatch_return_value ());
return_trace (get_subtable<typename T::SubTable> ().dispatch (c, get_type ())); return_trace (get_subtable<typename T::SubTable> ().dispatch (c, get_type (), hb_forward<Ts> (ds)...));
} }
/* This is called from may_dispatch() above with hb_sanitize_context_t. */ /* This is called from may_dispatch() above with hb_sanitize_context_t. */
@@ -2538,7 +2539,6 @@ struct ExtensionFormat1
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
return_trace (c->check_struct (this) && return_trace (c->check_struct (this) &&
extensionOffset != 0 &&
extensionLookupType != T::SubTable::Extension); extensionLookupType != T::SubTable::Extension);
} }
@@ -2547,7 +2547,7 @@ struct ExtensionFormat1
HBUINT16 extensionLookupType; /* Lookup type of subtable referenced HBUINT16 extensionLookupType; /* Lookup type of subtable referenced
* by ExtensionOffset (i.e. the * by ExtensionOffset (i.e. the
* extension subtable). */ * extension subtable). */
HBUINT32 extensionOffset; /* Offset to the extension subtable, Offset32 extensionOffset; /* Offset to the extension subtable,
* of lookup type subtable. */ * of lookup type subtable. */
public: public:
DEFINE_SIZE_STATIC (8); DEFINE_SIZE_STATIC (8);
@@ -2572,13 +2572,13 @@ struct Extension
} }
} }
template <typename context_t> template <typename context_t, typename ...Ts>
typename context_t::return_t dispatch (context_t *c) const typename context_t::return_t dispatch (context_t *c, Ts&&... ds) const
{ {
TRACE_DISPATCH (this, u.format); TRACE_DISPATCH (this, u.format);
if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ()); if (unlikely (!c->may_dispatch (this, &u.format))) return_trace (c->no_dispatch_return_value ());
switch (u.format) { switch (u.format) {
case 1: return_trace (u.format1.dispatch (c)); case 1: return_trace (u.format1.dispatch (c, hb_forward<Ts> (ds)...));
default:return_trace (c->default_return_value ()); default:return_trace (c->default_return_value ());
} }
} }
@@ -2681,21 +2681,22 @@ struct GSUBGPOS
bool subset (hb_subset_context_t *c) const bool subset (hb_subset_context_t *c) const
{ {
TRACE_SUBSET (this); TRACE_SUBSET (this);
struct GSUBGPOS *out = c->serializer->embed (*this); auto *out = c->serializer->embed (*this);
if (unlikely (!out)) return_trace (false); if (unlikely (!out)) return_trace (false);
out->scriptList.serialize_subset (c, this+scriptList, out); out->scriptList.serialize_subset (c, scriptList, this, out);
out->featureList.serialize_subset (c, this+featureList, out); out->featureList.serialize_subset (c, featureList, this, out);
typedef OffsetListOf<TLookup> TLookupList; typedef OffsetListOf<TLookup> TLookupList;
/* TODO Use intersects() to count how many subtables survive? */ /* TODO Use intersects() to count how many subtables survive? */
CastR<OffsetTo<TLookupList>> (out->lookupList) CastR<OffsetTo<TLookupList>> (out->lookupList)
.serialize_subset (c, .serialize_subset (c,
this+CastR<const OffsetTo<TLookupList> > (lookupList), CastR<OffsetTo<TLookupList>> (lookupList),
this,
out); out);
if (version.to_int () >= 0x00010001u) if (version.to_int () >= 0x00010001u)
out->featureVars.serialize_subset (c, this+featureVars, out); out->featureVars.serialize_copy (c->serializer, featureVars, this, out);
return_trace (true); return_trace (true);
} }

File diff suppressed because it is too large Load Diff

View File

@@ -93,6 +93,17 @@ hb_ot_tags_to_script_and_language (hb_tag_t script_tag,
HB_EXTERN hb_bool_t HB_EXTERN hb_bool_t
hb_ot_layout_has_glyph_classes (hb_face_t *face); hb_ot_layout_has_glyph_classes (hb_face_t *face);
/**
* hb_ot_layout_glyph_class_t:
* @HB_OT_LAYOUT_GLYPH_CLASS_UNCLASSIFIED: Glyphs not matching the other classifications
* @HB_OT_LAYOUT_GLYPH_CLASS_BASE_GLYPH: Spacing, single characters, capable of accepting marks
* @HB_OT_LAYOUT_GLYPH_CLASS_LIGATURE: Glyphs that represent ligation of multiple characters
* @HB_OT_LAYOUT_GLYPH_CLASS_MARK: Non-spacing, combining glyphs that represent marks
* @HB_OT_LAYOUT_GLYPH_CLASS_COMPONENT: Spacing glyphs that represent part of a single character
*
* The GDEF classes defined for glyphs.
*
**/
typedef enum { typedef enum {
HB_OT_LAYOUT_GLYPH_CLASS_UNCLASSIFIED = 0, HB_OT_LAYOUT_GLYPH_CLASS_UNCLASSIFIED = 0,
HB_OT_LAYOUT_GLYPH_CLASS_BASE_GLYPH = 1, HB_OT_LAYOUT_GLYPH_CLASS_BASE_GLYPH = 1,

View File

@@ -188,12 +188,12 @@ hb_ot_map_builder_t::compile (hb_ot_map_t &m,
feature_infos[j].default_value = feature_infos[i].default_value; feature_infos[j].default_value = feature_infos[i].default_value;
} else { } else {
feature_infos[j].flags &= ~F_GLOBAL; feature_infos[j].flags &= ~F_GLOBAL;
feature_infos[j].max_value = MAX (feature_infos[j].max_value, feature_infos[i].max_value); feature_infos[j].max_value = hb_max (feature_infos[j].max_value, feature_infos[i].max_value);
/* Inherit default_value from j */ /* Inherit default_value from j */
} }
feature_infos[j].flags |= (feature_infos[i].flags & F_HAS_FALLBACK); feature_infos[j].flags |= (feature_infos[i].flags & F_HAS_FALLBACK);
feature_infos[j].stage[0] = MIN (feature_infos[j].stage[0], feature_infos[i].stage[0]); feature_infos[j].stage[0] = hb_min (feature_infos[j].stage[0], feature_infos[i].stage[0]);
feature_infos[j].stage[1] = MIN (feature_infos[j].stage[1], feature_infos[i].stage[1]); feature_infos[j].stage[1] = hb_min (feature_infos[j].stage[1], feature_infos[i].stage[1]);
} }
feature_infos.shrink (j + 1); feature_infos.shrink (j + 1);
} }
@@ -213,20 +213,20 @@ hb_ot_map_builder_t::compile (hb_ot_map_t &m,
bits_needed = 0; bits_needed = 0;
else else
/* Limit bits per feature. */ /* Limit bits per feature. */
bits_needed = MIN(HB_OT_MAP_MAX_BITS, hb_bit_storage (info->max_value)); bits_needed = hb_min (HB_OT_MAP_MAX_BITS, hb_bit_storage (info->max_value));
if (!info->max_value || next_bit + bits_needed > 8 * sizeof (hb_mask_t)) if (!info->max_value || next_bit + bits_needed > 8 * sizeof (hb_mask_t))
continue; /* Feature disabled, or not enough bits. */ continue; /* Feature disabled, or not enough bits. */
hb_bool_t found = false; bool found = false;
unsigned int feature_index[2]; unsigned int feature_index[2];
for (unsigned int table_index = 0; table_index < 2; table_index++) for (unsigned int table_index = 0; table_index < 2; table_index++)
{ {
if (required_feature_tag[table_index] == info->tag) if (required_feature_tag[table_index] == info->tag)
required_feature_stage[table_index] = info->stage[table_index]; required_feature_stage[table_index] = info->stage[table_index];
found |= hb_ot_layout_language_find_feature (face, found |= (bool) hb_ot_layout_language_find_feature (face,
table_tags[table_index], table_tags[table_index],
script_index[table_index], script_index[table_index],
language_index[table_index], language_index[table_index],
@@ -237,7 +237,7 @@ hb_ot_map_builder_t::compile (hb_ot_map_t &m,
{ {
for (unsigned int table_index = 0; table_index < 2; table_index++) for (unsigned int table_index = 0; table_index < 2; table_index++)
{ {
found |= hb_ot_layout_table_find_feature (face, found |= (bool) hb_ot_layout_table_find_feature (face,
table_tags[table_index], table_tags[table_index],
info->tag, info->tag,
&feature_index[table_index]); &feature_index[table_index]);

View File

@@ -68,7 +68,7 @@ struct hb_ot_map_t
unsigned short random : 1; unsigned short random : 1;
hb_mask_t mask; hb_mask_t mask;
static int cmp (const void *pa, const void *pb) HB_INTERNAL static int cmp (const void *pa, const void *pb)
{ {
const lookup_map_t *a = (const lookup_map_t *) pa; const lookup_map_t *a = (const lookup_map_t *) pa;
const lookup_map_t *b = (const lookup_map_t *) pb; const lookup_map_t *b = (const lookup_map_t *) pb;
@@ -167,7 +167,7 @@ struct hb_ot_map_t
hb_mask_t global_mask; hb_mask_t global_mask;
hb_vector_t<feature_map_t> features; hb_sorted_vector_t<feature_map_t> features;
hb_vector_t<lookup_map_t> lookups[2]; /* GSUB/GPOS */ hb_vector_t<lookup_map_t> lookups[2]; /* GSUB/GPOS */
hb_vector_t<stage_map_t> stages[2]; /* GSUB/GPOS */ hb_vector_t<stage_map_t> stages[2]; /* GSUB/GPOS */
}; };
@@ -247,7 +247,7 @@ struct hb_ot_map_builder_t
unsigned int default_value; /* for non-global features, what should the unset glyphs take */ unsigned int default_value; /* for non-global features, what should the unset glyphs take */
unsigned int stage[2]; /* GSUB/GPOS */ unsigned int stage[2]; /* GSUB/GPOS */
static int cmp (const void *pa, const void *pb) HB_INTERNAL static int cmp (const void *pa, const void *pb)
{ {
const feature_info_t *a = (const feature_info_t *) pa; const feature_info_t *a = (const feature_info_t *) pa;
const feature_info_t *b = (const feature_info_t *) pb; const feature_info_t *b = (const feature_info_t *) pb;

View File

@@ -462,7 +462,7 @@ struct MathGlyphPartRecord
out.end_connector_length = font->em_scale (endConnectorLength, scale); out.end_connector_length = font->em_scale (endConnectorLength, scale);
out.full_advance = font->em_scale (fullAdvance, scale); out.full_advance = font->em_scale (fullAdvance, scale);
static_assert ((unsigned int) HB_MATH_GLYPH_PART_FLAG_EXTENDER == static_assert ((unsigned int) HB_OT_MATH_GLYPH_PART_FLAG_EXTENDER ==
(unsigned int) PartFlags::Extender, ""); (unsigned int) PartFlags::Extender, "");
out.flags = (hb_ot_math_glyph_part_flags_t) out.flags = (hb_ot_math_glyph_part_flags_t)

View File

@@ -37,6 +37,11 @@
* @include: hb-ot.h * @include: hb-ot.h
* *
* Functions for fetching mathematics layout data from OpenType fonts. * Functions for fetching mathematics layout data from OpenType fonts.
*
* HarfBuzz itself does not implement a math layout solution. The
* functions and types provided can be used by client programs to access
* the font data necessary for typesetting OpenType Math layout.
*
**/ **/
@@ -48,31 +53,36 @@
* hb_ot_math_has_data: * hb_ot_math_has_data:
* @face: #hb_face_t to test * @face: #hb_face_t to test
* *
* This function allows to verify the presence of an OpenType MATH table on the * Tests whether a face has a `MATH` table.
* face.
* *
* Return value: true if face has a MATH table, false otherwise * Return value: true if the table is found, false otherwise
* *
* Since: 1.3.3 * Since: 1.3.3
**/ **/
hb_bool_t hb_bool_t
hb_ot_math_has_data (hb_face_t *face) hb_ot_math_has_data (hb_face_t *face)
{ {
#ifdef HB_NO_MATH
return false;
#endif
return face->table.MATH->has_data (); return face->table.MATH->has_data ();
} }
/** /**
* hb_ot_math_get_constant: * hb_ot_math_get_constant:
* @font: #hb_font_t from which to retrieve the value * @font: #hb_font_t to work upon
* @constant: #hb_ot_math_constant_t the constant to retrieve * @constant: #hb_ot_math_constant_t the constant to retrieve
* *
* This function returns the requested math constants as a #hb_position_t. * Fetches the specified math constant. For most constants, the value returned
* If the request constant is HB_OT_MATH_CONSTANT_SCRIPT_PERCENT_SCALE_DOWN, * is an #hb_position_t.
* HB_OT_MATH_CONSTANT_SCRIPT_SCRIPT_PERCENT_SCALE_DOWN or
* HB_OT_MATH_CONSTANT_SCRIPT_PERCENT_SCALE_DOWN then the return value is
* actually an integer between 0 and 100 representing that percentage.
* *
* Return value: the requested constant or 0 * However, if the requested constant is #HB_OT_MATH_CONSTANT_SCRIPT_PERCENT_SCALE_DOWN,
* #HB_OT_MATH_CONSTANT_SCRIPT_SCRIPT_PERCENT_SCALE_DOWN or
* #HB_OT_MATH_CONSTANT_SCRIPT_PERCENT_SCALE_DOWN, then the return value is
* an integer between 0 and 100 representing that percentage.
*
* Return value: the requested constant or zero
* *
* Since: 1.3.3 * Since: 1.3.3
**/ **/
@@ -80,15 +90,22 @@ hb_position_t
hb_ot_math_get_constant (hb_font_t *font, hb_ot_math_get_constant (hb_font_t *font,
hb_ot_math_constant_t constant) hb_ot_math_constant_t constant)
{ {
#ifdef HB_NO_MATH
return 0;
#endif
return font->face->table.MATH->get_constant(constant, font); return font->face->table.MATH->get_constant(constant, font);
} }
/** /**
* hb_ot_math_get_glyph_italics_correction: * hb_ot_math_get_glyph_italics_correction:
* @font: #hb_font_t from which to retrieve the value * @font: #hb_font_t to work upon
* @glyph: glyph index from which to retrieve the value * @glyph: The glyph index from which to retrieve the value
* *
* Return value: the italics correction of the glyph or 0 * Fetches an italics-correction value (if one exists) for the specified
* glyph index.
*
* Return value: the italics correction of the glyph or zero
* *
* Since: 1.3.3 * Since: 1.3.3
**/ **/
@@ -96,15 +113,29 @@ hb_position_t
hb_ot_math_get_glyph_italics_correction (hb_font_t *font, hb_ot_math_get_glyph_italics_correction (hb_font_t *font,
hb_codepoint_t glyph) hb_codepoint_t glyph)
{ {
#ifdef HB_NO_MATH
return 0;
#endif
return font->face->table.MATH->get_glyph_info().get_italics_correction (glyph, font); return font->face->table.MATH->get_glyph_info().get_italics_correction (glyph, font);
} }
/** /**
* hb_ot_math_get_glyph_top_accent_attachment: * hb_ot_math_get_glyph_top_accent_attachment:
* @font: #hb_font_t from which to retrieve the value * @font: #hb_font_t to work upon
* @glyph: glyph index from which to retrieve the value * @glyph: The glyph index from which to retrieve the value
* *
* Return value: the top accent attachment of the glyph or 0 * Fetches a top-accent-attachment value (if one exists) for the specified
* glyph index.
*
* For any glyph that does not have a top-accent-attachment value - that is,
* a glyph not covered by the `MathTopAccentAttachment` table (or, when
* @font has no `MathTopAccentAttachment` table or no `MATH` table, any
* glyph) - the function synthesizes a value, returning the position at
* one-half the glyph's advance width.
*
* Return value: the top accent attachment of the glyph or 0.5 * the advance
* width of @glyph
* *
* Since: 1.3.3 * Since: 1.3.3
**/ **/
@@ -112,13 +143,19 @@ hb_position_t
hb_ot_math_get_glyph_top_accent_attachment (hb_font_t *font, hb_ot_math_get_glyph_top_accent_attachment (hb_font_t *font,
hb_codepoint_t glyph) hb_codepoint_t glyph)
{ {
#ifdef HB_NO_MATH
return 0;
#endif
return font->face->table.MATH->get_glyph_info().get_top_accent_attachment (glyph, font); return font->face->table.MATH->get_glyph_info().get_top_accent_attachment (glyph, font);
} }
/** /**
* hb_ot_math_is_glyph_extended_shape: * hb_ot_math_is_glyph_extended_shape:
* @face: a #hb_face_t to test * @face: #hb_face_t to work upon
* @glyph: a glyph index to test * @glyph: The glyph index to test
*
* Tests whether the given glyph index is an extended shape in the face.
* *
* Return value: true if the glyph is an extended shape, false otherwise * Return value: true if the glyph is an extended shape, false otherwise
* *
@@ -128,23 +165,29 @@ hb_bool_t
hb_ot_math_is_glyph_extended_shape (hb_face_t *face, hb_ot_math_is_glyph_extended_shape (hb_face_t *face,
hb_codepoint_t glyph) hb_codepoint_t glyph)
{ {
#ifdef HB_NO_MATH
return false;
#endif
return face->table.MATH->get_glyph_info().is_extended_shape (glyph); return face->table.MATH->get_glyph_info().is_extended_shape (glyph);
} }
/** /**
* hb_ot_math_get_glyph_kerning: * hb_ot_math_get_glyph_kerning:
* @font: #hb_font_t from which to retrieve the value * @font: #hb_font_t to work upon
* @glyph: glyph index from which to retrieve the value * @glyph: The glyph index from which to retrieve the value
* @kern: the #hb_ot_math_kern_t from which to retrieve the value * @kern: The #hb_ot_math_kern_t from which to retrieve the value
* @correction_height: the correction height to use to determine the kerning. * @correction_height: the correction height to use to determine the kerning.
* *
* This function tries to retrieve the MathKern table for the specified font, * Fetches the math kerning (cut-ins) value for the specified font, glyph index, and
* glyph and #hb_ot_math_kern_t. Then it browses the list of heights from the * @kern.
* MathKern table to find one value that is greater or equal to specified
* correction_height. If one is found the corresponding value from the list of
* kerns is returned and otherwise the last kern value is returned.
* *
* Return value: requested kerning or 0 * If the MathKern table is found, the function examines it to find a height
* value that is greater or equal to @correction_height. If such a height
* value is found, corresponding kerning value from the table is returned. If
* no such height value is found, the last kerning value is returned.
*
* Return value: requested kerning value or zero
* *
* Since: 1.3.3 * Since: 1.3.3
**/ **/
@@ -154,6 +197,10 @@ hb_ot_math_get_glyph_kerning (hb_font_t *font,
hb_ot_math_kern_t kern, hb_ot_math_kern_t kern,
hb_position_t correction_height) hb_position_t correction_height)
{ {
#ifdef HB_NO_MATH
return 0;
#endif
return font->face->table.MATH->get_glyph_info().get_kerning (glyph, return font->face->table.MATH->get_glyph_info().get_kerning (glyph,
kern, kern,
correction_height, correction_height,
@@ -162,20 +209,24 @@ hb_ot_math_get_glyph_kerning (hb_font_t *font,
/** /**
* hb_ot_math_get_glyph_variants: * hb_ot_math_get_glyph_variants:
* @font: #hb_font_t from which to retrieve the values * @font: #hb_font_t to work upon
* @glyph: index of the glyph to stretch * @glyph: The index of the glyph to stretch
* @direction: direction of the stretching * @direction: The direction of the stretching (horizontal or vertical)
* @start_offset: offset of the first variant to retrieve * @start_offset: offset of the first variant to retrieve
* @variants_count: maximum number of variants to retrieve after start_offset * @variants_count: (inout): Input = the maximum number of variants to return;
* (IN) and actual number of variants retrieved (OUT) * Output = the actual number of variants returned
* @variants: array of size at least @variants_count to store the result * @variants: (out) (array length=variants_count): array of variants returned
* *
* This function tries to retrieve the MathGlyphConstruction for the specified * Fetches the MathGlyphConstruction for the specified font, glyph index, and
* font, glyph and direction. Note that only the value of * direction. The corresponding list of size variants is returned as a list of
* #HB_DIRECTION_IS_HORIZONTAL is considered. It provides the corresponding list * #hb_ot_math_glyph_variant_t structs.
* of size variants as an array of hb_ot_math_glyph_variant_t structs.
* *
* Return value: the total number of size variants available or 0 * <note>The @direction parameter is only used to select between horizontal
* or vertical directions for the construction. Even though all #hb_direction_t
* values are accepted, only the result of #HB_DIRECTION_IS_HORIZONTAL is
* considered.</note>
*
* Return value: the total number of size variants available or zero
* *
* Since: 1.3.3 * Since: 1.3.3
**/ **/
@@ -187,6 +238,12 @@ hb_ot_math_get_glyph_variants (hb_font_t *font,
unsigned int *variants_count, /* IN/OUT */ unsigned int *variants_count, /* IN/OUT */
hb_ot_math_glyph_variant_t *variants /* OUT */) hb_ot_math_glyph_variant_t *variants /* OUT */)
{ {
#ifdef HB_NO_MATH
if (variants_count)
*variants_count = 0;
return 0;
#endif
return font->face->table.MATH->get_variants().get_glyph_variants (glyph, direction, font, return font->face->table.MATH->get_variants().get_glyph_variants (glyph, direction, font,
start_offset, start_offset,
variants_count, variants_count,
@@ -195,15 +252,19 @@ hb_ot_math_get_glyph_variants (hb_font_t *font,
/** /**
* hb_ot_math_get_min_connector_overlap: * hb_ot_math_get_min_connector_overlap:
* @font: #hb_font_t from which to retrieve the value * @font: #hb_font_t to work upon
* @direction: direction of the stretching * @direction: direction of the stretching (horizontal or vertical)
* *
* This function tries to retrieve the MathVariants table for the specified * Fetches the MathVariants table for the specified font and returns the
* font and returns the minimum overlap of connecting glyphs to draw a glyph * minimum overlap of connecting glyphs that are required to draw a glyph
* assembly in the specified direction. Note that only the value of * assembly in the specified direction.
* #HB_DIRECTION_IS_HORIZONTAL is considered.
* *
* Return value: requested min connector overlap or 0 * <note>The @direction parameter is only used to select between horizontal
* or vertical directions for the construction. Even though all #hb_direction_t
* values are accepted, only the result of #HB_DIRECTION_IS_HORIZONTAL is
* considered.</note>
*
* Return value: requested minimum connector overlap or zero
* *
* Since: 1.3.3 * Since: 1.3.3
**/ **/
@@ -211,24 +272,33 @@ hb_position_t
hb_ot_math_get_min_connector_overlap (hb_font_t *font, hb_ot_math_get_min_connector_overlap (hb_font_t *font,
hb_direction_t direction) hb_direction_t direction)
{ {
#ifdef HB_NO_MATH
return 0;
#endif
return font->face->table.MATH->get_variants().get_min_connector_overlap (direction, font); return font->face->table.MATH->get_variants().get_min_connector_overlap (direction, font);
} }
/** /**
* hb_ot_math_get_glyph_assembly: * hb_ot_math_get_glyph_assembly:
* @font: #hb_font_t from which to retrieve the values * @font: #hb_font_t to work upon
* @glyph: index of the glyph to stretch * @glyph: The index of the glyph to stretch
* @direction: direction of the stretching * @direction: direction of the stretching (horizontal or vertical)
* @start_offset: offset of the first glyph part to retrieve * @start_offset: offset of the first glyph part to retrieve
* @parts_count: maximum number of glyph parts to retrieve after start_offset * @parts_count: (inout): Input = maximum number of glyph parts to return;
* (IN) and actual number of parts retrieved (OUT) * Output = actual number of parts returned
* @parts: array of size at least @parts_count to store the result * @parts: (out) (array length=parts_count): the glyph parts returned
* @italics_correction: italic correction of the glyph assembly * @italics_correction: (out): italics correction of the glyph assembly
* *
* This function tries to retrieve the GlyphAssembly for the specified font, * Fetches the GlyphAssembly for the specified font, glyph index, and direction.
* glyph and direction. Note that only the value of #HB_DIRECTION_IS_HORIZONTAL * Returned are a list of #hb_ot_math_glyph_part_t glyph parts that can be
* is considered. It provides the information necessary to draw the glyph * used to draw the glyph and an italics-correction value (if one is defined
* assembly as an array of #hb_ot_math_glyph_part_t. * in the font).
*
* <note>The @direction parameter is only used to select between horizontal
* or vertical directions for the construction. Even though all #hb_direction_t
* values are accepted, only the result of #HB_DIRECTION_IS_HORIZONTAL is
* considered.</note>
* *
* Return value: the total number of parts in the glyph assembly * Return value: the total number of parts in the glyph assembly
* *
@@ -243,6 +313,12 @@ hb_ot_math_get_glyph_assembly (hb_font_t *font,
hb_ot_math_glyph_part_t *parts, /* OUT */ hb_ot_math_glyph_part_t *parts, /* OUT */
hb_position_t *italics_correction /* OUT */) hb_position_t *italics_correction /* OUT */)
{ {
#ifdef HB_NO_MATH
if (parts_count)
*parts_count = 0;
return 0;
#endif
return font->face->table.MATH->get_variants().get_glyph_parts (glyph, return font->face->table.MATH->get_variants().get_glyph_parts (glyph,
direction, direction,
font, font,

View File

@@ -50,6 +50,9 @@ HB_BEGIN_DECLS
/** /**
* hb_ot_math_constant_t: * hb_ot_math_constant_t:
* *
* The 'MATH' table constants specified at
* https://docs.microsoft.com/en-us/typography/opentype/spec/math
*
* Since: 1.3.3 * Since: 1.3.3
*/ */
typedef enum { typedef enum {
@@ -114,6 +117,9 @@ typedef enum {
/** /**
* hb_ot_math_kern_t: * hb_ot_math_kern_t:
* *
* The math kerning-table types defined for the four corners
* of a glyph.
*
* Since: 1.3.3 * Since: 1.3.3
*/ */
typedef enum { typedef enum {
@@ -125,6 +131,10 @@ typedef enum {
/** /**
* hb_ot_math_glyph_variant_t: * hb_ot_math_glyph_variant_t:
* @glyph: The glyph index of the variant
* @advance: The advance width of the variant
*
* Data type to hold math-variant information for a glyph.
* *
* Since: 1.3.3 * Since: 1.3.3
*/ */
@@ -136,14 +146,25 @@ typedef struct hb_ot_math_glyph_variant_t {
/** /**
* hb_ot_math_glyph_part_flags_t: * hb_ot_math_glyph_part_flags_t:
* *
* Flags for math glyph parts.
*
* Since: 1.3.3 * Since: 1.3.3
*/ */
typedef enum { /*< flags >*/ typedef enum { /*< flags >*/
HB_MATH_GLYPH_PART_FLAG_EXTENDER = 0x00000001u /* Extender glyph */ HB_OT_MATH_GLYPH_PART_FLAG_EXTENDER = 0x00000001u /* Extender glyph */
} hb_ot_math_glyph_part_flags_t; } hb_ot_math_glyph_part_flags_t;
/** /**
* hb_ot_math_glyph_part_t: * hb_ot_math_glyph_part_t:
* @glyph: The glyph index of the variant part
* @start_connector_length: The length of the connector on the starting side of the variant part
* @end_connection_length: The length of the connector on the ending side of the variant part
* @full_advance: The total advance of the part
* @flags: #hb_ot_math_glyph_part_flags_t flags for the part
*
* Data type to hold information for a "part" component of a math-variant glyph.
* Large variants for stretchable math glyphs (such as parentheses) can be constructed
* on the fly from parts.
* *
* Since: 1.3.3 * Since: 1.3.3
*/ */

View File

@@ -77,7 +77,7 @@ struct maxp
void set_num_glyphs (unsigned int count) void set_num_glyphs (unsigned int count)
{ {
numGlyphs.set (count); numGlyphs = count;
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -119,13 +119,13 @@ struct maxp
if (maxp_prime->version.major == 1) if (maxp_prime->version.major == 1)
{ {
maxpV1Tail &v1 = StructAfter<maxpV1Tail> (*maxp_prime); maxpV1Tail &v1 = StructAfter<maxpV1Tail> (*maxp_prime);
v1.maxZones.set (1); v1.maxZones = 1;
v1.maxTwilightPoints.set (0); v1.maxTwilightPoints = 0;
v1.maxStorage.set (0); v1.maxStorage = 0;
v1.maxFunctionDefs.set (0); v1.maxFunctionDefs = 0;
v1.maxInstructionDefs.set (0); v1.maxInstructionDefs = 0;
v1.maxStackElements.set (0); v1.maxStackElements = 0;
v1.maxSizeOfInstructions.set (0); v1.maxSizeOfInstructions = 0;
} }
} }

View File

@@ -24,6 +24,9 @@
* Google Author(s): Behdad Esfahbod * Google Author(s): Behdad Esfahbod
*/ */
#ifndef HB_OT_NAME_LANGUAGE_STATIC_HH
#define HB_OT_NAME_LANGUAGE_STATIC_HH
#include "hb-ot-name-language.hh" #include "hb-ot-name-language.hh"
/* Following two tables were generated by joining FreeType, FontConfig, /* Following two tables were generated by joining FreeType, FontConfig,
@@ -427,6 +430,9 @@ _hb_ot_name_language_for (unsigned int code,
const hb_ot_language_map_t *array, const hb_ot_language_map_t *array,
unsigned int len) unsigned int len)
{ {
#ifdef HB_NO_OT_NAME_LANGUAGE
return HB_LANGUAGE_INVALID;
#endif
const hb_ot_language_map_t *entry = (const hb_ot_language_map_t *) const hb_ot_language_map_t *entry = (const hb_ot_language_map_t *)
hb_bsearch (&code, hb_bsearch (&code,
array, array,
@@ -455,3 +461,5 @@ _hb_ot_name_language_for_mac_code (unsigned int code)
hb_mac_language_map, hb_mac_language_map,
ARRAY_LENGTH (hb_mac_language_map)); ARRAY_LENGTH (hb_mac_language_map));
} }
#endif /* HB_OT_NAME_LANGUAGE_STATIC_HH */

View File

@@ -51,6 +51,7 @@ struct NameRecord
{ {
hb_language_t language (hb_face_t *face) const hb_language_t language (hb_face_t *face) const
{ {
#ifndef HB_NO_OT_NAME_LANGUAGE
unsigned int p = platformID; unsigned int p = platformID;
unsigned int l = languageID; unsigned int l = languageID;
@@ -60,9 +61,12 @@ struct NameRecord
if (p == 1) if (p == 1)
return _hb_ot_name_language_for_mac_code (l); return _hb_ot_name_language_for_mac_code (l);
#ifndef HB_NO_OT_NAME_LANGUAGE_AAT
if (p == 0) if (p == 0)
return _hb_aat_language_get (face, l); return _hb_aat_language_get (face, l);
#endif
#endif
return HB_LANGUAGE_INVALID; return HB_LANGUAGE_INVALID;
} }
@@ -93,11 +97,21 @@ struct NameRecord
return UNSUPPORTED; return UNSUPPORTED;
} }
NameRecord* copy (hb_serialize_context_t *c,
const void *src_base,
const void *dst_base) const
{
TRACE_SERIALIZE (this);
auto *out = c->embed (this);
if (unlikely (!out)) return_trace (nullptr);
out->offset.serialize_copy (c, offset, src_base, dst_base, length);
return_trace (out);
}
bool sanitize (hb_sanitize_context_t *c, const void *base) const bool sanitize (hb_sanitize_context_t *c, const void *base) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
/* We can check from base all the way up to the end of string... */ return_trace (c->check_struct (this) && offset.sanitize (c, base, length));
return_trace (c->check_struct (this) && c->check_range ((char *) base, (unsigned int) length + offset));
} }
HBUINT16 platformID; /* Platform ID. */ HBUINT16 platformID; /* Platform ID. */
@@ -105,7 +119,8 @@ struct NameRecord
HBUINT16 languageID; /* Language ID. */ HBUINT16 languageID; /* Language ID. */
HBUINT16 nameID; /* Name ID. */ HBUINT16 nameID; /* Name ID. */
HBUINT16 length; /* String length (in bytes). */ HBUINT16 length; /* String length (in bytes). */
HBUINT16 offset; /* String offset from start of storage area (in bytes). */ NNOffsetTo<UnsizedArrayOf<HBUINT8>>
offset; /* String offset from start of storage area (in bytes). */
public: public:
DEFINE_SIZE_STATIC (12); DEFINE_SIZE_STATIC (12);
}; };
@@ -156,15 +171,58 @@ struct name
unsigned int get_size () const unsigned int get_size () const
{ return min_size + count * nameRecordZ.item_size; } { return min_size + count * nameRecordZ.item_size; }
template <typename Iterator,
hb_requires (hb_is_source_of (Iterator, const NameRecord &))>
bool serialize (hb_serialize_context_t *c,
Iterator it,
const void *src_string_pool)
{
TRACE_SERIALIZE (this);
if (unlikely (!c->extend_min ((*this)))) return_trace (false);
this->format = 0;
this->count = it.len ();
auto snap = c->snapshot ();
this->nameRecordZ.serialize (c, this->count);
if (unlikely (!c->check_assign (this->stringOffset, c->length ()))) return_trace (false);
c->revert (snap);
const void *dst_string_pool = &(this + this->stringOffset);
+ it
| hb_apply ([=] (const NameRecord& _) { c->copy (_, src_string_pool, dst_string_pool); })
;
if (unlikely (c->ran_out_of_room)) return_trace (false);
assert (this->stringOffset == c->length ());
return_trace (true);
}
bool subset (hb_subset_context_t *c) const
{
TRACE_SUBSET (this);
name *name_prime = c->serializer->start_embed<name> ();
if (unlikely (!name_prime)) return_trace (false);
auto it =
+ nameRecordZ.as_array (count)
| hb_filter (c->plan->name_ids, &NameRecord::nameID)
;
name_prime->serialize (c->serializer, it, hb_addressof (this + stringOffset));
return_trace (name_prime->count);
}
bool sanitize_records (hb_sanitize_context_t *c) const bool sanitize_records (hb_sanitize_context_t *c) const
{ {
TRACE_SANITIZE (this); TRACE_SANITIZE (this);
const void *string_pool = (this+stringOffset).arrayZ; const void *string_pool = (this+stringOffset).arrayZ;
unsigned int _count = count; return_trace (nameRecordZ.sanitize (c, count, string_pool));
/* Move to run-time?! */
for (unsigned int i = 0; i < _count; i++)
if (!nameRecordZ[i].sanitize (c, string_pool)) return_trace (false);
return_trace (true);
} }
bool sanitize (hb_sanitize_context_t *c) const bool sanitize (hb_sanitize_context_t *c) const
@@ -173,7 +231,8 @@ struct name
return_trace (c->check_struct (this) && return_trace (c->check_struct (this) &&
likely (format == 0 || format == 1) && likely (format == 0 || format == 1) &&
c->check_array (nameRecordZ.arrayZ, count) && c->check_array (nameRecordZ.arrayZ, count) &&
c->check_range (this, stringOffset)); c->check_range (this, stringOffset) &&
sanitize_records (c));
} }
struct accelerator_t struct accelerator_t

View File

@@ -58,6 +58,11 @@ const hb_ot_name_entry_t *
hb_ot_name_list_names (hb_face_t *face, hb_ot_name_list_names (hb_face_t *face,
unsigned int *num_entries /* OUT */) unsigned int *num_entries /* OUT */)
{ {
#ifdef HB_NO_NAME
if (num_entries)
*num_entries = 0;
return 0;
#endif
const OT::name_accelerator_t &name = *face->table.name; const OT::name_accelerator_t &name = *face->table.name;
if (num_entries) *num_entries = name.names.length; if (num_entries) *num_entries = name.names.length;
return (const hb_ot_name_entry_t *) name.names; return (const hb_ot_name_entry_t *) name.names;
@@ -93,7 +98,7 @@ hb_ot_name_convert_utf (hb_bytes_t bytes,
dst = dst_next; dst = dst_next;
src = src_next; src = src_next;
}; }
*text_size = dst - text; *text_size = dst - text;
*dst = 0; /* NUL-terminate. */ *dst = 0; /* NUL-terminate. */
@@ -105,7 +110,7 @@ hb_ot_name_convert_utf (hb_bytes_t bytes,
{ {
src = in_utf_t::next (src, src_end, &unicode, replacement); src = in_utf_t::next (src, src_end, &unicode, replacement);
dst_len += out_utf_t::encode_len (unicode); dst_len += out_utf_t::encode_len (unicode);
}; }
return dst_len; return dst_len;
} }
@@ -167,6 +172,11 @@ hb_ot_name_get_utf8 (hb_face_t *face,
unsigned int *text_size /* IN/OUT */, unsigned int *text_size /* IN/OUT */,
char *text /* OUT */) char *text /* OUT */)
{ {
#ifdef HB_NO_NAME
if (text_size)
*text_size = 0;
return 0;
#endif
return hb_ot_name_get_utf<hb_utf8_t> (face, name_id, language, text_size, return hb_ot_name_get_utf<hb_utf8_t> (face, name_id, language, text_size,
(hb_utf8_t::codepoint_t *) text); (hb_utf8_t::codepoint_t *) text);
} }
@@ -194,6 +204,11 @@ hb_ot_name_get_utf16 (hb_face_t *face,
unsigned int *text_size /* IN/OUT */, unsigned int *text_size /* IN/OUT */,
uint16_t *text /* OUT */) uint16_t *text /* OUT */)
{ {
#ifdef HB_NO_NAME
if (text_size)
*text_size = 0;
return 0;
#endif
return hb_ot_name_get_utf<hb_utf16_t> (face, name_id, language, text_size, text); return hb_ot_name_get_utf<hb_utf16_t> (face, name_id, language, text_size, text);
} }
@@ -220,5 +235,10 @@ hb_ot_name_get_utf32 (hb_face_t *face,
unsigned int *text_size /* IN/OUT */, unsigned int *text_size /* IN/OUT */,
uint32_t *text /* OUT */) uint32_t *text /* OUT */)
{ {
#ifdef HB_NO_NAME
if (text_size)
*text_size = 0;
return 0;
#endif
return hb_ot_name_get_utf<hb_utf32_t> (face, name_id, language, text_size, text); return hb_ot_name_get_utf<hb_utf32_t> (face, name_id, language, text_size, text);
} }

View File

@@ -160,8 +160,8 @@ struct OS2
uint16_t min_cp, max_cp; uint16_t min_cp, max_cp;
find_min_and_max_codepoint (plan->unicodes, &min_cp, &max_cp); find_min_and_max_codepoint (plan->unicodes, &min_cp, &max_cp);
os2_prime->usFirstCharIndex.set (min_cp); os2_prime->usFirstCharIndex = min_cp;
os2_prime->usLastCharIndex.set (max_cp); os2_prime->usLastCharIndex = max_cp;
_update_unicode_ranges (plan->unicodes, os2_prime->ulUnicodeRange); _update_unicode_ranges (plan->unicodes, os2_prime->ulUnicodeRange);
bool result = plan->add_table (HB_OT_TAG_OS2, os2_prime_blob); bool result = plan->add_table (HB_OT_TAG_OS2, os2_prime_blob);
@@ -174,7 +174,7 @@ struct OS2
HBUINT32 ulUnicodeRange[4]) const HBUINT32 ulUnicodeRange[4]) const
{ {
for (unsigned int i = 0; i < 4; i++) for (unsigned int i = 0; i < 4; i++)
ulUnicodeRange[i].set (0); ulUnicodeRange[i] = 0;
hb_codepoint_t cp = HB_SET_VALUE_INVALID; hb_codepoint_t cp = HB_SET_VALUE_INVALID;
while (codepoints->next (&cp)) { while (codepoints->next (&cp)) {
@@ -184,14 +184,14 @@ struct OS2
unsigned int block = bit / 32; unsigned int block = bit / 32;
unsigned int bit_in_block = bit % 32; unsigned int bit_in_block = bit % 32;
unsigned int mask = 1 << bit_in_block; unsigned int mask = 1 << bit_in_block;
ulUnicodeRange[block].set (ulUnicodeRange[block] | mask); ulUnicodeRange[block] = ulUnicodeRange[block] | mask;
} }
if (cp >= 0x10000 && cp <= 0x110000) if (cp >= 0x10000 && cp <= 0x110000)
{ {
/* the spec says that bit 57 ("Non Plane 0") implies that there's /* the spec says that bit 57 ("Non Plane 0") implies that there's
at least one codepoint beyond the BMP; so I also include all at least one codepoint beyond the BMP; so I also include all
the non-BMP codepoints here */ the non-BMP codepoints here */
ulUnicodeRange[1].set (ulUnicodeRange[1] | (1 << 25)); ulUnicodeRange[1] = ulUnicodeRange[1] | (1 << 25);
} }
} }
} }

View File

@@ -88,7 +88,7 @@ struct post
return false; return false;
} }
post_prime->version.major.set (3); // Version 3 does not have any glyph names. post_prime->version.major = 3; // Version 3 does not have any glyph names.
bool result = plan->add_table (HB_OT_TAG_post, post_prime_blob); bool result = plan->add_table (HB_OT_TAG_post, post_prime_blob);
hb_blob_destroy (post_prime_blob); hb_blob_destroy (post_prime_blob);
@@ -131,7 +131,7 @@ struct post
hb_bytes_t s = find_glyph_name (glyph); hb_bytes_t s = find_glyph_name (glyph);
if (!s.length) return false; if (!s.length) return false;
if (!buf_len) return true; if (!buf_len) return true;
unsigned int len = MIN (buf_len - 1, s.length); unsigned int len = hb_min (buf_len - 1, s.length);
strncpy (buf, s.arrayZ, len); strncpy (buf, s.arrayZ, len);
buf[len] = '\0'; buf[len] = '\0';
return true; return true;

View File

@@ -66,8 +66,8 @@ arabic_fallback_synthesize_lookup_single (const hb_ot_shape_plan_t *plan HB_UNUS
u_glyph > 0xFFFFu || s_glyph > 0xFFFFu) u_glyph > 0xFFFFu || s_glyph > 0xFFFFu)
continue; continue;
glyphs[num_glyphs].set (u_glyph); glyphs[num_glyphs] = u_glyph;
substitutes[num_glyphs].set (s_glyph); substitutes[num_glyphs] = s_glyph;
num_glyphs++; num_glyphs++;
} }
@@ -77,7 +77,9 @@ arabic_fallback_synthesize_lookup_single (const hb_ot_shape_plan_t *plan HB_UNUS
/* Bubble-sort or something equally good! /* Bubble-sort or something equally good!
* May not be good-enough for presidential candidate interviews, but good-enough for us... */ * May not be good-enough for presidential candidate interviews, but good-enough for us... */
hb_stable_sort (&glyphs[0], num_glyphs, (int(*)(const OT::GlyphID*, const OT::GlyphID *)) OT::GlyphID::cmp, &substitutes[0]); hb_stable_sort (&glyphs[0], num_glyphs,
(int(*)(const OT::HBUINT16*, const OT::HBUINT16 *)) OT::GlyphID::cmp,
&substitutes[0]);
/* Each glyph takes four bytes max, and there's some overhead. */ /* Each glyph takes four bytes max, and there's some overhead. */
@@ -86,10 +88,9 @@ arabic_fallback_synthesize_lookup_single (const hb_ot_shape_plan_t *plan HB_UNUS
OT::SubstLookup *lookup = c.start_serialize<OT::SubstLookup> (); OT::SubstLookup *lookup = c.start_serialize<OT::SubstLookup> ();
bool ret = lookup->serialize_single (&c, bool ret = lookup->serialize_single (&c,
OT::LookupFlag::IgnoreMarks, OT::LookupFlag::IgnoreMarks,
hb_array (glyphs, num_glyphs), hb_sorted_array (glyphs, num_glyphs),
hb_array (substitutes, num_glyphs)); hb_array (substitutes, num_glyphs));
c.end_serialize (); c.end_serialize ();
/* TODO sanitize the results? */
return ret ? c.copy<OT::SubstLookup> () : nullptr; return ret ? c.copy<OT::SubstLookup> () : nullptr;
} }
@@ -118,12 +119,14 @@ arabic_fallback_synthesize_lookup_ligature (const hb_ot_shape_plan_t *plan HB_UN
hb_codepoint_t first_glyph; hb_codepoint_t first_glyph;
if (!hb_font_get_glyph (font, first_u, 0, &first_glyph)) if (!hb_font_get_glyph (font, first_u, 0, &first_glyph))
continue; continue;
first_glyphs[num_first_glyphs].set (first_glyph); first_glyphs[num_first_glyphs] = first_glyph;
ligature_per_first_glyph_count_list[num_first_glyphs] = 0; ligature_per_first_glyph_count_list[num_first_glyphs] = 0;
first_glyphs_indirection[num_first_glyphs] = first_glyph_idx; first_glyphs_indirection[num_first_glyphs] = first_glyph_idx;
num_first_glyphs++; num_first_glyphs++;
} }
hb_stable_sort (&first_glyphs[0], num_first_glyphs, (int(*)(const OT::GlyphID*, const OT::GlyphID *)) OT::GlyphID::cmp, &first_glyphs_indirection[0]); hb_stable_sort (&first_glyphs[0], num_first_glyphs,
(int(*)(const OT::HBUINT16*, const OT::HBUINT16 *)) OT::GlyphID::cmp,
&first_glyphs_indirection[0]);
/* Now that the first-glyphs are sorted, walk again, populate ligatures. */ /* Now that the first-glyphs are sorted, walk again, populate ligatures. */
for (unsigned int i = 0; i < num_first_glyphs; i++) for (unsigned int i = 0; i < num_first_glyphs; i++)
@@ -142,9 +145,9 @@ arabic_fallback_synthesize_lookup_ligature (const hb_ot_shape_plan_t *plan HB_UN
ligature_per_first_glyph_count_list[i]++; ligature_per_first_glyph_count_list[i]++;
ligature_list[num_ligatures].set (ligature_glyph); ligature_list[num_ligatures] = ligature_glyph;
component_count_list[num_ligatures] = 2; component_count_list[num_ligatures] = 2;
component_list[num_ligatures].set (second_glyph); component_list[num_ligatures] = second_glyph;
num_ligatures++; num_ligatures++;
} }
} }
@@ -159,7 +162,7 @@ arabic_fallback_synthesize_lookup_ligature (const hb_ot_shape_plan_t *plan HB_UN
OT::SubstLookup *lookup = c.start_serialize<OT::SubstLookup> (); OT::SubstLookup *lookup = c.start_serialize<OT::SubstLookup> ();
bool ret = lookup->serialize_ligature (&c, bool ret = lookup->serialize_ligature (&c,
OT::LookupFlag::IgnoreMarks, OT::LookupFlag::IgnoreMarks,
hb_array (first_glyphs, num_first_glyphs), hb_sorted_array (first_glyphs, num_first_glyphs),
hb_array (ligature_per_first_glyph_count_list, num_first_glyphs), hb_array (ligature_per_first_glyph_count_list, num_first_glyphs),
hb_array (ligature_list, num_ligatures), hb_array (ligature_list, num_ligatures),
hb_array (component_count_list, num_ligatures), hb_array (component_count_list, num_ligatures),

View File

@@ -383,6 +383,10 @@ arabic_fallback_shape (const hb_ot_shape_plan_t *plan,
hb_font_t *font, hb_font_t *font,
hb_buffer_t *buffer) hb_buffer_t *buffer)
{ {
#ifdef HB_NO_OT_SHAPE_COMPLEX_ARABIC_FALLBACK
return;
#endif
const arabic_shape_plan_t *arabic_plan = (const arabic_shape_plan_t *) plan->data; const arabic_shape_plan_t *arabic_plan = (const arabic_shape_plan_t *) plan->data;
if (!arabic_plan->do_fallback) if (!arabic_plan->do_fallback)

View File

@@ -70,6 +70,10 @@ compose_hebrew (const hb_ot_shape_normalize_context_t *c,
bool found = (bool) c->unicode->compose (a, b, ab); bool found = (bool) c->unicode->compose (a, b, ab);
#ifdef HB_NO_OT_SHAPE_COMPLEX_HEBREW_FALLBACK
return found;
#endif
if (!found && !c->plan->has_gpos_mark) if (!found && !c->plan->has_gpos_mark)
{ {
/* Special-case Hebrew presentation forms that are excluded from /* Special-case Hebrew presentation forms that are excluded from

File diff suppressed because it is too large Load Diff

View File

@@ -64,14 +64,14 @@ reph = (Ra H | Repha); # possible reph
cn = c.ZWJ?.n?; cn = c.ZWJ?.n?;
forced_rakar = ZWJ H ZWJ Ra; forced_rakar = ZWJ H ZWJ Ra;
symbol = Symbol.N?; symbol = Symbol.N?;
matra_group = z{0,3}.M.N?.(H | forced_rakar)?; matra_group = z*.M.N?.(H | forced_rakar)?;
syllable_tail = (z?.SM.SM?.ZWNJ?)? A{0,3}?; syllable_tail = (z?.SM.SM?.ZWNJ?)? A*;
halant_group = (z?.H.(ZWJ.N?)?); halant_group = (z?.H.(ZWJ.N?)?);
final_halant_group = halant_group | H.ZWNJ; final_halant_group = halant_group | H.ZWNJ;
medial_group = CM?; medial_group = CM?;
halant_or_matra_group = (final_halant_group | matra_group{0,4}); halant_or_matra_group = (final_halant_group | matra_group*);
complex_syllable_tail = (halant_group.cn){0,4} medial_group halant_or_matra_group syllable_tail; complex_syllable_tail = (halant_group.cn)* medial_group halant_or_matra_group syllable_tail;
consonant_syllable = (Repha|CS)? cn complex_syllable_tail; consonant_syllable = (Repha|CS)? cn complex_syllable_tail;
vowel_syllable = reph? V.n? (ZWJ | complex_syllable_tail); vowel_syllable = reph? V.n? (ZWJ | complex_syllable_tail);

View File

@@ -274,7 +274,11 @@ struct indic_shape_plan_t
const indic_config_t *config; const indic_config_t *config;
bool is_old_spec; bool is_old_spec;
#ifndef HB_NO_UNISCRIBE_BUG_COMPATIBLE
bool uniscribe_bug_compatible; bool uniscribe_bug_compatible;
#else
static constexpr bool uniscribe_bug_compatible = false;
#endif
mutable hb_atomic_int_t virama_glyph; mutable hb_atomic_int_t virama_glyph;
would_substitute_feature_t rphf; would_substitute_feature_t rphf;
@@ -300,7 +304,9 @@ data_create_indic (const hb_ot_shape_plan_t *plan)
} }
indic_plan->is_old_spec = indic_plan->config->has_old_spec && ((plan->map.chosen_script[0] & 0x000000FFu) != '2'); indic_plan->is_old_spec = indic_plan->config->has_old_spec && ((plan->map.chosen_script[0] & 0x000000FFu) != '2');
#ifndef HB_NO_UNISCRIBE_BUG_COMPATIBLE
indic_plan->uniscribe_bug_compatible = hb_options ().uniscribe_bug_compatible; indic_plan->uniscribe_bug_compatible = hb_options ().uniscribe_bug_compatible;
#endif
indic_plan->virama_glyph.set_relaxed (-1); indic_plan->virama_glyph.set_relaxed (-1);
/* Use zero-context would_substitute() matching for new-spec of the main /* Use zero-context would_substitute() matching for new-spec of the main
@@ -645,7 +651,7 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan,
/* Reorder characters */ /* Reorder characters */
for (unsigned int i = start; i < base; i++) for (unsigned int i = start; i < base; i++)
info[i].indic_position() = MIN (POS_PRE_C, (indic_position_t) info[i].indic_position()); info[i].indic_position() = hb_min (POS_PRE_C, (indic_position_t) info[i].indic_position());
if (base < end) if (base < end)
info[base].indic_position() = POS_BASE_C; info[base].indic_position() = POS_BASE_C;
@@ -801,7 +807,7 @@ initial_reordering_consonant_syllable (const hb_ot_shape_plan_t *plan,
unsigned int j = start + info[i].syllable(); unsigned int j = start + info[i].syllable();
while (j != i) while (j != i)
{ {
max = MAX (max, j); max = hb_max (max, j);
unsigned int next = start + info[j].syllable(); unsigned int next = start + info[j].syllable();
info[j].syllable() = 255; /* So we don't process j later again. */ info[j].syllable() = 255; /* So we don't process j later again. */
j = next; j = next;
@@ -918,11 +924,10 @@ initial_reordering_standalone_cluster (const hb_ot_shape_plan_t *plan,
hb_buffer_t *buffer, hb_buffer_t *buffer,
unsigned int start, unsigned int end) unsigned int start, unsigned int end)
{ {
const indic_shape_plan_t *indic_plan = (const indic_shape_plan_t *) plan->data;
/* We treat placeholder/dotted-circle as if they are consonants, so we /* We treat placeholder/dotted-circle as if they are consonants, so we
* should just chain. Only if not in compatibility mode that is... */ * should just chain. Only if not in compatibility mode that is... */
const indic_shape_plan_t *indic_plan = (const indic_shape_plan_t *) plan->data;
if (indic_plan->uniscribe_bug_compatible) if (indic_plan->uniscribe_bug_compatible)
{ {
/* For dotted-circle, this is what Uniscribe does: /* For dotted-circle, this is what Uniscribe does:
@@ -1008,7 +1013,6 @@ insert_dotted_circles (const hb_ot_shape_plan_t *plan HB_UNUSED,
ginfo.cluster = buffer->cur().cluster; ginfo.cluster = buffer->cur().cluster;
ginfo.mask = buffer->cur().mask; ginfo.mask = buffer->cur().mask;
ginfo.syllable() = buffer->cur().syllable(); ginfo.syllable() = buffer->cur().syllable();
/* TODO Set glyph_props? */
/* Insert dottedcircle after possible Repha. */ /* Insert dottedcircle after possible Repha. */
while (buffer->idx < buffer->len && buffer->successful && while (buffer->idx < buffer->len && buffer->successful &&
@@ -1232,14 +1236,14 @@ final_reordering_syllable (const hb_ot_shape_plan_t *plan,
/* Note: this merge_clusters() is intentionally *after* the reordering. /* Note: this merge_clusters() is intentionally *after* the reordering.
* Indic matra reordering is special and tricky... */ * Indic matra reordering is special and tricky... */
buffer->merge_clusters (new_pos, MIN (end, base + 1)); buffer->merge_clusters (new_pos, hb_min (end, base + 1));
new_pos--; new_pos--;
} }
} else { } else {
for (unsigned int i = start; i < base; i++) for (unsigned int i = start; i < base; i++)
if (info[i].indic_position () == POS_PRE_M) { if (info[i].indic_position () == POS_PRE_M) {
buffer->merge_clusters (i, MIN (end, base + 1)); buffer->merge_clusters (i, hb_min (end, base + 1));
break; break;
} }
} }
@@ -1372,13 +1376,15 @@ final_reordering_syllable (const hb_ot_shape_plan_t *plan,
* TEST: U+0930,U+094D,U+0915,U+094B,U+094D * TEST: U+0930,U+094D,U+0915,U+094B,U+094D
*/ */
if (!indic_plan->uniscribe_bug_compatible && if (!indic_plan->uniscribe_bug_compatible &&
unlikely (is_halant (info[new_reph_pos]))) { unlikely (is_halant (info[new_reph_pos])))
{
for (unsigned int i = base + 1; i < new_reph_pos; i++) for (unsigned int i = base + 1; i < new_reph_pos; i++)
if (info[i].indic_category() == OT_M) { if (info[i].indic_category() == OT_M) {
/* Ok, got it. */ /* Ok, got it. */
new_reph_pos--; new_reph_pos--;
} }
} }
goto reph_move; goto reph_move;
} }
@@ -1591,11 +1597,10 @@ decompose_indic (const hb_ot_shape_normalize_context_t *c,
* https://docs.microsoft.com/en-us/typography/script-development/sinhala#shaping * https://docs.microsoft.com/en-us/typography/script-development/sinhala#shaping
*/ */
const indic_shape_plan_t *indic_plan = (const indic_shape_plan_t *) c->plan->data; const indic_shape_plan_t *indic_plan = (const indic_shape_plan_t *) c->plan->data;
hb_codepoint_t glyph; hb_codepoint_t glyph;
if (indic_plan->uniscribe_bug_compatible ||
if (hb_options ().uniscribe_bug_compatible ||
(c->font->get_nominal_glyph (ab, &glyph) && (c->font->get_nominal_glyph (ab, &glyph) &&
indic_plan->pstf.would_substitute (&glyph, 1, c->font->face))) indic_plan->pstf.would_substitute (&glyph, 1, c->font->face)))
{ {

View File

@@ -278,7 +278,7 @@ matra_position_indic (hb_codepoint_t u, indic_position_t side)
case POS_POST_C: return MATRA_POS_RIGHT (u); case POS_POST_C: return MATRA_POS_RIGHT (u);
case POS_ABOVE_C: return MATRA_POS_TOP (u); case POS_ABOVE_C: return MATRA_POS_TOP (u);
case POS_BELOW_C: return MATRA_POS_BOTTOM (u); case POS_BELOW_C: return MATRA_POS_BOTTOM (u);
}; }
return side; return side;
} }

Some files were not shown because too many files have changed in this diff Show More