From f6418d582fc2294983bfa647c7148a263af13db5 Mon Sep 17 00:00:00 2001
From: Jose Colon Rodriguez
Date: Tue, 27 Feb 2024 11:16:41 -0400
Subject: Using NormPropsData in NormData; No Ziglyph deps in Normalizer

---
 build.zig             |  19 ++++++-
 codegen/normp.zig     | 135 ++++++++++++++++++++++++++++++++++++++++++++++++++
 src/NormData.zig      |   4 ++
 src/NormPropsData.zig |  53 ++++++++++++++++++++
 src/Normalizer.zig    |   9 ++--
 src/main.zig          |  10 ++--
 6 files changed, 220 insertions(+), 10 deletions(-)
 create mode 100644 codegen/normp.zig
 create mode 100644 src/NormPropsData.zig

diff --git a/build.zig b/build.zig
index ee57466..7e41a9a 100644
--- a/build.zig
+++ b/build.zig
@@ -61,6 +61,15 @@ pub fn build(b: *std.Build) void {
     const run_hangul_gen_exe = b.addRunArtifact(hangul_gen_exe);
     const hangul_gen_out = run_hangul_gen_exe.addOutputFileArg("hangul.bin.z");
 
+    const normp_gen_exe = b.addExecutable(.{
+        .name = "normp",
+        .root_source_file = .{ .path = "codegen/normp.zig" },
+        .target = b.host,
+        .optimize = .Debug,
+    });
+    const run_normp_gen_exe = b.addRunArtifact(normp_gen_exe);
+    const normp_gen_out = run_normp_gen_exe.addOutputFileArg("normp.bin.z");
+
     const ccc_gen_exe = b.addExecutable(.{
         .name = "ccc",
         .root_source_file = .{ .path = "codegen/ccc.zig" },
@@ -149,6 +158,13 @@ pub fn build(b: *std.Build) void {
     });
     hangul_data.addAnonymousImport("hangul", .{ .root_source_file = hangul_gen_out });
 
+    const normp_data = b.createModule(.{
+        .root_source_file = .{ .path = "src/NormPropsData.zig" },
+        .target = target,
+        .optimize = optimize,
+    });
+    normp_data.addAnonymousImport("normp", .{ .root_source_file = normp_gen_out });
+
     const norm_data = b.createModule(.{
         .root_source_file = .{ .path = "src/NormData.zig" },
         .target = target,
@@ -158,6 +174,7 @@ pub fn build(b: *std.Build) void {
     norm_data.addImport("CombiningData", ccc_data);
     norm_data.addImport("CompatData", compat_data);
     norm_data.addImport("HangulData", hangul_data);
+    norm_data.addImport("NormPropsData", normp_data);
 
     const norm = b.addModule("Normalizer", .{
         .root_source_file = .{ .path = "src/Normalizer.zig" },
@@ -200,7 +217,7 @@ pub fn build(b: *std.Build) void {
     exe_unit_tests.root_module.addImport("code_point", code_point);
     // exe_unit_tests.root_module.addImport("GraphemeData", grapheme_data);
     // exe_unit_tests.root_module.addImport("grapheme", grapheme);
-    exe_unit_tests.root_module.addImport("ziglyph", ziglyph.module("ziglyph"));
+    // exe_unit_tests.root_module.addImport("ziglyph", ziglyph.module("ziglyph"));
     // exe_unit_tests.root_module.addAnonymousImport("normp", .{ .root_source_file = normp_gen_out });
     // exe_unit_tests.root_module.addImport("DisplayWidthData", dw_data);
     exe_unit_tests.root_module.addImport("NormData", norm_data);
diff --git a/codegen/normp.zig b/codegen/normp.zig
new file mode 100644
index 0000000..a332e73
--- /dev/null
+++ b/codegen/normp.zig
@@ -0,0 +1,135 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+const block_size = 256;
+const Block = [block_size]u3;
+
+const BlockMap = std.HashMap(
+    Block,
+    u16,
+    struct {
+        pub fn hash(_: @This(), k: Block) u64 {
+            var hasher = std.hash.Wyhash.init(0);
+            std.hash.autoHashStrat(&hasher, k, .DeepRecursive);
+            return hasher.final();
+        }
+
+        pub fn eql(_: @This(), a: Block, b: Block) bool {
+            return std.mem.eql(u3, &a, &b);
+        }
+    },
+    std.hash_map.default_max_load_percentage,
+);
+
+pub fn main() !void {
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+    defer arena.deinit();
+    const allocator = arena.allocator();
+
+    var flat_map = std.AutoHashMap(u21, u3).init(allocator);
+    defer flat_map.deinit();
+
+    var line_buf: [4096]u8 = undefined;
+
+    // Process DerivedNormalizationProps.txt
+    var in_file = try std.fs.cwd().openFile("data/unicode/DerivedNormalizationProps.txt", .{});
+    defer in_file.close();
+    var in_buf = std.io.bufferedReader(in_file.reader());
+    const in_reader = in_buf.reader();
+
+    while (try in_reader.readUntilDelimiterOrEof(&line_buf, '\n')) |line| {
+        if (line.len == 0 or line[0] == '#') continue;
+
+        const no_comment = if (std.mem.indexOfScalar(u8, line, '#')) |octo| line[0..octo] else line;
+
+        var field_iter = std.mem.tokenizeAny(u8, no_comment, "; ");
+        var current_code: [2]u21 = undefined;
+
+        var i: usize = 0;
+        while (field_iter.next()) |field| : (i += 1) {
+            switch (i) {
+                0 => {
+                    // Code point(s)
+                    if (std.mem.indexOf(u8, field, "..")) |dots| {
+                        current_code = .{
+                            try std.fmt.parseInt(u21, field[0..dots], 16),
+                            try std.fmt.parseInt(u21, field[dots + 2 ..], 16),
+                        };
+                    } else {
+                        const code = try std.fmt.parseInt(u21, field, 16);
+                        current_code = .{ code, code };
+                    }
+                },
+                1 => {
+                    // Norm props
+                    for (current_code[0]..current_code[1] + 1) |cp| {
+                        const gop = try flat_map.getOrPut(@intCast(cp));
+                        if (!gop.found_existing) gop.value_ptr.* = 0;
+
+                        if (std.mem.eql(u8, field, "NFD_QC")) {
+                            gop.value_ptr.* |= 1;
+                        } else if (std.mem.eql(u8, field, "NFKD_QC")) {
+                            gop.value_ptr.* |= 2;
+                        } else if (std.mem.eql(u8, field, "Full_Composition_Exclusion")) {
+                            gop.value_ptr.* |= 4;
+                        }
+                    }
+                },
+                else => {},
+            }
+        }
+    }
+
+    var blocks_map = BlockMap.init(allocator);
+    defer blocks_map.deinit();
+
+    var stage1 = std.ArrayList(u16).init(allocator);
+    defer stage1.deinit();
+
+    var stage2 = std.ArrayList(u3).init(allocator);
+    defer stage2.deinit();
+
+    var block: Block = [_]u3{0} ** block_size;
+    var block_len: u16 = 0;
+
+    for (0..0x110000) |i| {
+        const cp: u21 = @intCast(i);
+        const props = flat_map.get(cp) orelse 0;
+
+        // Process block
+        block[block_len] = props;
+        block_len += 1;
+
+        if (block_len < block_size and cp != 0x10ffff) continue;
+
+        const gop = try blocks_map.getOrPut(block);
+        if (!gop.found_existing) {
+            gop.value_ptr.* = @intCast(stage2.items.len);
+            try stage2.appendSlice(&block);
+        }
+
+        try stage1.append(gop.value_ptr.*);
+        block_len = 0;
+    }
+
+    var args_iter = try std.process.argsWithAllocator(allocator);
+    defer args_iter.deinit();
+    _ = args_iter.skip();
+    const output_path = args_iter.next() orelse @panic("No output file arg!");
+
+    const compressor = std.compress.deflate.compressor;
+    var out_file = try std.fs.cwd().createFile(output_path, .{});
+    defer out_file.close();
+    var out_comp = try compressor(allocator, out_file.writer(), .{ .level = .best_compression });
+    defer out_comp.deinit();
+    const writer = out_comp.writer();
+
+    const endian = builtin.cpu.arch.endian();
+    try writer.writeInt(u16, @intCast(stage1.items.len), endian);
+    for (stage1.items) |i| try writer.writeInt(u16, i, endian);
+
+    try writer.writeInt(u16, @intCast(stage2.items.len), endian);
+    for (stage2.items) |i| try writer.writeInt(u8, i, endian);
+
+    try out_comp.flush();
+}
diff --git a/src/NormData.zig b/src/NormData.zig
index 8923382..7c2a09b 100644
--- a/src/NormData.zig
+++ b/src/NormData.zig
@@ -5,11 +5,13 @@ const CanonData = @import("CanonData");
 const CccData = @import("CombiningData");
 const CompatData = @import("CompatData");
 const HangulData = @import("HangulData");
+const NormPropsData = @import("NormPropsData");
 
 canon_data: CanonData,
 ccc_data: CccData,
 compat_data: CompatData,
 hangul_data: HangulData,
+normp_data: NormPropsData,
 
 const Self = @This();
 
@@ -19,6 +21,7 @@ pub fn init(allocator: std.mem.Allocator) !Self {
         .ccc_data = try CccData.init(allocator),
         .compat_data = try CompatData.init(allocator),
         .hangul_data = try HangulData.init(allocator),
+        .normp_data = try NormPropsData.init(allocator),
     };
 }
 
@@ -27,4 +30,5 @@ pub fn deinit(self: *Self) void {
     self.ccc_data.deinit();
     self.compat_data.deinit();
     self.hangul_data.deinit();
+    self.normp_data.deinit();
 }
diff --git a/src/NormPropsData.zig b/src/NormPropsData.zig
new file mode 100644
index 0000000..3c49712
--- /dev/null
+++ b/src/NormPropsData.zig
@@ -0,0 +1,53 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const compress = std.compress;
+const mem = std.mem;
+const testing = std.testing;
+
+allocator: mem.Allocator,
+s1: []u16 = undefined,
+s2: []u4 = undefined,
+
+const Self = @This();
+
+pub fn init(allocator: mem.Allocator) !Self {
+    const decompressor = compress.deflate.decompressor;
+    const in_bytes = @embedFile("normp");
+    var in_fbs = std.io.fixedBufferStream(in_bytes);
+    var in_decomp = try decompressor(allocator, in_fbs.reader(), null);
+    defer in_decomp.deinit();
+    var reader = in_decomp.reader();
+
+    const endian = builtin.cpu.arch.endian();
+    var self = Self{ .allocator = allocator };
+
+    const stage_1_len: u16 = try reader.readInt(u16, endian);
+    self.s1 = try allocator.alloc(u16, stage_1_len);
+    for (0..stage_1_len) |i| self.s1[i] = try reader.readInt(u16, endian);
+
+    const stage_2_len: u16 = try reader.readInt(u16, endian);
+    self.s2 = try allocator.alloc(u4, stage_2_len);
+    for (0..stage_2_len) |i| self.s2[i] = @intCast(try reader.readInt(u8, endian));
+
+    return self;
+}
+
+pub fn deinit(self: *Self) void {
+    self.allocator.free(self.s1);
+    self.allocator.free(self.s2);
+}
+
+/// Returns true if `cp` is already in NFD form.
+pub inline fn isNfd(self: Self, cp: u21) bool {
+    return self.s2[self.s1[cp >> 8] + (cp & 0xff)] & 1 == 0;
+}
+
+/// Returns true if `cp` is already in NFKD form.
+pub inline fn isNfkd(self: Self, cp: u21) bool {
+    return self.s2[self.s1[cp >> 8] + (cp & 0xff)] & 2 == 0;
+}
+
+/// Returns true if `cp` is not allowed in any normalized form.
+pub inline fn isFcx(self: Self, cp: u21) bool {
+    return self.s2[self.s1[cp >> 8] + (cp & 0xff)] & 4 == 4;
+}
diff --git a/src/Normalizer.zig b/src/Normalizer.zig
index d1d7cee..26177ac 100644
--- a/src/Normalizer.zig
+++ b/src/Normalizer.zig
@@ -6,8 +6,6 @@ const std = @import("std");
 const testing = std.testing;
 const CodePointIterator = @import("code_point").Iterator;
 
-const norm_props = @import("ziglyph").normalization_props;
-
 pub const NormData = @import("NormData");
 
 norm_data: *NormData,
@@ -109,7 +107,10 @@ pub fn decompose(self: Self, cp: u21, form: Form) Decomp {
     var dc = Decomp{ .form = form };
 
     // ASCII or NFD / NFKD quick checks.
-    if (cp <= 127 or (form == .nfd and norm_props.isNfd(cp)) or (form == .nfkd and norm_props.isNfkd(cp))) {
+    if (cp <= 127 or
+        (form == .nfd and self.norm_data.normp_data.isNfd(cp)) or
+        (form == .nfkd and self.norm_data.normp_data.isNfkd(cp)))
+    {
         dc.cps[0] = cp;
         return dc;
     }
@@ -436,7 +437,7 @@ fn nfxc(self: Self, allocator: std.mem.Allocator, str: []const u8, form: Form) !
         if (!processed_hangul) {
            // L -> C not Hangul.
            if (self.norm_data.canon_data.toNfc(.{ L, C })) |P| {
-                if (!norm_props.isFcx(P)) {
+                if (!self.norm_data.normp_data.isFcx(P)) {
                    d_list.items[sidx] = P;
                    d_list.items[i] = tombstone; // Mark for deletion.
                    deleted += 1;
diff --git a/src/main.zig b/src/main.zig
index 2c2cf8c..15dca16 100644
--- a/src/main.zig
+++ b/src/main.zig
@@ -25,9 +25,9 @@ pub fn main() !void {
     _ = args_iter.skip();
     const in_path = args_iter.next() orelse return error.MissingArg;
 
-    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
-    defer _ = gpa.deinit();
-    const allocator = gpa.allocator();
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+    defer arena.deinit();
+    const allocator = arena.allocator();
 
     const input = try std.fs.cwd().readFileAlloc(allocator, in_path, std.math.maxInt(u32));
     defer allocator.free(input);
@@ -51,9 +51,9 @@ pub fn main() !void {
     // while (iter.next()) |_| result += 1;
     // while (iter.next()) |line| result += strWidth(line, &data);
     while (iter.next()) |line| {
-        var nfc = try n.nfc(allocator, line);
+        const nfc = try n.nfc(allocator, line);
         result += nfc.slice.len;
-        nfc.deinit();
+        // nfc.deinit();
     }
 
     std.debug.print("result: {}, took: {}\n", .{ result, timer.lap() / std.time.ns_per_ms });
-- 
cgit v1.2.3
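
A note on the data layout, separate from the patch itself: codegen/normp.zig packs the three per-code-point flags (NFD_QC, NFKD_QC, Full_Composition_Exclusion) into a two-stage table, deduplicating identical 256-entry blocks via BlockMap, and NormPropsData.zig reads a flag back with the expression s2[s1[cp >> 8] + (cp & 0xff)]. The following is a minimal, self-contained sketch of that lookup; the toy s1/s2 tables are invented here for illustration and are not the generated normp.bin.z data.

const std = @import("std");

// Two-stage lookup as in NormPropsData.zig: the high bits of the code point
// select a block through stage 1; the low 8 bits index into that block in
// stage 2.
fn props(s1: []const u16, s2: []const u8, cp: u21) u8 {
    return s2[s1[cp >> 8] + (cp & 0xff)];
}

pub fn main() void {
    // Hypothetical toy tables covering code points 0x0000-0x01FF with two
    // blocks. Block 1 marks offset 0x01 with bit 1, which in the real data
    // means NFD_QC=No.
    var s2 = [_]u8{0} ** 512;
    s2[256 + 0x01] = 1;
    const s1 = [_]u16{ 0, 256 };
    std.debug.print("U+0041 -> {}\n", .{props(&s1, &s2, 0x0041)}); // prints 0
    std.debug.print("U+0101 -> {}\n", .{props(&s1, &s2, 0x0101)}); // prints 1
}

The deduplication is what keeps stage 2 small: across the 0x110000-code-point sweep, most 256-entry blocks are identical (all zeros), so BlockMap maps them all to a single shared stage-2 offset.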