From 32c68059a05dde8a57a330db6d14a32506081516 Mon Sep 17 00:00:00 2001
From: Jose Colon Rodriguez
Date: Tue, 27 Feb 2024 09:26:40 -0400
Subject: Using HangulData in NormData

---
 build.zig          |  19 +++++++-
 codegen/hangul.zig | 134 +++++++++++++++++++++++++++++++++++++++++++++++++++++
 src/HangulData.zig |  52 +++++++++++++++++++++
 src/NormData.zig   |   4 ++
 src/Normalizer.zig |  23 +++++----
 5 files changed, 219 insertions(+), 13 deletions(-)
 create mode 100644 codegen/hangul.zig
 create mode 100644 src/HangulData.zig

diff --git a/build.zig b/build.zig
index fa3660e..ee57466 100644
--- a/build.zig
+++ b/build.zig
@@ -52,6 +52,15 @@ pub fn build(b: *std.Build) void {
     const run_compat_gen_exe = b.addRunArtifact(compat_gen_exe);
     const compat_gen_out = run_compat_gen_exe.addOutputFileArg("compat.bin.z");
 
+    const hangul_gen_exe = b.addExecutable(.{
+        .name = "hangul",
+        .root_source_file = .{ .path = "codegen/hangul.zig" },
+        .target = b.host,
+        .optimize = .Debug,
+    });
+    const run_hangul_gen_exe = b.addRunArtifact(hangul_gen_exe);
+    const hangul_gen_out = run_hangul_gen_exe.addOutputFileArg("hangul.bin.z");
+
     const ccc_gen_exe = b.addExecutable(.{
         .name = "ccc",
         .root_source_file = .{ .path = "codegen/ccc.zig" },
@@ -133,14 +142,22 @@ pub fn build(b: *std.Build) void {
     });
     compat_data.addAnonymousImport("compat", .{ .root_source_file = compat_gen_out });
 
+    const hangul_data = b.createModule(.{
+        .root_source_file = .{ .path = "src/HangulData.zig" },
+        .target = target,
+        .optimize = optimize,
+    });
+    hangul_data.addAnonymousImport("hangul", .{ .root_source_file = hangul_gen_out });
+
     const norm_data = b.createModule(.{
         .root_source_file = .{ .path = "src/NormData.zig" },
         .target = target,
         .optimize = optimize,
     });
     norm_data.addImport("CanonData", canon_data);
-    norm_data.addImport("CompatData", compat_data);
     norm_data.addImport("CombiningData", ccc_data);
+    norm_data.addImport("CompatData", compat_data);
+    norm_data.addImport("HangulData", hangul_data);
 
     const norm = b.addModule("Normalizer", .{
         .root_source_file = .{ .path = "src/Normalizer.zig" },
diff --git a/codegen/hangul.zig b/codegen/hangul.zig
new file mode 100644
index 0000000..ab1a861
--- /dev/null
+++ b/codegen/hangul.zig
@@ -0,0 +1,134 @@
+const std = @import("std");
+const builtin = @import("builtin");
+
+const Syllable = enum {
+    none,
+    L,
+    LV,
+    LVT,
+    V,
+    T,
+};
+
+const block_size = 256;
+const Block = [block_size]u3;
+
+const BlockMap = std.HashMap(
+    Block,
+    u16,
+    struct {
+        pub fn hash(_: @This(), k: Block) u64 {
+            var hasher = std.hash.Wyhash.init(0);
+            std.hash.autoHashStrat(&hasher, k, .DeepRecursive);
+            return hasher.final();
+        }
+
+        pub fn eql(_: @This(), a: Block, b: Block) bool {
+            return std.mem.eql(u3, &a, &b);
+        }
+    },
+    std.hash_map.default_max_load_percentage,
+);
+
+pub fn main() !void {
+    var arena = std.heap.ArenaAllocator.init(std.heap.page_allocator);
+    defer arena.deinit();
+    const allocator = arena.allocator();
+
+    var flat_map = std.AutoHashMap(u21, u3).init(allocator);
+    defer flat_map.deinit();
+
+    var line_buf: [4096]u8 = undefined;
+
+    // Process HangulSyllableType.txt
+    var in_file = try std.fs.cwd().openFile("data/unicode/HangulSyllableType.txt", .{});
+    defer in_file.close();
+    var in_buf = std.io.bufferedReader(in_file.reader());
+    const in_reader = in_buf.reader();
+
+    while (try in_reader.readUntilDelimiterOrEof(&line_buf, '\n')) |line| {
+        if (line.len == 0 or line[0] == '#') continue;
+
+        const no_comment = if (std.mem.indexOfScalar(u8, line, '#')) |octo| line[0..octo] else line;
+
+        var field_iter = std.mem.tokenizeAny(u8, no_comment, "; ");
+        var current_code: [2]u21 = undefined;
+
+        var i: usize = 0;
+        while (field_iter.next()) |field| : (i += 1) {
+            switch (i) {
+                0 => {
+                    // Code point(s)
+                    if (std.mem.indexOf(u8, field, "..")) |dots| {
+                        current_code = .{
+                            try std.fmt.parseInt(u21, field[0..dots], 16),
+                            try std.fmt.parseInt(u21, field[dots + 2 ..], 16),
+                        };
+                    } else {
+                        const code = try std.fmt.parseInt(u21, field, 16);
+                        current_code = .{ code, code };
+                    }
+                },
+                1 => {
+                    // Syllable type
+                    const st: Syllable = std.meta.stringToEnum(Syllable, field) orelse .none;
+                    for (current_code[0]..current_code[1] + 1) |cp| try flat_map.put(@intCast(cp), @intFromEnum(st));
+                },
+                else => {},
+            }
+        }
+    }
+
+    var blocks_map = BlockMap.init(allocator);
+    defer blocks_map.deinit();
+
+    var stage1 = std.ArrayList(u16).init(allocator);
+    defer stage1.deinit();
+
+    var stage2 = std.ArrayList(u3).init(allocator);
+    defer stage2.deinit();
+
+    var block: Block = [_]u3{0} ** block_size;
+    var block_len: u16 = 0;
+
+    for (0..0x110000) |i| {
+        const cp: u21 = @intCast(i);
+        const st = flat_map.get(cp) orelse 0;
+
+        // Process block
+        block[block_len] = st;
+        block_len += 1;
+
+        if (block_len < block_size and cp != 0x10ffff) continue;
+
+        const gop = try blocks_map.getOrPut(block);
+        if (!gop.found_existing) {
+            gop.value_ptr.* = @intCast(stage2.items.len);
+            try stage2.appendSlice(&block);
+        }
+
+        try stage1.append(gop.value_ptr.*);
+        block_len = 0;
+    }
+
+    var args_iter = try std.process.argsWithAllocator(allocator);
+    defer args_iter.deinit();
+    _ = args_iter.skip();
+    const output_path = args_iter.next() orelse @panic("No output file arg!");
+
+    const compressor = std.compress.deflate.compressor;
+    var out_file = try std.fs.cwd().createFile(output_path, .{});
+    defer out_file.close();
+    var out_comp = try compressor(allocator, out_file.writer(), .{ .level = .best_compression });
+    defer out_comp.deinit();
+    const writer = out_comp.writer();
+
+    const endian = builtin.cpu.arch.endian();
+    try writer.writeInt(u16, @intCast(stage1.items.len), endian);
+    for (stage1.items) |i| try writer.writeInt(u16, i, endian);
+
+    try writer.writeInt(u16, @intCast(stage2.items.len), endian);
+    for (stage2.items) |i| try writer.writeInt(u8, i, endian);
+
+    try out_comp.flush();
+}
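
Note on the generated format (annotation, not part of the patch): stage1 holds one u16 offset per 256-code-point block, stage2 holds the deduplicated blocks of syllable types, so a query is two array loads. A minimal sketch of that lookup path, with toy tables standing in for the generated data (the helper name and values here are illustrative only):

const std = @import("std");

// Two-stage lookup: s1 maps a code point's block (cp >> 8) to an offset
// into s2; the low byte (cp & 0xff) indexes within that block. This mirrors
// the `syllable` function in src/HangulData.zig below.
fn lookup(s1: []const u16, s2: []const u8, cp: u21) u8 {
    return s2[s1[cp >> 8] + (cp & 0xff)];
}

test "two-stage lookup with toy tables" {
    // One shared all-zero block plus one distinct block for U+1100..U+11FF.
    var s2 = [_]u8{0} ** 512;
    s2[256 + 0x00] = 1; // pretend U+1100 maps to Syllable.L
    var s1 = [_]u16{0} ** (0x110000 >> 8);
    s1[0x1100 >> 8] = 256; // the block holding U+1100 uses the second s2 block
    try std.testing.expectEqual(@as(u8, 1), lookup(&s1, &s2, 0x1100));
    try std.testing.expectEqual(@as(u8, 0), lookup(&s1, &s2, 'A'));
}

Because most blocks are identical (almost all of the code space has no Hangul syllable type), the BlockMap deduplication keeps stage2 to a handful of blocks.
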
diff --git a/src/HangulData.zig b/src/HangulData.zig
new file mode 100644
index 0000000..4d80c99
--- /dev/null
+++ b/src/HangulData.zig
@@ -0,0 +1,52 @@
+const std = @import("std");
+const builtin = @import("builtin");
+const compress = std.compress;
+const mem = std.mem;
+const testing = std.testing;
+
+pub const Syllable = enum {
+    none,
+    L,
+    LV,
+    LVT,
+    V,
+    T,
+};
+
+allocator: mem.Allocator,
+s1: []u16 = undefined,
+s2: []Syllable = undefined,
+
+const Self = @This();
+
+pub fn init(allocator: mem.Allocator) !Self {
+    const decompressor = compress.deflate.decompressor;
+    const in_bytes = @embedFile("hangul");
+    var in_fbs = std.io.fixedBufferStream(in_bytes);
+    var in_decomp = try decompressor(allocator, in_fbs.reader(), null);
+    defer in_decomp.deinit();
+    var reader = in_decomp.reader();
+
+    const endian = builtin.cpu.arch.endian();
+    var self = Self{ .allocator = allocator };
+
+    const stage_1_len: u16 = try reader.readInt(u16, endian);
+    self.s1 = try allocator.alloc(u16, stage_1_len);
+    for (0..stage_1_len) |i| self.s1[i] = try reader.readInt(u16, endian);
+
+    const stage_2_len: u16 = try reader.readInt(u16, endian);
+    self.s2 = try allocator.alloc(Syllable, stage_2_len);
+    for (0..stage_2_len) |i| self.s2[i] = @enumFromInt(try reader.readInt(u8, endian));
+
+    return self;
+}
+
+pub fn deinit(self: *Self) void {
+    self.allocator.free(self.s1);
+    self.allocator.free(self.s2);
+}
+
+/// Returns the Hangul syllable type for `cp`.
+pub inline fn syllable(self: Self, cp: u21) Syllable {
+    return self.s2[self.s1[cp >> 8] + (cp & 0xff)];
+}
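
Rough usage sketch for the new module (annotation, not part of the patch): a toy driver assuming the anonymous "hangul" import wired up in build.zig above; the expected values follow HangulSyllableType.txt.

const std = @import("std");
const HangulData = @import("HangulData");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    // Decompresses the embedded table data on startup.
    var data = try HangulData.init(gpa.allocator());
    defer data.deinit();

    std.debug.assert(data.syllable(0x1100) == .L); // HANGUL CHOSEONG KIYEOK
    std.debug.assert(data.syllable(0xAC00) == .LV); // HANGUL SYLLABLE GA
    std.debug.assert(data.syllable('A') == .none); // not Hangul
}
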
diff --git a/src/NormData.zig b/src/NormData.zig
index 83110f0..8923382 100644
--- a/src/NormData.zig
+++ b/src/NormData.zig
@@ -4,10 +4,12 @@ const mem = std.mem;
 const CanonData = @import("CanonData");
 const CccData = @import("CombiningData");
 const CompatData = @import("CompatData");
+const HangulData = @import("HangulData");
 
 canon_data: CanonData,
 ccc_data: CccData,
 compat_data: CompatData,
+hangul_data: HangulData,
 
 const Self = @This();
 
@@ -16,6 +18,7 @@ pub fn init(allocator: std.mem.Allocator) !Self {
         .canon_data = try CanonData.init(allocator),
         .ccc_data = try CccData.init(allocator),
         .compat_data = try CompatData.init(allocator),
+        .hangul_data = try HangulData.init(allocator),
     };
 }
 
@@ -23,4 +26,5 @@ pub fn deinit(self: *Self) void {
     self.canon_data.deinit();
    self.ccc_data.deinit();
     self.compat_data.deinit();
+    self.hangul_data.deinit();
 }
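
Hedged sketch of the wiring this enables (annotation, not part of the patch): NormData now owns a HangulData instance, and Normalizer (next diff) reaches it through its *NormData pointer. Constructing Normalizer directly from that single field is an assumption based on the struct layout visible in these diffs:

const std = @import("std");
const NormData = @import("NormData");
const Normalizer = @import("Normalizer");

pub fn main() !void {
    var gpa = std.heap.GeneralPurposeAllocator(.{}){};
    defer _ = gpa.deinit();

    var norm_data = try NormData.init(gpa.allocator());
    defer norm_data.deinit();

    // All Hangul queries now flow through the shared NormData instance,
    // replacing the previous dependency on ziglyph's hangul_map.
    const n = Normalizer{ .norm_data = &norm_data };
    std.debug.assert(n.norm_data.hangul_data.syllable(0xAC00) == .LV);
}
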
diff --git a/src/Normalizer.zig b/src/Normalizer.zig
index 1434043..0670cae 100644
--- a/src/Normalizer.zig
+++ b/src/Normalizer.zig
@@ -7,7 +7,6 @@ const testing = std.testing;
 
 const CodePointIterator = @import("code_point").Iterator;
 const case_fold_map = @import("ziglyph").case_folding;
-const hangul_map = @import("ziglyph").hangul;
 const norm_props = @import("ziglyph").normalization_props;
 
 pub const NormData = @import("NormData");
@@ -17,9 +16,9 @@ norm_data: *NormData,
 const Self = @This();
 
 // Hangul processing utilities.
-fn isHangulPrecomposed(cp: u21) bool {
-    if (hangul_map.syllableType(cp)) |kind| return kind == .LV or kind == .LVT;
-    return false;
+fn isHangulPrecomposed(self: Self, cp: u21) bool {
+    const kind = self.norm_data.hangul_data.syllable(cp);
+    return kind == .LV or kind == .LVT;
 }
 
 const SBase: u21 = 0xAC00;
@@ -117,7 +116,7 @@ pub fn decompose(self: Self, cp: u21, form: Form) Decomp {
     }
 
     // Hangul precomposed syllable full decomposition.
-    if (isHangulPrecomposed(cp)) {
+    if (self.isHangulPrecomposed(cp)) {
         const cps = decomposeHangul(cp);
         @memcpy(dc.cps[0..cps.len], &cps);
         return dc;
@@ -335,12 +334,12 @@ test "nfkd !ASCII / alloc" {
 
 // Composition utilities.
 
-fn isHangul(cp: u21) bool {
-    return cp >= 0x1100 and hangul_map.syllableType(cp) != null;
+fn isHangul(self: Self, cp: u21) bool {
+    return cp >= 0x1100 and self.norm_data.hangul_data.syllable(cp) != .none;
 }
 
 fn isNonHangulStarter(self: Self, cp: u21) bool {
-    return !isHangul(cp) and self.norm_data.ccc_data.isStarter(cp);
+    return !self.isHangul(cp) and self.norm_data.ccc_data.isStarter(cp);
 }
 
 /// Normalizes `str` to NFC.
@@ -395,7 +394,7 @@ fn nfxc(self: Self, allocator: std.mem.Allocator, str: []const u8, form: Form) !
             for (d_list.items[(j + 1)..i]) |B| {
                 const cc_B = self.norm_data.ccc_data.ccc(B);
                 // Check for blocking conditions.
-                if (isHangul(C)) {
+                if (self.isHangul(C)) {
                     if (cc_B != 0 or self.isNonHangulStarter(B)) continue :block_check;
                 }
                 if (cc_B >= cc_C) continue :block_check;
@@ -414,9 +413,9 @@ fn nfxc(self: Self, allocator: std.mem.Allocator, str: []const u8, form: Form) !
             const L = d_list.items[sidx];
             var processed_hangul = false;
 
-            if (isHangul(L) and isHangul(C)) {
-                const l_stype = hangul_map.syllableType(L).?;
-                const c_stype = hangul_map.syllableType(C).?;
+            if (self.isHangul(L) and self.isHangul(C)) {
+                const l_stype = self.norm_data.hangul_data.syllable(L);
+                const c_stype = self.norm_data.hangul_data.syllable(C);
 
                 if (l_stype == .LV and c_stype == .T) {
                     // LV, T
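
For context (annotation, not part of the patch): the LV/LVT distinction checked in that last hunk feeds Unicode's arithmetic Hangul composition. An LV syllable plus a trailing-consonant jamo yields the LVT syllable at a fixed offset, with no table lookup; the constant below follows UAX #15 and complements the SBase already present in Normalizer.zig. Illustrative sketch only:

const std = @import("std");

// TBase per the Unicode Hangul composition algorithm (assumption: the
// patch's surrounding code uses the same standard constants).
const TBase: u21 = 0x11A7;

// Compose an LV syllable with a trailing jamo into the LVT syllable.
fn composeLvT(lv: u21, t: u21) u21 {
    return lv + (t - TBase);
}

test "LV + T -> LVT" {
    // U+AC00 GA (LV) + U+11A8 KIYEOK (T) == U+AC01 GAG (LVT)
    try std.testing.expectEqual(@as(u21, 0xAC01), composeLvT(0xAC00, 0x11A8));
}
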