| field | value |
|---|---|
| author | 2025-05-16 12:06:36 -0400 |
| committer | 2025-05-16 12:06:36 -0400 |
| commit | aa20bebade8eeb3ca75199dc252feb3edb203fb1 (patch) |
| tree | 2e832616bbf554ca3a20588d050c0dc764f4cf65 /build.zig |
| parent | Move WordBreak to Words (diff) |
| download | zg-aa20bebade8eeb3ca75199dc252feb3edb203fb1.tar.gz, zg-aa20bebade8eeb3ca75199dc252feb3edb203fb1.tar.xz, zg-aa20bebade8eeb3ca75199dc252feb3edb203fb1.zip |
Words module
In keeping with the new nomenclature, we're calling the module "Words",
not "WordBreak". The latter is Unicode jargon, the module provides word
iterators. Words are the figure, word breaks are the ground.
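
The rename is visible to downstream consumers only through the module name registered with `b.addModule`. Below is a minimal sketch of the corresponding change on the consuming side; it is not part of this commit, the dependency name `zg`, the executable name, and the file layout are illustrative assumptions, and exact `std.Build` details vary by Zig version.

```zig
const std = @import("std");

// Hypothetical downstream build.zig showing the one-line change a consumer
// would make after the rename. Names and paths here are assumptions.
pub fn build(b: *std.Build) void {
    const target = b.standardTargetOptions(.{});
    const optimize = b.standardOptimizeOption(.{});

    // Assumes the package is declared in build.zig.zon under the name "zg".
    const zg = b.dependency("zg", .{ .target = target, .optimize = optimize });

    const exe_mod = b.createModule(.{
        .root_source_file = b.path("src/main.zig"),
        .target = target,
        .optimize = optimize,
    });
    // Previously: zg.module("WordBreak"); the module is now exposed as "Words".
    exe_mod.addImport("Words", zg.module("Words"));

    const exe = b.addExecutable(.{ .name = "example", .root_module = exe_mod });
    b.installArtifact(exe);
}
```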
Diffstat (limited to 'build.zig')

| mode | path | lines |
|---|---|---|
| -rw-r--r-- | build.zig | 18 |

1 file changed, 9 insertions(+), 9 deletions(-)
```diff
@@ -226,21 +226,21 @@ pub fn build(b: *std.Build) void {
     const grapheme_tr = b.addRunArtifact(grapheme_t);
 
     // Word Breaking
-    const word_break = b.addModule("WordBreak", .{
-        .root_source_file = b.path("src/WordBreak.zig"),
+    const words = b.addModule("Words", .{
+        .root_source_file = b.path("src/Words.zig"),
         .target = target,
         .optimize = optimize,
     });
-    word_break.addAnonymousImport("wbp", .{ .root_source_file = wbp_gen_out });
-    word_break.addImport("code_point", code_point);
+    words.addAnonymousImport("wbp", .{ .root_source_file = wbp_gen_out });
+    words.addImport("code_point", code_point);
 
-    const word_break_t = b.addTest(.{
+    const words_t = b.addTest(.{
         .name = "WordBreak",
-        .root_module = word_break,
+        .root_module = words,
         .target = target,
         .optimize = optimize,
     });
-    const word_break_tr = b.addRunArtifact(word_break_t);
+    const words_tr = b.addRunArtifact(words_t);
 
     // ASCII utilities
     const ascii = b.addModule("ascii", .{
@@ -471,7 +471,7 @@ pub fn build(b: *std.Build) void {
     });
     unicode_tests.root_module.addImport("Graphemes", graphemes);
     unicode_tests.root_module.addImport("Normalize", norm);
-    unicode_tests.root_module.addImport("WordBreak", word_break);
+    unicode_tests.root_module.addImport("Words", words);
 
     const run_unicode_tests = b.addRunArtifact(unicode_tests);
 
@@ -480,7 +480,7 @@ pub fn build(b: *std.Build) void {
     test_step.dependOn(&code_point_tr.step);
     test_step.dependOn(&display_width_tr.step);
     test_step.dependOn(&grapheme_tr.step);
-    test_step.dependOn(&word_break_tr.step);
+    test_step.dependOn(&words_tr.step);
     test_step.dependOn(&ascii_tr.step);
     test_step.dependOn(&ccc_data_tr.step);
     test_step.dependOn(&canon_data_tr.step);
```