aboutsummaryrefslogtreecommitdiffhomepage
path: root/sundry/util
diff options
context:
space:
mode:
authorRalph Amissah <ralph.amissah@gmail.com>2021-10-05 12:39:53 -0400
committerRalph Amissah <ralph.amissah@gmail.com>2021-11-27 19:40:42 -0500
commit02718313824caa0e87eb0f1448684ff8d2dbe4d1 (patch)
treecb916873c46c57d18643d26fb6731f5258824a0f /sundry/util
parentnix related config, direnv else minor (diff)
primarily org related
Diffstat (limited to 'sundry/util')
-rw-r--r--sundry/util/d/tools/markup_conversion/README1
-rwxr-xr-xsundry/util/d/tools/markup_conversion/endnotes_inline_from_binary.d123
-rwxr-xr-xsundry/util/d/tools/markup_conversion/markup_changes_header_and_content.d244
-rwxr-xr-xsundry/util/d/tools/markup_conversion/markup_conversion_from_sisu_ruby_to_sisu_spine.d367
-rwxr-xr-xsundry/util/rb/cgi/spine.search.cgi952
5 files changed, 1687 insertions, 0 deletions
diff --git a/sundry/util/d/tools/markup_conversion/README b/sundry/util/d/tools/markup_conversion/README
new file mode 100644
index 0000000..8b13789
--- /dev/null
+++ b/sundry/util/d/tools/markup_conversion/README
@@ -0,0 +1 @@
+
diff --git a/sundry/util/d/tools/markup_conversion/endnotes_inline_from_binary.d b/sundry/util/d/tools/markup_conversion/endnotes_inline_from_binary.d
new file mode 100755
index 0000000..b084052
--- /dev/null
+++ b/sundry/util/d/tools/markup_conversion/endnotes_inline_from_binary.d
@@ -0,0 +1,123 @@
+#!/usr/bin/env rdmd
+/+
+ - read in file .sst .ssi .ssm
+ - loop twice
+ - first
+ - check for and skip code blocks
+ - use unique code marker for endnote markers in text and give an endnote
+ number ★1, increment
+ - extract all endnotes in array
+ - second
+ - check that the footnote marker number count matches the number of notes
+ in the array
+ - if they match either:
+ - substitute each endnote marker with the array footnote[number-1]
+ - substitute each endnote marker with footnote
+ as inlined footnote markup (footnote number not needed)
+ - if they do not match exit
+ - check whether changes have been made
+ - if so write file with inline footnotes in sub-directory converted_output_/
+ using the same name as the original file
+ - else, exit
++/
+import std.stdio;
+import std.file;
+import std.array : split;
+import std.exception;
+import core.stdc.errno;
+import std.regex;
+import std.format;
+import std.conv;
+void main(string[] args) {
+ static comment = ctRegex!(`^%+ `);
+ static block_tic_code_open = ctRegex!("^`{3} code(?:[.](?P<syntax>[a-z][0-9a-z#+_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?");
+ static block_tic_close = ctRegex!("^(`{3})$","m");
+ static block_curly_code_open = ctRegex!(`^(?:code(?:[.](?P<syntax>[a-z][0-9a-z_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?[{][ ]*$)`);
+ static block_curly_code_close = ctRegex!(`^([}]code)`);
+ auto rgx_endnote_ref = ctRegex!(`([~]\^)(?P<tail>[)\]]? |$)`, "gm");
+ auto rgx_endnote = ctRegex!(`^\^~\s+(.+|\n)`, "gm");
+ foreach(arg; args[1..$]) {
+ if (
+ !(arg.match(regex(r"--\w+")))
+ && arg.match(regex(r"\w+?\.ss[itm]"))
+ ) {
+ writeln(arg);
+ string filename = arg;
+ try {
+ string[] contents, endnotes, endnote_refs;
+ string text = filename.readText;
+ string[] paragraphs = text.split("\n\n");
+ int endnote_ref_count = 0;
+ int code_block_status = 0;
+ enum codeBlock { off, curly, tic, }
+ foreach (paragraph; paragraphs) { /+ loop to gather binary endnotes +/
+ if (code_block_status == codeBlock.off
+ && paragraph.match(rgx_endnote)
+ ) {
+ endnotes ~= replaceAll!(m => m[1])
+ (paragraph, rgx_endnote);
+ } else {
+ if ((code_block_status == codeBlock.curly
+ && paragraph.matchFirst(block_curly_code_close))
+          || (code_block_status == codeBlock.tic
+ && paragraph.matchFirst(block_tic_close))
+ ) {
+ code_block_status = codeBlock.off;
+          } else if (code_block_status != codeBlock.off) {
+ // skip, prevent search for endnotes
+ } else if (paragraph.matchFirst(block_curly_code_open)) {
+ code_block_status = codeBlock.curly;
+ } else if (paragraph.matchFirst(block_tic_code_open)) {
+ code_block_status = codeBlock.tic;
+ } else if (auto m = paragraph.matchAll(rgx_endnote_ref)) {
+ foreach (n; m) {
+ endnote_ref_count++; // endnote_refs ~= (n.captures[1]);
+ }
+ }
+ contents ~= paragraph;
+ }
+ }
+ if (endnotes.length == endnote_ref_count) {
+ import std.outbuffer;
+ writeln("endnote ref count: ", endnote_ref_count);
+ writeln("number of binary endnotes: ", endnotes.length);
+ int endnote_count = -1;
+ auto buffer = new OutBuffer();
+ foreach (content; contents) { /+ loop to inline endnotes +/
+ content = replaceAll!(m => "~{ " ~ endnotes[++endnote_count] ~ " }~" ~ m["tail"] )
+ (content, rgx_endnote_ref);
+ buffer.write(content ~ "\n\n");
+ }
+ if (buffer) {
+ try {
+ string dir_out = "converted_output_";
+ string path_and_file_out = dir_out ~ "/" ~ filename;
+ dir_out.mkdirRecurse;
+ auto f = File(path_and_file_out, "w");
+ f.write(buffer);
+ writeln("wrote: ", path_and_file_out);
+ } catch (FileException ex) {
+ writeln("did not write file");
+ // Handle errors
+ }
+ }
+ } else {
+          writeln("ERROR binary endnote mismatch, check markup,\nmismatch in the number of endnotes & endnote references!");
+ writeln(" number of endnotes: ", endnotes.length);
+ writeln(" number of endnote refs: ", endnote_ref_count); // endnote_refs.length,
+ }
+ // assert(endnotes.length == endnote_ref_count);
+ } catch (ErrnoException ex) {
+ switch(ex.errno) {
+ case EPERM:
+ case EACCES: // Permission denied
+ break;
+ case ENOENT: // File does not exist
+ break;
+ default: // Handle other errors
+ break;
+ }
+ }
+ }
+ }
+}
diff --git a/sundry/util/d/tools/markup_conversion/markup_changes_header_and_content.d b/sundry/util/d/tools/markup_conversion/markup_changes_header_and_content.d
new file mode 100755
index 0000000..86792ff
--- /dev/null
+++ b/sundry/util/d/tools/markup_conversion/markup_changes_header_and_content.d
@@ -0,0 +1,244 @@
+#!/usr/bin/env rdmd
+/+
+ - read in file .sst .ssi .ssm
+ - loop twice
+ - first
+ - check for and skip code blocks
+ - use unique code marker for endnote markers in text and give an endnote
+ number ★1, increment
+ - extract all endnotes in array
+ - second
+ - check that the footnote marker number count matches the number of notes
+ in the array
+ - if they match either:
+ - substitute each endnote marker with the array footnote[number-1]
+ - substitute each endnote marker with footnote
+ as inlined footnote markup (footnote number not needed)
+ - if they do not match exit
+ - check whether changes have been made
+ - if so write file with inline footnotes in sub-directory converted_output_/
+ using the same name as the original file
+ - else, exit
++/
+import std.stdio;
+import std.file;
+import std.array : split;
+import std.exception;
+// import std.range;
+import core.stdc.errno;
+import std.regex;
+import std.format;
+import std.conv;
+void main(string[] args) {
+ static heading_a = ctRegex!(`^:?[A][~] `, "m");
+ static comment = ctRegex!(`^%+ `);
+ static block_tic_code_open = ctRegex!("^`{3} code(?:[.](?P<syntax>[a-z][0-9a-z#+_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?");
+ static block_tic_close = ctRegex!("^(`{3})$","m");
+ static block_curly_code_open = ctRegex!(`^(?:code(?:[.](?P<syntax>[a-z][0-9a-z_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?[{][ ]*$)`);
+ static block_curly_code_close = ctRegex!(`^([}]code)`);
+ auto rgx_endnote_ref = ctRegex!(`([~]\^)(?P<tail>[)\]]? |$)`, "gm");
+ auto rgx_endnote = ctRegex!(`^\^~\s+(.+|\n)`, "gm");
+ char[][] header0Content1(in string src_text) { // cast(char[])
+ /+ split string on _first_ match of "^:?A~\s" into [header, content] array/tuple +/
+ char[][] header_and_content;
+ auto m = (cast(char[]) src_text).matchFirst(heading_a);
+ header_and_content ~= m.pre;
+ header_and_content ~= m.hit ~ m.post;
+ assert(header_and_content.length == 2,
+ "document markup is broken, header body split == "
+ ~ header_and_content.length.to!string
+ ~ "; (header / body array split should == 2 (split is on level A~))"
+ );
+ return header_and_content;
+ }
+ foreach(arg; args[1..$]) {
+ if (
+ !(arg.match(regex(r"--\w+")))
+ && arg.match(regex(r"\w+?\.ss[itm]"))
+ ) {
+ writeln(arg);
+ string filename = arg;
+ try {
+ string[] munged_header, munged_contents, munged_endnotes, endnote_refs;
+ string text = filename.readText;
+ char[][] hc = header0Content1(text);
+ char[] src_header = hc[0];
+ string[] headers = src_header.to!string.split("\n\n");
+ char[] src_txt = hc[1];
+ string[] paragraphs = src_txt.to!string.split("\n\n");
+ int endnote_ref_count = 0;
+ int[string] type = [
+ "curly_code" : 0,
+ "tic_code" : 0,
+ ];
+ string _tmp_header;
+ foreach (h_; headers) { /+ loop to inline endnotes +/
+ _tmp_header = "";
+ if (h_.match(regex(r"^[@\[]?title[:\]]?"))) { // title
+ if (auto m = h_.match(regex(r"^@title:(?:\s+(?P<c>.+)|$)"))) { // sisu bespoke markup
+ if (m.captures["c"].length == 0) {
+ _tmp_header ~= "title:";
+ } else {
+ _tmp_header ~= "title:\n main: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ }
+ } else if (auto m = h_.match(regex(r"^title\s*=\s*(?P<c>.+)"))) { // toml?
+ if (m.captures["c"].length == 0) {
+ _tmp_header ~= "title:";
+ } else {
+ _tmp_header ~= "title:\n main: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ }
+ } else if (auto m = h_.match(regex(r"^\[title\]"))) { // toml markup
+ _tmp_header ~= "title:";
+ } else if (auto m = h_.match(regex(r"^title(?:\s+(?P<c>.+)|\s+\\$)"))) { // sdlang markup
+ if (m.captures["c"].length == 0) {
+ _tmp_header ~= "title:";
+ } else {
+ _tmp_header ~= "title:\n main: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ }
+ }
+ if (h_.match(regex(r"^\s*[:]?(?:main)[:= ]?", "m"))) {
+ if (auto m = h_.match(regex(r"^\s+(?P<h>:main):(?:\s+(?P<c>.+)|$)", "m"))) { // sisu bespoke markup
+ _tmp_header ~= " main: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ } else if (auto m = h_.match(regex(r"^\s*(?P<h>main)\s*=\s*(?P<c>.+)", "m"))) { // toml?
+ _tmp_header ~= " main: " ~ m.captures["c"];
+ } else if (auto m = h_.match(regex(r"^\s+(?P<h>main)(?:\s*\s*(?P<c>.+)|$)", "m"))) { // toml markup
+ _tmp_header ~= " main: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ } else if (auto m = h_.match(regex(r"^\s+(?P<h>main)(?:\s+(?P<c>.+)|\s+\\$)", "m"))) { // sdlang markup
+ _tmp_header ~= " main: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ }
+ }
+ if (h_.match(regex(r"^\s*[:]?(?:sub(title)?)[:= ]?", "m"))) {
+ if (auto m = h_.match(regex(r"^\s+:sub(?:title)?:(?:\s+(?P<c>.+)|$)", "m"))) { // sisu bespoke markup
+ _tmp_header ~= " subtitle: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ } else if (auto m = h_.match(regex(r"^\s*sub(?:title)?\s*=\s*(?P<c>.+)$", "m"))) { // toml?
+ _tmp_header ~= " subtitle: " ~ m.captures["c"];
+          } else if (auto m = h_.match(regex(r"^\s+sub(?:title)?(?:\s*\s*(?P<c>.+)|$)", "m"))) { // toml markup
+ _tmp_header ~= " subtitle: " ~ "\"" ~ m.captures["c"] ~ "\"";
+          } else if (auto m = h_.match(regex(r"^\s+sub(?:title)?(?:\s+(?P<c>.+)|\s+\\$)", "m"))) { // sdlang markup
+ _tmp_header ~= " subtitle: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ }
+ }
+ }
+ if (h_.match(regex(r"^[@\[]?rights[:\]]?"))) { // rights
+ if (auto m = h_.match(regex(r"^@rights:[ ]+(?P<c>.+)$"))) { // sisu bespoke markup
+ _tmp_header ~= "rights: \n copyright: \"" ~ m.captures["c"] ~ "\"";
+ } else if (auto m = h_.match(regex(r"^@rights:"))) { // sisu bespoke markup
+ _tmp_header ~= "rights:";
+ } else if (auto m = h_.match(regex(r"^\[rights\]", "m"))) { // toml markup
+ _tmp_header ~= "rights:";
+ } else if (auto m = h_.match(regex(r"^rights:"))) { // sdlang markup
+ _tmp_header ~= "rights:";
+ }
+ if (h_.match(regex(r"^\s*[:]?copyright[:= ]?", "m"))) {
+ if (auto m = h_.match(regex(r"^\s+:copyright:(?:\s+(?P<c>.+)|$)", "m"))) { // sisu bespoke markup
+ _tmp_header ~= " copyright: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ } else if (auto m = h_.match(regex(r"^\s*copyright\s*=\s*(?P<c>.+)", "m"))) { // toml?
+ _tmp_header ~= " copyright: " ~ m.captures["c"];
+          } else if (auto m = h_.match(regex(r"^\s+copyright(?:\s*\s*(?P<c>.+)|$)", "m"))) { // toml markup
+ _tmp_header ~= " copyright: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ } else if (auto m = h_.match(regex(r"^\s+copyright(?:\s+(?P<c>.+)|\s+\\$)", "m"))) { // sdlang markup
+ _tmp_header ~= " copyright: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ }
+ }
+ if (h_.match(regex(r"^\s*[:]?licen[cs]e[:= ]?", "m"))) {
+ if (auto m = h_.match(regex(r"^\s+:licen[cs]e:(?:\s+(?P<c>.+)|$)", "m"))) { // sisu bespoke markup
+ _tmp_header ~= " license: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ } else if (auto m = h_.match(regex(r"^\s*licen[cs]e\s*=\s*(?P<c>.+)$", "m"))) { // toml?
+ _tmp_header ~= " license: " ~ m.captures["c"];
+ } else if (auto m = h_.match(regex(r"^\s+licen[cs]e(?:\s*\s*(?P<c>.+)|$)", "m"))) { // toml markup
+ _tmp_header ~= " license: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ } else if (auto m = h_.match(regex(r"^\s+licen[cs]e(?:\s+(?P<c>.+)|\s+\\$)", "m"))) { // sdlang markup
+ _tmp_header ~= " license: " ~ "\"" ~ m.captures["c"] ~ "\"";
+ }
+ }
+ }
+ if (_tmp_header.length > 0) {
+ munged_header ~= _tmp_header;
+ } else {
+ munged_header ~= h_;
+ }
+ }
+ writeln(munged_header);
+ foreach (paragraph; paragraphs) { /+ loop to gather binary endnotes +/
+ if ( !( type["curly_code"] == 1 || type["tic_code"] == 1)
+ && paragraph.match(rgx_endnote)
+ ) {
+ munged_endnotes ~= replaceAll!(m => m[1])
+ (paragraph, rgx_endnote);
+ } else {
+ if ( type["curly_code"] == 1 || type["tic_code"] == 1
+ || paragraph.matchFirst(block_curly_code_open)
+ || paragraph.matchFirst(block_tic_code_open)
+ ) { /+ code blocks identified, no munging +/
+ if ( type["curly_code"] == 1
+ && paragraph.matchFirst(block_curly_code_close)
+ ) {
+ type["curly_code"] = 0;
+ } else if (type["tic_code"] == 1
+ && paragraph.matchFirst(block_tic_close)
+ ) {
+ type["tic_code"] = 0;
+ } else if (paragraph.matchFirst(block_curly_code_open)) {
+ type["curly_code"] = 1;
+ } else if (paragraph.matchFirst(block_tic_code_open)) {
+ type["tic_code"] = 1;
+ }
+ munged_contents ~= paragraph;
+ } else { /+ regular content, not a code block +/
+ if (auto m = paragraph.matchAll(rgx_endnote_ref)) {
+ foreach (n; m) {
+ endnote_ref_count++; // endnote_refs ~= (n.captures[1]);
+ }
+ }
+ paragraph = replaceAll!(m => " \\\\ " )
+ (paragraph, regex(r"\s*<(?:/\s*|:)?br>\s*")); // (paragraph, regex(r"(<br>)"));
+ munged_contents ~= paragraph;
+ }
+ }
+ }
+ {
+ import std.outbuffer;
+ auto buffer = new OutBuffer();
+ foreach (header; munged_header) { /+ loop to inline endnotes +/
+ buffer.write(header ~ "\n\n");
+ }
+ if (munged_endnotes.length == endnote_ref_count) {
+ int endnote_count = -1;
+ foreach (content; munged_contents) { /+ loop to inline endnotes +/
+ content = replaceAll!(m => "~{ " ~ munged_endnotes[++endnote_count] ~ " }~" ~ m["tail"] )
+ (content, rgx_endnote_ref); // endnote_ref cannot occur in a code block or else fail
+ buffer.write(content ~ "\n\n");
+ }
+ if (buffer) {
+ try {
+ string dir_out = "converted_output_";
+ string path_and_file_out = dir_out ~ "/" ~ filename;
+ dir_out.mkdirRecurse;
+ auto f = File(path_and_file_out, "w");
+ f.write(buffer);
+ // writeln("wrote: ", path_and_file_out);
+ } catch (FileException ex) {
+ writeln("did not write file");
+ // Handle errors
+ }
+ }
+ } else {
+ foreach (content; munged_contents) { /+ loop to inline endnotes +/
+ buffer.write(content ~ "\n\n");
+ }
+ }
+ }
+ } catch (ErrnoException ex) {
+ switch(ex.errno) {
+ case EPERM:
+ case EACCES: // Permission denied
+ break;
+ case ENOENT: // File does not exist
+ break;
+ default: // Handle other errors
+ break;
+ }
+ }
+ }
+ }
+}
diff --git a/sundry/util/d/tools/markup_conversion/markup_conversion_from_sisu_ruby_to_sisu_spine.d b/sundry/util/d/tools/markup_conversion/markup_conversion_from_sisu_ruby_to_sisu_spine.d
new file mode 100755
index 0000000..0ec541d
--- /dev/null
+++ b/sundry/util/d/tools/markup_conversion/markup_conversion_from_sisu_ruby_to_sisu_spine.d
@@ -0,0 +1,367 @@
+#!/usr/bin/env rdmd
+/+
+ - read in file .sst .ssi .ssm
+ - loop twice
+ - first
+ - check for and skip code blocks
+ - use unique code marker for endnote markers in text and give an endnote
+ number ★1, increment
+ - extract all endnotes in array
+ - second
+ - check that the footnote marker number count matches the number of notes
+ in the array
+ - if they match either:
+ - substitute each endnote marker with the array footnote[number-1]
+ - substitute each endnote marker with footnote
+ as inlined footnote markup (footnote number not needed)
+ - if they do not match exit
+ - check whether changes have been made
+ - if so write file with inline footnotes in sub-directory converted_output_/
+ using the same name as the original file
+ - else, exit
++/
+import std.stdio;
+import std.file;
+import std.array : split, join;
+import std.exception;
+// import std.range;
+import core.stdc.errno;
+import std.regex;
+import std.format;
+import std.conv;
+void main(string[] args) {
+ static heading_a = ctRegex!(`^:?[A][~] `, "m");
+ static comment = ctRegex!(`^%+ `);
+ static block_tic_code_open = ctRegex!("^`{3} code(?:[.](?P<syntax>[a-z][0-9a-z#+_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?");
+ static block_tic_close = ctRegex!("^(`{3})$","m");
+ static block_curly_code_open = ctRegex!(`^(?:code(?:[.](?P<syntax>[a-z][0-9a-z_]+))?(?:[(](?P<attrib>[ a-zA-Z0-9;:,]*)[)])?[{][ ]*$)`);
+ static block_curly_code_close = ctRegex!(`^([}]code)`);
+ auto rgx_endnote_ref = ctRegex!(`([~]\^)(?P<tail>[)\]]? |$)`, "gm");
+ auto rgx_endnote = ctRegex!(`^\^~\s+(.+|\n)`, "gm");
+ char[][] header0Content1(in string src_text) { // cast(char[])
+ /+ split string on _first_ match of "^:?A~\s" into [header, content] array/tuple +/
+ char[][] header_and_content;
+ auto m = (cast(char[]) src_text).matchFirst(heading_a);
+ header_and_content ~= m.pre;
+ header_and_content ~= m.hit ~ m.post;
+ assert(header_and_content.length == 2,
+ "document markup is broken, header body split == "
+ ~ header_and_content.length.to!string
+ ~ "; (header / body array split should == 2 (split is on level A~))"
+ );
+ return header_and_content;
+ }
+ string format_body_string(string s) {
+ string o;
+ o = s
+ .replaceAll(regex("^<(?:/[ ]*)?br>[ ]*"), " \\\\ ")
+ .replaceAll(regex("[ ]*<(?:/[ ]*)?br>$"), " \\\\")
+ .replaceAll(regex("[ ]*<(?:/[ ]*)?br>[ ]*"), " \\\\ ");
+ return o;
+ }
+ string format_header_string(string s) {
+ string o;
+ o = s
+ .replaceAll(regex("\""), "\\\"")
+ .replaceAll(regex("[ ]*<(?:/[ ]*)?br>$"), " \\\\")
+ .replaceAll(regex("[ ]*<(?:/[ ]*)?br>[ ]*"), " \\\\ ");
+ return o;
+ }
+ string format_main_header(string hm, string hs = "", string c = "") {
+ string o;
+ if (c.length == 0) {
+ o ~= hm ~ ":\n";
+ } else {
+ o ~= hm ~ ":\n"
+ ~ " " ~ hs ~ ": "
+ ~ "\"" ~ format_header_string(c) ~ "\"\n";
+ }
+ return o;
+ }
+ string format_sub_header(string hs, string c) {
+ string o;
+ o ~= " " ~ hs ~ ": "
+ ~ "\"" ~ format_header_string(c) ~ "\"\n";
+ return o;
+ }
+ foreach(arg; args[1..$]) {
+ if (
+ !(arg.match(regex(r"--\w+")))
+ && arg.match(regex(r"\w+?\.ss[itm]"))
+ ) {
+ writeln(arg);
+ string filename = arg;
+ try {
+ string[] munged_header, munged_contents, munged_endnotes, endnote_refs;
+ char[][] hc;
+ char[] src_header;
+ string[] headers;
+ char[] src_txt;
+ string[] paragraphs;
+ enum codeBlock { off, curly, tic, }
+ string _tmp_header;
+ int endnote_ref_count = 0;
+ int code_block_status = codeBlock.off;
+ string text = filename.readText;
+ if (arg.match(regex(r"\w+?\.ss[tm]"))) {
+ hc = header0Content1(text);
+ src_header = hc[0];
+ headers = src_header.to!string.split("\n\n");
+ src_txt = hc[1];
+ paragraphs = src_txt.to!string.split("\n\n");
+ } else if (arg.match(regex(r"\w+?\.ssi"))) {
+ headers = [];
+ paragraphs = text.split("\n\n");
+ }
+ if (headers.length > 0) {
+ headers[0] = headers[0].replaceFirst(regex(r"^%\s+SiSU.+", "i"), "# SiSU 8.0 spine (auto-conversion)");
+ foreach (h_; headers) {
+ _tmp_header = "";
+ if (auto m = h_.match(regex(r"^%\s*", "m"))) {
+ h_ = h_.replaceAll(regex(r"^%\s*", "m"), "# ") ~ "\n";
+ }
+ if (h_.match(regex(r"^@title:|@subtitle"))) {
+ if (auto m = h_.match(regex(r"^@(?P<h>title):(?:[ ]+(?P<c>.+)|\n)"))) {
+ _tmp_header ~= format_main_header(m.captures["h"], "main", m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^@(?P<h>subtitle):(?:[ ]+(?P<c>.+)|$)"))) {
+ if (m.captures["c"].length == 0) {
+ } else {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>main):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:sub(?:title)?:(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header("subtitle", m.captures["c"]);
+ }
+ } else if (h_.match(regex(r"^@creator:|@author:"))) {
+ if (auto m = h_.match(regex(r"^(?:@creator:|@author:)(?:[ ]+(?P<c>.+)|\n)"))) {
+ _tmp_header ~= format_main_header("creator", "author", m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>author):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ } else if (h_.match(regex(r"^@rights:"))) {
+ if (auto m = h_.match(regex(r"^@(?P<h>rights):(?:[ ]+(?P<c>.+)|\n)"))) {
+ _tmp_header ~= format_main_header(m.captures["h"], "copyright", m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>copyright):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:licen[cs]e:(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header("license", m.captures["c"]);
+ }
+ } else if (h_.match(regex(r"^@date:|@date\."))) {
+ if (auto m = h_.match(regex(r"^@(?P<h>date):(?:[ ]+(?P<c>.+)|\n)"))) {
+ _tmp_header ~= format_main_header(m.captures["h"], "published", m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>published):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>available):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>modified):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>created):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>issued):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>valid):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^@date\.(?P<h>available):[ ]+(?P<c>.+)$"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^@date\.(?P<h>modified):[ ]+(?P<c>.+)$"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^@date\.(?P<h>created):[ ]+(?P<c>.+)$"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^@date\.(?P<h>issued):[ ]+(?P<c>.+)$"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^@date\.(?P<h>valid):[ ]+(?P<c>.+)$"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ } else if (h_.match(regex(r"^@classify:"))) {
+ if (auto m = h_.match(regex(r"^@classify:"))) {
+ _tmp_header ~= "classify:\n";
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>topic_register):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:type:(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= "# type: " ~ "\"" ~ m.captures["c"] ~ "\"\n";
+ }
+ } else if (h_.match(regex(r"^(?:@identifier:|@identify:)"))) {
+              if (auto m = h_.match(regex(r"^(?:@identifier:|@identify:)"))) {
+ _tmp_header ~= "identify:\n";
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>oclc):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>isbn):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>dewey):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ } else if (h_.match(regex(r"^@publisher:"))) {
+ if (auto m = h_.match(regex(r"^@publisher:[ ]+(?P<c>.+)$"))) {
+ _tmp_header ~= "publisher: " ~ "\"" ~ m.captures["c"] ~ "\"\n";
+ }
+ } else if (h_.match(regex(r"^@make:"))) {
+ // writeln(h_);
+ if (auto m = h_.match(regex(r"^@make:"))) {
+ _tmp_header ~= "make:\n";
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>breaks):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>num_top):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>headings):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>italics):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>bold):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>emphasis):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>substitute):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>texpdf_font):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>home_button_text):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>home_button_image):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>cover_image):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ if (auto m = h_.match(regex(r"^\s+:(?P<h>footer):(?:[ ]+(?P<c>.+)|$)", "m"))) {
+ _tmp_header ~= format_sub_header(m.captures["h"], m.captures["c"]);
+ }
+ // writeln(_tmp_header);
+ } else if (h_.match(regex(r"^@\w+:"))) {
+ _tmp_header ~= "# " ~ h_.split("\n").join("\n# ") ~ "\n";
+ } else if (h_.match(regex(r"^\s+:\w+:", "m"))) {
+ if (auto m = h_.match(regex(r"^(?P<g>\s+:\w+:.*)"))) {
+ _tmp_header ~= "# " ~ m.captures["g"] ~ "\n";
+ }
+ }
+ if (h_.match(regex(r"^#", "m"))) {
+ if (auto m = h_.match(regex(r"^(?P<g>#.*)", "m"))) {
+ _tmp_header ~= m.captures["g"] ~ "\n";
+ }
+ }
+ if (_tmp_header.length > 0) {
+ munged_header ~= _tmp_header.split("\n\n");
+ } else if (h_.length > 0) {
+ writeln("munging required: ", h_);
+ h_ = h_.replaceAll((regex(r"\n\n\n+", "m")), "\n\n");
+ munged_header ~= h_;
+ }
+ }
+ // writeln(munged_header.join("\n"));
+ }
+ foreach (paragraph; paragraphs) { /+ loop to gather binary endnotes +/
+ if (code_block_status == codeBlock.off
+ && paragraph.match(rgx_endnote)
+ ) {
+ munged_endnotes ~= replaceAll!(m => m[1])
+ (paragraph, rgx_endnote);
+ } else {
+ if ( code_block_status != codeBlock.off
+ || paragraph.matchFirst(block_curly_code_open)
+ || paragraph.matchFirst(block_tic_code_open)
+ ) { /+ code blocks identified, no munging +/
+ if ((code_block_status == codeBlock.curly
+ && paragraph.matchFirst(block_curly_code_close))
+ || (code_block_status == codeBlock.tic
+ && paragraph.matchFirst(block_tic_close))
+ ) {
+ code_block_status = codeBlock.off;
+ } else if (paragraph.matchFirst(block_curly_code_open)) {
+ code_block_status = codeBlock.curly;
+ } else if (paragraph.matchFirst(block_tic_code_open)) {
+ code_block_status = codeBlock.tic;
+ }
+ munged_contents ~= paragraph;
+ } else { /+ regular content, not a code block +/
+ if (auto m = paragraph.matchAll(rgx_endnote_ref)) {
+ foreach (n; m) {
+ endnote_ref_count++; // endnote_refs ~= (n.captures[1]);
+ }
+ }
+ paragraph = format_body_string(paragraph);
+ // paragraph = replaceAll!(m => " \\\\ " )
+ // (paragraph, regex(r"\s*<(?:/\s*|:)?br>\s*")); // (paragraph, regex(r"(<br>)"));
+ munged_contents ~= paragraph;
+ }
+ }
+ }
+ {
+ import std.outbuffer;
+ auto buffer = new OutBuffer();
+ if (munged_header.length > 0) {
+ foreach (header; munged_header) { /+ loop to inline endnotes +/
+ buffer.write(header ~ "\n");
+ }
+ }
+ if (munged_endnotes.length == endnote_ref_count) {
+ int endnote_count = -1;
+ foreach (k, content; munged_contents) { /+ loop to inline endnotes +/
+ content = replaceAll!(m => "~{ " ~ munged_endnotes[++endnote_count] ~ " }~" ~ m["tail"] )
+ (content, rgx_endnote_ref); // endnote_ref cannot occur in a code block or else fail
+ buffer.write(content ~ ((k == munged_contents.length - 1) ? "" : "\n\n"));
+ }
+ if (buffer) {
+ try {
+ string dir_out = "converted_output_";
+ string path_and_file_out = dir_out ~ "/" ~ filename;
+ dir_out.mkdirRecurse;
+ auto f = File(path_and_file_out, "w");
+ f.write(buffer);
+ // writeln("wrote: ", path_and_file_out);
+ } catch (FileException ex) {
+ writeln("did not write file");
+ // Handle errors
+ }
+ }
+ } else {
+ foreach (content; munged_contents) { /+ loop to inline endnotes +/
+ buffer.write(content ~ "\n\n");
+ }
+ }
+ }
+ } catch (ErrnoException ex) {
+ switch(ex.errno) {
+ case EPERM:
+ case EACCES: // Permission denied
+ break;
+ case ENOENT: // File does not exist
+ break;
+ default: // Handle other errors
+ break;
+ }
+ }
+ }
+ }
+}
diff --git a/sundry/util/rb/cgi/spine.search.cgi b/sundry/util/rb/cgi/spine.search.cgi
new file mode 100755
index 0000000..36f109d
--- /dev/null
+++ b/sundry/util/rb/cgi/spine.search.cgi
@@ -0,0 +1,952 @@
+#!/usr/bin/env ruby
+=begin
+ * Name: SiSU information Structuring Universe
+ * Author: Ralph Amissah
+ * https://sisudoc.org
+ * https://git.sisudoc.org
+
+ * Description: generates naive cgi search form for search of sisu database (sqlite)
+ * Name: SiSU generated sample cgi search form
+
+ * Description: generated sample cgi search form for SiSU
+ (SiSU is a framework for document structuring, publishing and search)
+
+ * Author: Ralph Amissah
+
+ * Copyright: (C) 1997 - 2014, Ralph Amissah, All Rights Reserved.
+
+ * License: GPL 3 or later:
+
+ SiSU, a framework for document structuring, publishing and search
+
+ Copyright (C) Ralph Amissah
+
+ This program is free software: you can redistribute it and/or modify it
+ under the terms of the GNU General Public License as published by the Free
+ Software Foundation, either version 3 of the License, or (at your option)
+ any later version.
+
+ This program is distributed in the hope that it will be useful, but WITHOUT
+ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ more details.
+
+ You should have received a copy of the GNU General Public License along with
+ this program. If not, see <https://www.gnu.org/licenses/>.
+
+ If you have Internet connection, the latest version of the GPL should be
+ available at these locations:
+ <https://www.fsf.org/licenses/gpl.html>
+ <https://www.gnu.org/licenses/gpl.html>
+
+ * SiSU uses:
+ * Standard SiSU markup syntax,
+ * Standard SiSU meta-markup syntax, and the
+ * Standard SiSU object citation numbering and system
+
+ * Homepages:
+ <https://www.sisudoc.org>
+
+ * Ralph Amissah
+ <ralph@amissah.com>
+ <ralph.amissah@gmail.com>
+
+=end
+begin
+ require 'cgi'
+ require 'fcgi'
+ require 'sqlite3'
+rescue LoadError
+ puts 'cgi, fcgi or sqlite3 NOT FOUND (LoadError)'
+end
+# Site/deployment configuration globals for the CGI.
+# NOTE(review): URLs are assembled from HTTP_HOST / PATH_INFO / SCRIPT_NAME
+# environment variables supplied by the web server -- presumably trusted
+# when not behind a proxy; confirm for the actual deployment.
+@stub_default = 'search'
+@image_src = "https://#{ENV['HTTP_HOST']}/image_sys"
+@hosturl_cgi = "https://#{ENV['HTTP_HOST']}#{ENV['PATH_INFO']}"
+@hosturl_files = "https://#{ENV['HTTP_HOST']}"
+@output_dir_structure_by = 'language'
+@lingual = 'multi'
+@db_name_prefix = 'spine.'
+# @base is the self-referencing URL the search form posts back to
+@base = "https://#{ENV['HTTP_HOST']}#{ENV['PATH_INFO']}#{ENV['SCRIPT_NAME']}"
+#Common TOP
+# class variables persist across FCGI request iterations (see FCGI.each_cgi
+# loop further down); @@offset is the current SQL result-paging offset
+@@offset = 0
+@@canned_search_url = @base
+@color_heading = '#DDFFAA'
+@color_match = '#ffff48'
+# Builds the sticky HTML search-form page.  Constructed with the current
+# state of every form control ('checked' or '' for checkboxes/radios) and
+# echoes that state back so the form retains its settings across submissions.
+class Form
+  def initialize(base,search_field,selected_db,result_type,checked_sql_limit,checked_tip,checked_stats,checked_searched,checked_url,checked_case,checked_echo,checked_sql,checked_all,checked_none,checked_selected,checked_default,search_note,the_can='')
+    # blank optional fragments unless their controlling checkbox is ticked
+    search_note = '' if checked_searched !~ /\S/
+    the_can = '' if checked_url !~ /\S/
+    search_field = '' if checked_echo !~ /\S/
+    @base,@search_field,@selected_db,@result_type,@checked_sql_limit,@checked_tip,@checked_stats,@checked_searched,@checked_url,@checked_case,@checked_echo,@checked_sql,@checked_all,@checked_none,@checked_selected,@checked_default,@search_note,@the_can=base,search_field,selected_db,result_type,checked_sql_limit,checked_tip,checked_stats,checked_searched,checked_url,checked_case,checked_echo,checked_sql,checked_all,checked_none,checked_selected,checked_default,search_note,the_can
+    # help text listing the searchable field prefixes; shown when 'tip' ticked
+    @tip = if checked_tip =~ /\S/
+      '<font size="2" color="#666666">text:__; fulltxt:__; keywords:__; title:__; author:__; topic_register:__; subject:__; description:__; publisher:__; editor:__; contributor:__; date:__; type:__; format:__; identifier:__; source:__; language:__; relation:__; coverage:__; rights:__; comment:__; abstract:__; src_filename_base:__;</font><br />'
+    else ''
+    end
+  end
+# Returns the complete HTML page (doctype through </form>) as a String.
+# The <<-WOK heredoc interpolates the instance state captured in initialize.
+def submission_form
+  search_form =<<-WOK
+  <!DOCTYPE html>
+  <html>
+  <head>
+  <title>
+    <meta charset="utf-8">
+    <meta name="sourcefile" content="SiSU._sst" />
+    SiSU search form (sample): SiSU information Structuring Universe
+  </title>
+  <link rel="generator" href="https://sisudoc.org/" />
+  <link rel="shortcut icon" href="https://#{ENV['HTTP_HOST']}/_sisu/image_sys/rb7.ico" />
+  <link href="../_sisu/css/html.css" rel="stylesheet">
+  </head>
+  <body lang="en" xml:lang="en">
+  <table summary="band" border="0" cellpadding="3" cellspacing="0">
+  <tr><td width="20%">
+  <table summary="home button / home information" border="0" cellpadding="3" cellspacing="0">
+    <tr><td align="left">
+      <br /><a href="https://sisudoc.org/" target="_top">
+        <b>SiSU</b>
+      </a>
+      <br /><a href="https://git.sisudoc.org/" target="_top">
+        git
+      </a>
+    </td></tr>
+  </table>
+  </td>
+  <td>
+  <label for="find"><b>SiSU (generated sample) search form (content organised by filetype)</b></label>
+  </td></tr>
+  </table>
+  <form action="#{@base}" id="Test Form" method="post">
+  <table cellpadding="2">
+  <tr><td valign=\"top\">
+  <textarea id="find" name="find" type="text" rows="6" cols="40" maxlength="256">#{@search_field}</textarea>
+  </td>
+  <td valign=\"top\">
+    #{@tip}
+    #{@search_note}
+    #{@the_can}
+  </td></tr></table>
+  <td valign=\"top\"><tr><td>
+  <!input type="text" id="find" name="find" value="#{@search_field}" />
+  <!input type="text" id="find" name="find" value="" />
+  <font size="2" color="#222222">
+  <b>to search:</b> select which database to search (drop-down menu below); enter your search query (in the form above); and <b>click on the search button</b> (below)
+  <br />
+  <select name="db" size="1">
+    #{@selected_db}
+    <option value="spine.sqlite">spine</option>
+  </select>
+  <input type="submit" value="SiSU search" />
+  <input type="radio" name="view" value="index" #{@result_type[:index]}> index
+  <input type="radio" name="view" value="text" #{@result_type[:text]}> text / grep
+  <br />
+  match limit:
+  <input type="radio" name="sql_match_limit" value="1000" #{@checked_sql_limit[:l1000]}> 1,000
+  <input type="radio" name="sql_match_limit" value="2500" #{@checked_sql_limit[:l2500]}> 2,500
+  <br />
+  <input type="checkbox" name="echo" #{@checked_echo}> echo query
+  <input type="checkbox" name="stats" #{@checked_stats}> result stats
+  <input type="checkbox" name="url" #{@checked_url}> search url
+  <input type="checkbox" name="searched" #{@checked_searched}> searched
+  <input type="checkbox" name="tip" #{@checked_tip}> available fields
+  <input type="checkbox" name="sql" #{@checked_sql}> sql statement
+  <br />
+  checks:
+  <input type="radio" name="checks" value="check_default" #{@checked_default}> default
+  <input type="radio" name="checks" value="check_selected" #{@checked_selected}> selected
+  <input type="radio" name="checks" value="check_all" #{@checked_all}> all
+  <input type="radio" name="checks" value="check_none" #{@checked_none}> none
+  </font>
+  </td></tr>
+  </table>
+  </form>
+  WOK
+  end
+end
+# Parses the user's search request into one attribute per searchable field.
+# Two input paths: a free-text form field using "field: value;" prefixes
+# (parsed by text_to_match), or discrete CGI/query parameters (s1, ft, key, ...).
+class SearchRequest #% search_for
+  # NOTE(review): :src_filename_base is exposed here but initialize only ever
+  # assigns @filename (see below) -- so src_filename_base always reads nil.
+  attr_accessor :text1,:fulltext,:keywords,:title,:author,:topic_register,:subject,:description,:publisher,:editor,:contributor,:date,:type,:format,:identifier,:source,:language,:relation,:coverage,:rights,:comment,:abstract,:owner,:date_created,:date_issued,:date_modified,:date_available,:date_valid,:src_filename_base
+  def initialize(search_field='',q='')
+    @search_field,@q=search_field,q
+    # default every field to '' so later =~ /\S/ checks are safe
+    @text1=@fulltext=@keywords=@title=@author=@topic_register=@subject=@description=@publisher=@editor=@contributor=@date=@type=@format=@identifier=@source=@language=@relation=@coverage=@rights=@comment=@abstract=@owner=@date_created=@date_issued=@date_modified=@date_available=@date_valid=@filename=''
+    if @search_field=~/\S/
+      # path 1: parse "prefix: value" pairs out of the free-text field
+      @text1 = text_to_match('text:')
+      @fulltext = text_to_match('fulltxt:')
+      @topic_register = text_to_match('topic_register:')
+      @title = text_to_match('title:') # DublinCore 1 - title
+      @author = text_to_match('(?:author|creator)s?:') # DublinCore 2 - creator/author
+      @subject = text_to_match('subj(?:ect)?:') # DublinCore 3 - subject
+      @description = text_to_match('description:') # DublinCore 4 - description
+      @publisher = text_to_match('pub(?:lisher)?:') # DublinCore 5 - publisher
+      @editor = text_to_match('editor:')
+      @contributor = text_to_match('contributor:') # DublinCore 6 - contributor
+      @date = text_to_match('date:') # DublinCore 7 - date dd-mm-yy
+      @type = text_to_match('type:') # DublinCore 8 - type
+      @format = text_to_match('format:') # DublinCore 9 - format
+      @identifier = text_to_match('identifier:') # DublinCore 10 - identifier
+      @source = text_to_match('source:') # DublinCore 11 - source
+      @language = text_to_match('language:') # DublinCore 12 - language
+      @relation = text_to_match('relation:') # DublinCore 13 - relation
+      @coverage = text_to_match('coverage:') # DublinCore 14 - coverage
+      @rights = text_to_match('rights:') # DublinCore 15 - rights
+      @keywords = text_to_match('key(?:words?)?:')
+      @comment = text_to_match('comment:')
+      @abstract = text_to_match('abs(?:tract)?:')
+      @owner = text_to_match('owner:')
+      @date_created = text_to_match('date_created:')
+      @date_issued = text_to_match('date_issued:')
+      @date_modified = text_to_match('date_modified:')
+      @date_available = text_to_match('date_available:')
+      @date_valid = text_to_match('date_valid:')
+      @filename = text_to_match('src_filename_base:')
+      # bare query (no recognized prefix) is treated as plain text search
+      @text1 = text_to_match unless @keywords or @author or @title or @text1 or @fulltext or @comment or @abstract or @rights or @subject or @publisher or @date or @filename or @topic_register
+    else
+      # path 2: discrete CGI parameters (canned-search URLs)
+      @text1 = q['s1'] if q['s1'] =~ /\S/
+      @fulltext = q['ft'] if q['ft'] =~ /\S/
+      @keywords = q['key'] if q['key'] =~ /\S/
+      @title = q['ti'] if q['ti'] =~ /\S/
+      @author = q['au'] if q['au'] =~ /\S/
+      @topic_register = q['tr'] if q['tr'] =~ /\S/
+      @subject = q['sj'] if q['sj'] =~ /\S/
+      @description = q['dsc'] if q['dsc'] =~ /\S/
+      @publisher = q['pb'] if q['pb'] =~ /\S/
+      # NOTE(review): editor and contributor both read q['cntr']; the 'edt'
+      # parameter emitted elsewhere in this script is never read here.
+      @editor = q['cntr'] if q['cntr'] =~ /\S/
+      @contributor = q['cntr'] if q['cntr'] =~ /\S/
+      @date = q['dt'] if q['dt'] =~ /\S/
+      @type = q['ty'] if q['ty'] =~ /\S/
+      @identifier = q['id'] if q['id'] =~ /\S/
+      @source = q['src'] if q['src'] =~ /\S/
+      @language = q['lang'] if q['lang'] =~ /\S/
+      @relation = q['rel'] if q['rel'] =~ /\S/
+      @coverage = q['cov'] if q['cov'] =~ /\S/
+      @rights = q['cr'] if q['cr'] =~ /\S/
+      @comment = q['co'] if q['co'] =~ /\S/
+      @abstract = q['ab'] if q['ab'] =~ /\S/
+      @date_created = q['dtc'] if q['dtc'] =~ /\S/
+      @date_issued = q['dti'] if q['dti'] =~ /\S/
+      @date_modified = q['dtm'] if q['dtm'] =~ /\S/
+      @date_available = q['dta'] if q['dta'] =~ /\S/
+      @date_valid = q['dtv'] if q['dtv'] =~ /\S/
+      @filename = if q['doc'] and q['search'] !~ /search db/ then q['doc']
+      elsif q['fns'] =~ /\S/ then q['fns']
+      end
+      @@limit = q['ltd'] if q['ltd'] =~ /\d+/ # 1000
+      @@offset = q['off'] if q['off'] =~ /\d+/ # 0
+    end
+  end
+# Extracts the value following a "prefix:" marker from @search_field and
+# returns it with internal whitespace collapsed to '+'; nil when no match.
+def text_to_match(identifier='')
+  # NOTE(review): duplicate :string key -- the first regex literal is dead,
+  # the second (terminated by ';' or end-of-line) wins in Ruby >= 1.9.
+  # NOTE(review): m[:braces] below is never defined in this hash, so that
+  # branch can never match (`@search_field =~ nil` is falsy).
+  m={
+    string: /#{identifier}\s*(.+?)/,
+    string: /#{identifier}\s*(.+?)(?:;|\n|\r|$)/,
+    word: /#{identifier}[\s(]*(\S+)/
+  }
+  search_string=if @search_field =~m[:word]
+    search_string=if @search_field =~m[:braces] then m[:braces].match(@search_field)[1]
+    elsif @search_field =~m[:string] then m[:string].match(@search_field)[1]
+    else
+      str=m[:word].match(@search_field)[1]
+      str=str.gsub(/[()]/,'')
+      str
+    end
+    search_string=search_string.strip.gsub(/\s+/,'+')
+    #else
+    # "__"
+  end
+  end
+end
+# Turns one search field into a SQL "col LIKE ('%term%')" clause fragment.
+# AND / OR tokens inside the user's term become SQL conjunctions between
+# separate LIKE clauses on the same column.
+class DBI_SearchString
+  # l   -- column name (e.g. 'doc_objects.clean')
+  # t   -- value parsed from the free-text form field
+  # q   -- value from the discrete CGI parameter (fallback when t is blank)
+  # cse -- NOTE(review): accepted but never used; case sensitivity is not
+  #        implemented in this class.
+  def initialize(l,t,q,cse=false)
+    @l,@t,@q=l,t,q
+  end
+  # Returns { search: [clause,...], flag: bool }; flag is true only when a
+  # non-blank term produced a clause.  Returns the hash either way.
+  def string
+    search={ search: [], flag: false }
+    if @t =~/\S+/ or @q =~/\S+/
+      if @t =~/\S+/ then unescaped_search=CGI.unescape(@t)
+      elsif @q =~/\S+/ then unescaped_search=CGI.unescape(@q)
+      end
+      search_construct=[]
+      # WARNING(review): the unescaped user term is interpolated directly
+      # into the SQL text (no parameter binding) -- SQL injection risk.
+      unescaped_search=unescaped_search.gsub(/\s*(AND|OR)\s*/,"%' \) \\1 #{@l} LIKE \( '%").
+        gsub(/(.+)/,"#{@l} LIKE \( '%\\1%' \)")
+      search_construct << unescaped_search
+      search_construct=search_construct.join(' ')
+      search[:search] << search_construct
+      search[:flag]=true
+      search
+    end
+    search
+  end
+end
+# Assembles and executes the full SQL SELECT for a search request, and
+# provides paging helpers (canned URLs, prev/next navigation HTML).
+class DBI_SearchStatement
+  attr_reader :text_search_flag,:sql_select_body_format,:sql_offset,:sql_limit
+  # conn       -- open SQLite3::Database handle
+  # search_for -- populated SearchRequest
+  # q          -- CGI parameter hash (fallback values)
+  # c          -- case-sensitivity flag string ('checked' or '')
+  def initialize(conn,search_for,q,c)
+    @conn=conn
+    @text_search_flag=false
+    @sql_statement={ body: '', endnotes: '', range: '' }
+    #@offset||=@@offset
+    #@offset+=@@limit
+    search={ text: [], endnotes: [] }
+    cse=(c =~/\S/) ? true : false
+    # body-text clause; its flag decides whether any text search ran at all
+    st=DBI_SearchString.new('doc_objects.clean',search_for.text1,q['s1'],cse).string
+    # NOTE(review): se (endnotes clause) is built but never used below;
+    # search[:endnotes] is never populated.
+    se=DBI_SearchString.new('endnotes.clean',search_for.text1,q['s1'],cse).string
+    @text_search_flag=st[:flag]
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    # one identical stanza per searchable metadata column: build the clause,
+    # append it when the field was actually supplied
+    st = DBI_SearchString.new('metadata_and_text.fulltext',search_for.fulltext,q['ft'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.title',search_for.title,q['ti'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.creator_author',search_for.author,q['au'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.classify_topic_register',search_for.topic_register,q['tr'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.classify_subject',search_for.subject,q['sj'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.classify_keywords',search_for.keywords,q['key'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.notes_description',search_for.description,q['dsc'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.publisher',search_for.publisher,q['pb'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.creator_editor',search_for.editor,q['cntr'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.creator_contributor',search_for.contributor,q['cntr'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.date_published',search_for.date,q['dt'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.notes_type',search_for.type,q['ty'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.original_source',search_for.source,q['src'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.language_document_char',search_for.language,q['lang'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.notes_relation',search_for.relation,q['rel'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.notes_coverage',search_for.coverage,q['cov'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.rights_all',search_for.rights,q['cr'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.notes_comment',search_for.comment,q['co'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.notes_abstract',search_for.abstract,q['ab'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    st = DBI_SearchString.new('metadata_and_text.src_filename_base',search_for.src_filename_base,q['fns'],cse).string
+    if st[:flag]
+      search[:text] << st[:search]
+    end
+    @@limit=q['ltd'] if q['ltd']=~/\d+/ # 1000
+    @@offset=q['off'] if q['off']=~/\d+/ # 0
+    # join all clauses with AND; then parenthesize runs of doc_objects.clean
+    # LIKE clauses so their internal AND/OR binds tighter than the field ANDs
+    @search_text=''
+    @search_text=search[:text].flatten.join(' AND ')
+    @search_text=@search_text.gsub(/(doc_objects\.clean\s+LIKE\s+\(\s*'%[^']+%'\s*\)\s+(?:(?:AND|OR)\s+doc_objects\.clean\s+LIKE\s+\(\s*'%[^']+%'\s*\))+)/,'(\1)')
+  end
+# Current paging offset (shadows the attr_reader of the same name).
+def sql_offset
+  @@offset
+end
+# Maximum number of rows per results page.
+def sql_match_limit
+  @@limit
+end
+# Returns self with current/next/previous/start URL helper methods available.
+# NOTE(review): the nested `def`s are (re)defined as instance methods each
+# time this runs -- unconventional, but the offsets they read are refreshed
+# via the ivars set here.
+def sql_canned_search
+  @offset_next=sql_offset.to_i + sql_match_limit.to_i
+  @offset_previous=sql_offset.to_i - sql_match_limit.to_i
+  def current
+    @@canned_search_url.to_s + '&ltd=' + sql_match_limit.to_s + '&off=' + sql_offset.to_s
+  end
+  def next
+    @@canned_search_url.to_s + '&ltd=' + sql_match_limit.to_s + '&off=' + @offset_next.to_s
+  end
+  def previous
+    @offset_previous >= 0 \
+    ? (@@canned_search_url.to_s + '&ltd=' + sql_match_limit.to_s + '&off=' + @offset_previous.to_s)
+    : ''
+  end
+  def start
+    @@canned_search_url.to_s + '&ltd=' + sql_match_limit.to_s + '&off=' + 0.to_s
+  end
+  self
+end
+# Renders prev/next page navigation HTML.  beyond_limit: more results exist
+# past this page; img: base URL for the arrow images.  Branches on page
+# number: first page, second page, or later (which also gets a |< start link).
+def pre_next(beyond_limit,img)
+  can=sql_canned_search
+  page=(sql_offset.to_i + sql_match_limit.to_i)/sql_match_limit.to_i
+  if beyond_limit
+    if page.to_s =~ /^1$/
+      %{<br /><center>
+        pg. #{page.to_s}
+        <a href="#{can.next}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_next_red.png" alt="&nbsp;&gt;&gt;" />
+        </a>
+      </center>}
+    elsif page.to_s =~ /^2$/
+      %{<br /><center>
+        <a href="#{can.previous}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="&lt;&lt;&nbsp;" />
+        </a>
+        pg. #{page.to_s}
+        <a href="#{can.next}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_next_red.png" alt="&nbsp;&gt;&gt;" />
+        </a>
+      </center>}
+    else
+      %{<br /><center>
+        <a href="#{can.start}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="|&lt;&nbsp;" />
+        </a>
+        <a href="#{can.previous}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="&lt;&lt;&nbsp;" />
+        </a>
+        pg. #{page.to_s}
+        <a href="#{can.next}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_next_red.png" alt="&nbsp;&gt;&gt;" />
+        </a>
+      </center>}
+    end
+  else
+    if page.to_s =~ /^1$/ then ''
+    elsif page.to_s =~ /^2$/
+      %{<br /><center>
+        <a href="#{can.previous}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="&lt;&lt;&nbsp;" />
+        </a>
+        pg. #{page.to_s}
+      </center>}
+    else
+      %{<br /><center>
+        <a href="#{can.start}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="|&lt;&nbsp;" />
+        </a>
+        <a href="#{can.previous}">
+          <img border="0" width="22" height="22" src="#{img}/arrow_prev_red.png" alt="&lt;&lt;&nbsp;" />
+        </a>
+        pg. #{page.to_s}
+      </center>}
+    end
+  end
+end
+# Builds the full SELECT (body + LIMIT/OFFSET range) joining doc_objects to
+# metadata_and_text with the WHERE clause assembled in initialize.
+def sql_select_body
+  limit ||= @@limit
+  offset ||= @@offset
+  @sql_statement[:body] = %{
+    SELECT metadata_and_text.title, metadata_and_text.creator_author, metadata_and_text.src_filename_base, metadata_and_text.language_document_char, metadata_and_text.notes_suffix, doc_objects.body, doc_objects.seg_name, doc_objects.ocn, metadata_and_text.uid
+    FROM doc_objects, metadata_and_text
+    WHERE #{@search_text} AND doc_objects.uid_metadata_and_text = metadata_and_text.uid
+    ORDER BY metadata_and_text.language_document_char, metadata_and_text.title, metadata_and_text.src_filename_base, doc_objects.ocn
+  }
+  @sql_statement[:range] = %{LIMIT #{limit} OFFSET #{offset} ;}
+  select = @sql_statement[:body] + ' ' + @sql_statement[:range]
+  select
+  end
+  # The SELECT wrapped in small grey <font> markup for on-page echo.
+  def sql_select_body_format
+    %{<font color="#666666" size="2">#{sql_select_body}</font>}
+  end
+  # Executes the SELECT; returns sqlite3 result rows (results_as_hash mode).
+  def contents
+    @conn.execute(sql_select_body)
+  end
+end
+# Returns the static HTML page footer (SiSU credits, license links, contact
+# details) as a String.  The heredoc delimiter is single-quoted ('WOK') so
+# no interpolation occurs -- the footer is entirely literal.
+def tail
+  <<-'WOK'
+  <br /><hr /><br />
+<table summary="SiSU summary" cellpadding="2" border="0">
+  <!-- widget sisu -->
+<tr><td valign="top" width="10%">
+  <table summary="home button / home information" border="0" cellpadding="3" cellspacing="0">
+    <tr><td align="left">
+      <br /><a href="https://sisudoc.org/" target="_top">
+        <b>SiSU</b>
+      </a>
+      <br /><a href="https://git.sisudoc.org/" target="_top">
+        git
+      </a>
+    </td></tr>
+  </table>
+</td>
+<td valign="top" width="45%">
+<!-- SiSU Rights -->
+  <p class="tiny_left"><font color="#666666" size="2">
+    Generated by
+    SiSU 6.3.1 2014-10-19 (2014w41/7)
+    <br />
+    <a href="https://www.sisudoc.org" >
+      <b>SiSU</b></a> <sup>&copy;</sup> Ralph Amissah
+      1993, current 2014.
+      All Rights Reserved.
+    <br />
+    SiSU is software for document structuring, publishing and search,
+    <br />
+    <a href="https://www.sisudoc.org" >
+      www.sisudoc.org
+    </a>
+    sources
+    <a href="https://git.sisudoc.org" >
+      git.sisudoc.org
+    </a>
+    <br />
+    <i>w3 since October 3 1993</i>
+    <a href="mailto:ralph@amissah.com" >
+      ralph@amissah.com
+    </a>
+    <br />
+    mailing list subscription
+    <a href="https://lists.sisudoc.org/listinfo/sisu" >
+      https://lists.sisudoc.org/listinfo/sisu
+    </a>
+    <br />
+    <a href="mailto:sisu@lists.sisudoc.org" >
+      sisu@lists.sisudoc.org
+    </a>
+  </font></p>
+</td><td valign="top" width="45%">
+  <p class="tiny_left"><font color="#666666" size="2">
+    SiSU using:
+    <br />Standard SiSU markup syntax,
+    <br />Standard SiSU meta-markup syntax, and the
+    <br />Standard SiSU <u>object citation numbering</u> and system, (object/text identifying/locating system)
+    <br />
+    <sup>&copy;</sup> Ralph Amissah 1997, current 2014.
+    All Rights Reserved.
+  </font></p>
+</td></tr>
+  <!-- widget way better -->
+<tr><td valign="top" width="10%">
+  <p class="tiny_left"><font color="#666666" size="2">
+    <a href="https://www.gnu.org/licenses/gpl.html">
+      .:
+    </a>
+  </font></p>
+</td><td valign="top" width="45%">
+  <p class="tiny_left"><font color="#666666" size="2">
+    SiSU is released under
+    <a href="https://www.gnu.org/licenses/gpl.html">GPL&nbsp;v3</a>
+    or later,
+    <a href="https://www.gnu.org/licenses/gpl.html">
+      https://www.gnu.org/licenses/gpl.html
+    </a>
+  </font></p>
+</td><td valign="top" width="45%">
+  <p class="tiny_left"><font color="#666666" size="2">
+    SiSU, developed using
+    <a href="https://www.ruby-lang.org/en/">
+      Ruby
+    </a>
+    on
+    <a href="https://www.debian.org/">
+      Debian/Gnu/Linux
+    </a>
+    software infrastructure,
+    with the usual GPL (or OSS) suspects.
+  </font></p>
+</td></tr>
+</table>
+  <a name="bottom" id="bottom"></a><a name="down" id="down"></a><a name="end" id="end"></a><a name="finish" id="finish"></a><a name="stop" id="stop"></a><a name="credits" id="credits"></a>
+  </body></html>
+  WOK
+end
+# Cache the static footer once; initialize per-run counters and accumulators
+# used by the result-rendering code in the FCGI loop below.
+@tail=tail
+@counter_txt_doc,@counter_txt_ocn,@counter_endn_doc,@counter_endn_ocn=0,0,0,0
+@counters_txt,@counters_endn,@sql_select_body='','',''
+FCGI.each_cgi do |cgi|
+ begin # all code goes in begin section
+ @search={ text: [], endnotes: [] }
+ q=CGI.new
+ @db=if cgi['db'] =~ /#{@db_name_prefix}(\S+)/
+ @stub=$1
+ cgi['db']
+ else
+ @stub=@stub_default
+ @db_name_prefix + @stub
+ end
+ checked_url,checked_stats,checked_searched,checked_tip,checked_case,checked_echo,checked_sql,checked_all,checked_none,checked_selected,checked_default,selected_db='','','','','','','','',''
+ result_type=(cgi['view']=~/text/) \
+ ? result_type={ index: '', text: 'checked'}
+ : result_type={ index: 'checked', text: ''}
+ @@limit=if cgi['sql_match_limit'].to_s=~/2500/
+ checked_sql_limit={ l1000: '', l2500: 'checked'}
+ '2500'
+ else
+ checked_sql_limit={ l1000: 'checked', l2500: ''}
+ '1000'
+ end
+ checked_echo = 'checked' if cgi['echo'] =~/\S/
+ checked_stats = 'checked' if cgi['stats'] =~/\S/
+ checked_url = 'checked' if cgi['url'] =~/\S/ or cgi['u'].to_i==1
+ checked_searched = 'checked' if cgi['searched'] =~/\S/
+ checked_tip = 'checked' if cgi['tip'] =~/\S/
+ checked_case = 'checked' if cgi['casesense'] =~/\S/
+ checked_sql = 'checked' if cgi['sql'] =~/\S/
+ if cgi['checks'] =~ /check_all/ or cgi['check_all'] =~/\S/ or cgi['a'].to_i==1
+ checked_all = 'checked'
+ checked_echo=checked_stats=checked_url=checked_searched=checked_tip=checked_sql='checked'
+ checked_none =''
+ elsif cgi['checks'] =~ /check_none/
+ checked_none = 'checked'
+ checked_all=checked_url=checked_stats=checked_searched=checked_tip=checked_echo=checked_sql=''
+ elsif cgi['checks'] =~ /check_selected/
+ checked_selected = 'checked'
+ elsif cgi['checks'] =~ /check_default/
+ checked_default = 'checked'
+ checked_echo=checked_stats=checked_url='checked'
+ checked_searched=checked_tip=checked_case=checked_sql=''
+ else
+ checked_selected='checked'
+ checked_echo=checked_stats=checked_url='checked'
+ checked_searched=checked_tip=checked_case=checked_sql=''
+ end
+ selected_db=case cgi['db']
+ when /spine.sqlite/ then '<option value="spine.sqlite">spine</option>'
+ end
+ db_name='spine.search.sql.db'
+ #db_name='spine.sqlite.db'
+ #db_name='sisu_sqlite.db'
+ db_sqlite=case cgi['db']
+ when /spine.sqlite/ then "/srv/complete.sisudoc.org/web/manual/#{db_name}"
+ else "/var/www/sqlite/#{db_name}"
+ end
+ #when /spine.sqlite/ then "/srv/complete.sisudoc.org/web/manual/#{db_name}"
+ #else "/srv/complete.sisudoc.org/web/manual/#{db_name}"
+ #end
+ #@conn=SQLite3::Database.new(db_sqlite)
+ @conn=SQLite3::Database.new("/var/www/sqlite/spine.search.sql.db")
+ #@conn=SQLite3::Database.new("/var/www/spine.sqlite.db")
+ @conn.results_as_hash=true
+ search_field=cgi['find'] if cgi['find'] # =~/\S+/
+ @search_for=SearchRequest.new(search_field,q) #.analyze #% search_for
+ #% searches
+#Canned_search.new(@base,@search_for.text1,cgi)
+ if @search_for.text1=~/\S+/ or @search_for.fulltext=~/\S+/ or @search_for.author=~/\S+/ or @search_for.topic_register=~/\S+/ #and search_field =~/\S/
+ s1 = 's1=' + CGI.escape(@search_for.text1) if @search_for.text1 =~ /\S/
+ ft = '&ft=' + CGI.escape(@search_for.fulltext) if @search_for.fulltext =~ /\S/
+ key = 'key=' + CGI.escape(@search_for.keywords) if @search_for.keywords =~ /\S/
+ ti = '&ti=' + CGI.escape(@search_for.title) if @search_for.title =~ /\S/
+ au = '&au=' + CGI.escape(@search_for.author) if @search_for.author =~ /\S/
+ tr = '&tr=' + CGI.escape(@search_for.topic_register) if @search_for.topic_register =~ /\S/
+ sj = '&sj=' + CGI.escape(@search_for.subject) if @search_for.subject =~ /\S/
+ dsc = '&dsc=' + CGI.escape(@search_for.description) if @search_for.description =~ /\S/
+ pb = '&pb=' + CGI.escape(@search_for.publisher) if @search_for.publisher =~ /\S/
+ edt = '&edt=' + CGI.escape(@search_for.editor) if @search_for.editor =~ /\S/
+ cntr = '&cntr=' + CGI.escape(@search_for.contributor) if @search_for.contributor =~ /\S/
+ dt = '&dt=' + CGI.escape(@search_for.date) if @search_for.date =~ /\S/
+ ty = '&ty=' + CGI.escape(@search_for.type) if @search_for.type =~ /\S/
+ id = '&id=' + CGI.escape(@search_for.identifier) if @search_for.identifier =~ /\S/
+ src = '&src=' + CGI.escape(@search_for.source) if @search_for.source =~ /\S/
+ lang = '&lang=' + CGI.escape(@search_for.language) if @search_for.language =~ /\S/
+ rel = '&rel=' + CGI.escape(@search_for.relation) if @search_for.relation =~ /\S/
+ cov = '&cov=' + CGI.escape(@search_for.coverage) if @search_for.coverage =~ /\S/
+ cr = '&cr=' + CGI.escape(@search_for.rights) if @search_for.rights =~ /\S/
+ co = '&co=' + CGI.escape(@search_for.comment) if @search_for.comment =~ /\S/
+ ab = '&ab=' + CGI.escape(@search_for.abstract) if @search_for.abstract =~ /\S/
+ dtc = '&dtc=' + CGI.escape(@search_for.date_created) if @search_for.date_created =~ /\S/
+ dti = '&dti=' + CGI.escape(@search_for.date_issued) if @search_for.date_issued =~ /\S/
+ dtm = '&dtm=' + CGI.escape(@search_for.date_modified) if @search_for.date_modified =~ /\S/
+ dta = '&dta=' + CGI.escape(@search_for.date_available) if @search_for.date_available =~ /\S/
+ dtv = '&dtv=' + CGI.escape(@search_for.date_valid) if @search_for.date_valid =~ /\S/
+ fns = '&fns=' + CGI.escape(@search_for.src_filename_base) if @search_for.src_filename_base =~ /\S/
+ @@canned_search_url=(checked_all =~/checked/) \
+ ? "#{@base}?#{s1}#{ft}#{key}#{ti}#{au}#{tr}#{sj}#{dsc}#{pb}#{edt}#{cntr}#{dt}#{ty}#{id}#{src}#{lang}#{rel}#{cov}#{cr}#{co}#{ab}#{dtc}#{dti}#{dtm}#{dta}#{dtv}#{fns}&db=#{cgi['db']}&view=#{cgi['view']}&a=1"
+ : "#{@base}?#{s1}#{ft}#{key}#{ti}#{au}#{tr}#{sj}#{dsc}#{pb}#{edt}#{cntr}#{dt}#{ty}#{id}#{src}#{lang}#{rel}#{cov}#{cr}#{co}#{ab}#{dtc}#{dti}#{dtm}#{dta}#{dtv}#{fns}&db=#{cgi['db']}&view=#{cgi['view']}"
+ mod=ft=~/\S+/ ? (ft.gsub(/ft/,'s1')) : s1
+ @canned_base_url="#{@base}?#{mod}&db=#{cgi['db']}"
+ if checked_case=~/\S/
+ @search[:text][1]=%{doc_objects.clean~'#{@search_for.text1}'} #s1
+ else
+ @search[:text][1]=%{doc_objects.clean~*'#{@search_for.text1}'} #s1
+ end
+ canned_note='search url:'
+ else
+ @@canned_search_url="#{@base}?db=#{@db}&view=index"
+ canned_note='search url example:'
+ end
+ if search_field =~/\S+/
+ analyze_format=search_field.gsub(/\s*\n/,'; ')
+ elsif checked_all =~/checked/ or checked_url =~/checked/
+ canned_search=@@canned_search_url.scan(/(?:s1|ft|au|ti|fns|tr)=[^&]+/)
+ af=canned_search.join('; ')
+ af=af.gsub(/s1=/,'text: ').
+ gsub(/ft=/,'fulltxt: ').
+ gsub(/au=/,'author: ').
+ gsub(/ti=/,'title: ').
+ gsub(/fns=/,'src_filename_base: ').
+ gsub(/tr=/,'topic_register: ').
+ gsub(/%2B/,' ')
+ analyze_format=af
+ st=af.split(/\s*;\s*/)
+ search_field=st.join("\n")
+ end
+ green=%{<font size="2" color="#004000">}
+ canned_search_url_txt=CGI.escapeHTML(@@canned_search_url)
+ the_can=%{<font size="2" color="#666666">#{canned_note} <a href="#{@@canned_search_url}">#{canned_search_url_txt}</a></font><br />}
+ p_text=p_fulltext=p_keywords=p_title=p_author=p_topic_register=p_subject=p_description=p_publisher=p_editor=p_contributor=p_date=p_type=p_format=p_identifier=p_source=p_language=p_relation=p_coverage=p_rights=p_comment=p_abstract=p_filename=''
+ p_filename = %{src_filename_base: #{green}#{@search_for.src_filename_base}</font><br />} if @search_for.src_filename_base =~ /\S+/
+ p_text = %{text: #{green}#{@search_for.text1}</font><br />} if @search_for.text1 =~ /\S+/
+ p_fulltext = %{fulltxt: #{green}#{@search_for.fulltext}</font><br />} if @search_for.fulltext =~ /\S+/
+ p_title = %{title: #{green}#{@search_for.title}</font><br />} if @search_for.title =~ /\S+/
+ p_author = %{author: #{green}#{@search_for.author}</font><br />} if @search_for.author =~ /\S+/
+ p_editor = %{editor: #{green}#{@search_for.editor}</font><br />} if @search_for.editor =~ /\S+/
+ p_contributor = %{contributor: #{green}#{@search_for.contributor}</font><br />} if @search_for.contributor =~ /\S+/
+ p_date = %{date: #{green}#{@search_for.date}</font><br />} if @search_for.date =~ /\S+/
+ p_rights = %{rights: #{green}#{@search_for.rights}</font><br />} if @search_for.rights =~ /\S+/
+# Echo back each metadata search field the user actually filled in, as a
+# colorized "name: value" HTML fragment for the results header. Fields
+# left blank stay nil and simply vanish from the heredoc interpolation below.
+ p_topic_register = %{topic_register: #{green}#{@search_for.topic_register}</font><br />} if @search_for.topic_register =~ /\S+/
+ p_subject = %{subject: #{green}#{@search_for.subject}</font><br />} if @search_for.subject =~ /\S+/
+ p_keywords = %{keywords: #{green}#{@search_for.keywords}</font><br />} if @search_for.keywords =~ /\S+/
+ p_identifier = %{identifier: #{green}#{@search_for.identifier}</font><br />} if @search_for.identifier =~ /\S+/
+ p_type = %{type: #{green}#{@search_for.type}</font><br />} if @search_for.type =~ /\S+/
+ p_format = %{format: #{green}#{@search_for.format}</font><br />} if @search_for.format =~ /\S+/
+ p_relation = %{relation: #{green}#{@search_for.relation}</font><br />} if @search_for.relation =~ /\S+/
+ p_coverage = %{coverage: #{green}#{@search_for.coverage}</font><br />} if @search_for.coverage =~ /\S+/
+ p_description = %{description: #{green}#{@search_for.description}</font><br />} if @search_for.description =~ /\S+/
+ p_abstract = %{abstract: #{green}#{@search_for.abstract}</font><br />} if @search_for.abstract =~ /\S+/
+ p_comment = %{comment: #{green}#{@search_for.comment}</font><br />} if @search_for.comment =~ /\S+/
+ p_publisher = %{publisher: #{green}#{@search_for.publisher}</font><br />} if @search_for.publisher =~ /\S+/
+ p_source = %{source: #{green}#{@search_for.source}</font><br />} if @search_for.source =~ /\S+/
+ p_language = %{language: #{green}#{@search_for.language}</font><br />} if @search_for.language =~ /\S+/
+# The "search note" block shown above the results: database name, selected
+# view, the analyzed search string, and the per-field echoes built above.
+# NOTE(review): p_text, p_fulltext, p_title etc. are presumably assigned
+# upstream of this chunk — verify against the preceding section.
+ search_note=<<-WOK
+ <font size="2" color="#666666">
+ <b>database:</b> #{green}#{@db}</font>; <b>selected view:</b> #{green}#{cgi['view']}</font>
+ <b>search string:</b> "#{green}#{analyze_format}</font>"<br />
+ #{p_text} #{p_fulltext} #{p_keywords} #{p_title} #{p_author} #{p_topic_register} #{p_subject} #{p_description} #{p_publisher} #{p_editor} #{p_contributor} #{p_date} #{p_type} #{p_format} #{p_identifier} #{p_source} #{p_language} #{p_relation} #{p_coverage} #{p_rights} #{p_comment} #{p_abstract} #{p_filename}
+ </font>
+ WOK
+#eg = %{canned search e.g.:<br /> <a href="#{url}">#{url}</a><br />find: #{analyze}<br />database: #{database}}
+#% dbi_canning
+# Page header: the search submission form repopulated with the current field
+# values and checkbox states, so the user can refine the query in place.
+@header = Form.new(@base,search_field,selected_db,result_type,checked_sql_limit,checked_tip,checked_stats,checked_searched,checked_url,checked_case,checked_echo,checked_sql,checked_all,checked_none,checked_selected,checked_default,search_note,the_can).submission_form #% form
+# No usable query (no text term, no author, no accumulated search clause):
+# emit just the form and footer, then fall through to the request's end.
+unless q['s1'] =~/\S/ or q['au'] =~/\S/ or @search[:text][1] =~/\S/
+ print "Content-type: text/html\n\n"
+ puts (@header+@tail)
+else #% searches
+ # s1: the raw text search term used for matching and display below.
+ s1=(@search_for.text1 =~/\S/) \
+ ? @search_for.text1
+ : 'Unavailable'
+ # Append the term as a match clause on doc_objects.clean.
+ # NOTE(review): '~' / '~*' look like PostgreSQL's case-sensitive /
+ # case-insensitive regex-match operators — confirm against
+ # DBI_SearchStatement, which consumes @search[:text].
+ if checked_case=~/\S/
+ @search[:text]<<%{doc_objects.clean~'#{CGI.unescape(s1)}'}
+ else
+ @search[:text]<<%{doc_objects.clean~*'#{CGI.unescape(s1)}'}
+ end
+ #% dbi_request
+ # Build and execute the SQL statement for all collected search criteria.
+ dbi_statement=DBI_SearchStatement.new(@conn,@search_for,q,checked_case)
+ @text_search_flag=false
+ @text_search_flag=dbi_statement.text_search_flag
+ s_contents=dbi_statement.contents
+ @body_main=''
+ @search_regx=nil
+ olduid=""
+ # When a text search ran and the user ticked "show sql", prepend the
+ # generated SQL (formatted) to the results body.
+ if @text_search_flag
+ if checked_sql =~/\S/
+ sql_select_body=dbi_statement.sql_select_body_format
+ else sql_select_body=''
+ end
+ @body_main << sql_select_body
+ #@body_main << '<p><hr><br /><b>Main Text:</b><br />' << sql_select_body
+ else
+ end
+ # Base URL under which the generated document output is served; the
+ # path_* helpers below build concrete links beneath it.
+ @hostpath = "#{@hosturl_files}"
+ #@hostpath="#{@hosturl_files}/#{@stub}"
+ def path_manifest(fn,ln=nil)
+ case @output_dir_structure_by
+ when 'filename'
+ @lingual =='mono' \
+ ? "#{@hostpath}/#{fn}/sisu_manifest.html"
+ : "#{@hostpath}/#{fn}/sisu_manifest.#{ln}.html"
+ when 'filetype'
+ @lingual =='mono' \
+ ? "#{@hostpath}/manifest/#{fn}.html"
+ : "#{@hostpath}/manifest/#{fn}.#{ln}.html"
+ else
+ "#{@hostpath}/#{ln}/manifest/#{fn}.html"
+ end
+ end
+ def path_html_seg(fn,ln=nil)
+ case @output_dir_structure_by
+ when 'filename'
+ "#{@hostpath}/#{fn}"
+ when 'filetype'
+ "#{@hostpath}/html/#{fn}"
+ else
+ "#{@hostpath}/#{ln}/html/#{fn}"
+ end
+ end
+ def path_toc(fn,ln=nil)
+ if @output_dir_structure_by =='filename' \
+ or @output_dir_structure_by =='filetype'
+ @lingual =='mono' \
+ ? "#{path_html_seg(fn,ln)}/toc.html"
+ : "#{path_html_seg(fn,ln)}/toc.#{ln}.html"
+ else
+ "#{path_html_seg(fn,ln)}/toc.html"
+ end
+ end
+ def path_filename(fn,seg_name,ln=nil)
+ if @output_dir_structure_by =='filename' \
+ or @output_dir_structure_by =='filetype'
+ @lingual =='mono' \
+ ? "#{path_html_seg(fn,ln)}/#{seg_name}.html"
+ : "#{path_html_seg(fn,ln)}/#{seg_name}.#{ln}.html"
+ else
+ "#{path_html_seg(fn,ln)}/#{seg_name}.html"
+ end
+ end
+ def path_html_doc(fn,ln=nil)
+ case @output_dir_structure_by
+ when 'filename'
+ @lingual =='mono' \
+ ? "#{path_html_seg(fn,ln)}/scroll.html"
+ : "#{path_html_seg(fn,ln)}/scroll.#{ln}.html"
+ when 'filetype'
+ @lingual =='mono' \
+ ? "#{@hostpath}/html/#{fn}.html"
+ : "#{@hostpath}/html/#{fn}.#{ln}.html"
+ else
+ "#{@hostpath}/#{ln}/html/#{fn}.html"
+ end
+ end
+#% text_objects_body
+# Render each matched text object (one DB row per paragraph/object) into
+# @body_main, either as highlighted text or as an index of ocn links,
+# depending on the requested view. olduid tracks document boundaries.
+s_contents.each do |c| #% text body
+ # Strip the markup suffix (.sst / .ssm.sst) to get the document stub.
+ location=c['src_filename_base'][/(.+?)\.(?:ssm\.sst|sst)$/,1]
+ file_suffix=c['src_filename_base'][/.+?\.(ssm\.sst|sst)$/,1]
+ # A "~xx"/"~xxx" tail on the stub is a language code: peel it off the
+ # location and keep it (dot-prefixed) in l. Otherwise lang is ''.
+ lang=if location =~ /\S+?~(\S\S\S?)$/
+ l=location[/\S+?~(\S\S\S?)$/,1]
+ location=location.gsub(/(\S+?)~\S\S\S?/,'\1')
+ l=".#{l}"
+ else ''
+ end
+#% metadata_found_body
+ # First row of a new document: emit a title banner with toc, manifest
+ # and canned-search links; subsequent rows of the same uid get none.
+ if c['uid'] != olduid
+ ti=c['title']
+ can_txt_srch=(cgi['view']=~/index/) \
+ ? %{<a href="#{@canned_base_url}&fns=#{c['src_filename_base']}&lang=#{c['language_document_char']}&view=text"><img border="0" width="24" height="16" src="#{@image_src}/b_search.png" alt="search"></a>&nbsp;}
+ : %{<a href="#{@canned_base_url}&fns=#{c['src_filename_base']}&lang=#{c['language_document_char']}&view=index"><img border="0" width="24" height="16" src="#{@image_src}/b_search.png" alt="search"></a>&nbsp;}
+ title = %{<span style="background-color: #{@color_heading}"><a href="#{path_toc(location,c['language_document_char'])}"><img border="0" width="15" height="18" src="#{@image_src}/b_toc.png" alt="">&nbsp;#{ti}</a></span> [#{c['language_document_char']}] by #{c['creator_author']} <a href="#{path_manifest(location,c['language_document_char'])}"><img border="0" width="15" height="15" src="#{@image_src}/b_info.png" alt=""></a> #{can_txt_srch}<br />}
+ title=@text_search_flag \
+ ? '<br /><hr>'+title
+ : '<br />'+title
+ @counter_txt_doc+=1
+ olduid=c['uid']
+ else title=''
+ end
+ if @text_search_flag
+ if cgi['view']=~/text/ \
+ or (cgi['view']!~/index/ and cgi['search'] !~/search db/) #% txt body
+ text=if c['suffix'] !~/1/ #seg
+ if @search_for.text1 =~/\S+/ \
+ or q['s1'] =~/\S+/ #% only this branch is working !!
+ # Prefer the parsed term, fall back to the raw CGI parameter.
+ unescaped_search=if @search_for.text1 =~/\S+/
+ CGI.unescape(@search_for.text1)
+ elsif q['s1'] =~/\S+/
+ CGI.unescape(q['s1'])
+ else nil
+ end
+ # Build a case-insensitive highlight regex from the search words:
+ # AND/OR become alternation bars, everything else matches literally.
+ @search_regx=if unescaped_search #check
+ search_regex=[]
+ build=unescaped_search.scan(/\S+/).each do |g|
+ (g.to_s =~/(AND|OR)/) \
+ ? (search_regex << '|')
+ : (search_regex << %{#{g.to_s}})
+ end
+ search_regex=search_regex.join(' ')
+ search_regex=search_regex.gsub(/\s*\|\s*/,'|')
+ Regexp.new(search_regex, Regexp::IGNORECASE)
+ else nil
+ end
+ else nil
+ end
+ # NOTE(review): `.to_s.class==String` is always true, so this guard
+ # effectively only tests that the regex source has >= 2 non-space
+ # chars; a nil @search_regx yields "" and falls to the plain body.
+ matched_para=(@search_regx.to_s.class==String && @search_regx.to_s=~/\S\S+/) \
+ ? (c['body'].gsub(/(<a\s+href="https?:\/\/[^><\s]+#{@search_regx}[^>]+?>|#{@search_regx})/mi,%{<span style="background-color: #{@color_match}">\\1</span>}))
+ : c['body']
+ %{<hr><p><font size="2">ocn <b><a href="#{path_filename(location,c['seg_name'],c['language_document_char'])}##{c['ocn']}">#{c['ocn']}</a></b>:</font></p>#{matched_para}}
+ elsif c['suffix'] =~/1/ #doc
+ %{#{title}<hr><p><font size="2">ocn #{c['ocn']}:#{c['body']}}
+ end
+ @counter_txt_ocn+=1
+ output=title+text
+ else #elsif cgi['view']=~/index/ #% idx body
+ # Index view: emit only the ocn as a link into the seg or scroll output.
+ if c['suffix'] !~/1/ #seg
+ index=%{<a href="#{path_filename(location,c['seg_name'],c['language_document_char'])}##{c['ocn']}">#{c['ocn']}</a>, } if @text_search_flag
+ elsif c['suffix'] =~/1/ #doc #FIX
+ index=%{<a href="#{path_html_doc(location,c['language_document_char'])}##{c['ocn']}">#{c['ocn']}</a>, }
+ end
+ if c['seg_name'] =~/\S+/
+ if @text_search_flag
+ @counter_txt_ocn+=1
+ output=title+index
+ end
+ else
+ @counter_txt_ocn+=1
+ # NOTE(review): c['ocn'].sort looks like it expects an array, but
+ # ocn elsewhere reads as a scalar column — confirm; flagged #FIX above.
+ output=c['suffix'] !~/1/ \
+ ? title+index
+ : %{#{title}#{c['ocn'].sort}, }
+ end
+ end
+ else output=title
+ end
+ # Match statistics line (shown when "stats" is ticked); @@lt_t records
+ # whether the match limit was hit, which drives the next-page link.
+ @counters_txt=if @counter_txt_doc > 0
+ if checked_stats =~/\S/
+ @@lt_t=(@counter_txt_ocn==dbi_statement.sql_match_limit.to_i) ? true : false
+ start=(@@offset.to_i+1).to_s
+ range=(@@offset.to_i+@counter_txt_ocn.to_i).to_s
+ %{<hr /><font size="2" color="#666666">Found #{@counter_txt_ocn} times in the main body of #{@counter_txt_doc} documents [ matches #{start} to #{range} ]</font><br />}
+ else ''
+ end
+ else ''
+ end
+ @body_main << output #+ details
+end
+olduid = ""
+ # Pagination: emit previous/next links only when a page filled up
+ # (@@lt_t for text matches, @@lt_e presumably for endnote matches —
+ # set elsewhere; verify).
+ offset=dbi_statement.sql_offset.to_s
+ limit=dbi_statement.sql_match_limit.to_s
+ @@lt_t ||=false; @@lt_e ||=false
+ canned=(@@lt_t or @@lt_e) \
+ ? dbi_statement.pre_next(true,@image_src).to_s
+ : dbi_statement.pre_next(false,@image_src).to_s
+ # NOTE(review): limit is assigned the same value twice; the second
+ # assignment below is redundant.
+ limit=dbi_statement.sql_match_limit.to_s
+ # Emit the whole page; every fragment is forced to UTF-8 so the
+ # concatenation cannot raise Encoding::CompatibilityError.
+ cgi.out{
+ @header.force_encoding("UTF-8") \
+ + @counters_txt.force_encoding("UTF-8") \
+ + @counters_endn.force_encoding("UTF-8") \
+ + canned.force_encoding("UTF-8") \
+ + @body_main.force_encoding("UTF-8") \
+ + canned.force_encoding("UTF-8") \
+ + @tail.force_encoding("UTF-8")
+ } #% print cgi_output_header+counters+body
+ end
+ # Render any failure as an HTML-escaped backtrace page instead of a 500
+ # with no body. NOTE(review): rescue Exception also swallows
+ # SystemExit/Interrupt; `next` implies this sits in a per-request loop
+ # (FastCGI-style) that begins upstream of this chunk.
+ rescue Exception => e
+ s='<pre>' + CGI::escapeHTML(e.backtrace.reverse.join("\n"))
+ s << CGI::escapeHTML(e.message) + '</pre>'
+ cgi.out{s}
+ next
+ ensure # eg. disconnect from server
+ @conn.disconnect if @conn
+ end
+end