From 92d49cf6c5dec016fc1262e4436c62dc199f2b9d Mon Sep 17 00:00:00 2001
From: "Carol (Nichols || Goulding)"
Date: Sun, 3 May 2015 17:41:23 -0400
Subject: [PATCH] Remove unused extract_grammar.py

This script used to be used to extract the grammar sections from the
reference, but there is now a separate src/doc/grammar.md where the
grammar sections that used to be in the reference live, so there is no
longer a need to extract the grammar from the reference.
---
 src/etc/extract_grammar.py | 156 ------------------------------------
 1 file changed, 156 deletions(-)
 delete mode 100755 src/etc/extract_grammar.py

diff --git a/src/etc/extract_grammar.py b/src/etc/extract_grammar.py
deleted file mode 100755
index a12c3298cb3..00000000000
--- a/src/etc/extract_grammar.py
+++ /dev/null
@@ -1,156 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-# file at the top-level directory of this distribution and at
-# http://rust-lang.org/COPYRIGHT.
-#
-# Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-# http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-# <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-# option. This file may not be copied, modified, or distributed
-# except according to those terms.
-
-# This script is for extracting the grammar from the rust docs.
-
-import fileinput
-
-collections = {"gram": [],
-               "keyword": [],
-               "reserved": [],
-               "binop": [],
-               "unop": []}
-
-
-in_coll = False
-coll = ""
-
-for line in fileinput.input(openhook=fileinput.hook_encoded("utf-8")):
-    if in_coll:
-        if line.startswith("~~~~"):
-            in_coll = False
-        else:
-            if coll in ["keyword", "reserved", "binop", "unop"]:
-                for word in line.split():
-                    if word not in collections[coll]:
-                        collections[coll].append(word)
-            else:
-                collections[coll].append(line)
-
-    else:
-        if line.startswith("~~~~"):
-            for cname in collections:
-                if ("." + cname) in line:
-                    coll = cname
-                    in_coll = True
-                    break
-
-# Define operator symbol-names here
-
-tokens = ["non_star", "non_slash", "non_eol",
-          "non_single_quote", "non_double_quote", "ident"]
-
-symnames = {
-    ".": "dot",
-    "+": "plus",
-    "-": "minus",
-    "/": "slash",
-    "*": "star",
-    "%": "percent",
-
-    "~": "tilde",
-    "@": "at",
-
-    "!": "not",
-    "&": "and",
-    "|": "or",
-    "^": "xor",
-
-    "<<": "lsl",
-    ">>": "lsr",
-    ">>>": "asr",
-
-    "&&": "andand",
-    "||": "oror",
-
-    "<": "lt",
-    "<=": "le",
-    "==": "eqeq",
-    ">=": "ge",
-    ">": "gt",
-
-    "=": "eq",
-
-    "+=": "plusequal",
-    "-=": "minusequal",
-    "/=": "divequal",
-    "*=": "starequal",
-    "%=": "percentequal",
-
-    "&=": "andequal",
-    "|=": "orequal",
-    "^=": "xorequal",
-
-    ">>=": "lsrequal",
-    ">>>=": "asrequal",
-    "<<=": "lslequal",
-
-    "::": "coloncolon",
-
-    "->": "rightarrow",
-    "<-": "leftarrow",
-    "<->": "swaparrow",
-
-    "//": "linecomment",
-    "/*": "openblockcomment",
-    "*/": "closeblockcomment",
-    "macro_rules": "macro_rules",
-    "=>": "eg",
-    "..": "dotdot",
-    ",": "comma"
-}
-
-lines = []
-
-for line in collections["gram"]:
-    line2 = ""
-    for word in line.split():
-        # replace strings with keyword-names or symbol-names from table
-        if word.startswith("\""):
-            word = word[1:-1]
-            if word in symnames:
-                word = symnames[word]
-            else:
-                for ch in word:
-                    if not ch.isalpha():
-                        raise Exception("non-alpha apparent keyword: "
-                                        + word)
-                if word not in tokens:
-                    if (word in collections["keyword"] or
-                        word in collections["reserved"]):
-                        tokens.append(word)
-                    else:
-                        raise Exception("unknown keyword/reserved word: "
-                                        + word)
-
-        line2 += " " + word
-    lines.append(line2)
-
-
-for word in collections["keyword"] + collections["reserved"]:
-    if word not in tokens:
-        tokens.append(word)
-
-for sym in collections["unop"] + collections["binop"] + symnames.keys():
-    word = symnames[sym]
-    if word not in tokens:
-        tokens.append(word)
-
-
-print("%start parser, token;")
-print("%%token %s ;" % ("\n\t, ".join(tokens)))
-for coll in ["keyword", "reserved"]:
-    print("%s: %s ; " % (coll, "\n\t| ".join(collections[coll])))
-for coll in ["binop", "unop"]:
-    print("%s: %s ; " % (coll, "\n\t| ".join([symnames[x]
-                                              for x in collections[coll]])))
-print("\n".join(lines))