diff --git a/.travis.yml b/.travis.yml
index 9cdbcba60..7dbdbf35a 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,18 +1,15 @@
-dist: trusty
-group: stable
-sudo: required
-
-env:
-  - SCALANATIVE_GC=boehm
+os: linux
 language: scala
+
 matrix:
   include:
     - jdk: openjdk8
 #    - jdk: openjdk11
 
 before_install:
-  - bash scripts/travis_setup.sh
+  - sudo apt-get update
+  - curl https://raw.githubusercontent.com/scala-native/scala-native/master/scripts/travis_setup.sh | bash -x
 
 before_script:
   - export JVM_OPTS="-Dfile.encoding=UTF-8 -Xmx1G -Xms1G -server -XX:ReservedCodeCacheSize=128M"
diff --git a/scripts/clangfmt b/scripts/clangfmt
deleted file mode 100755
index afb123c3e..000000000
--- a/scripts/clangfmt
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-#
-# Format C/C++ code using clang-format.
-#
-# To ensure reproducible formatting the script checks that clang-format
-# is from the most recent version of LLVM supported by Scala Native.
-#
-# Usage: $0 [--test]
-#
-# Set CLANG_FORMAT_PATH to configure path to clang-format.
-
-set -euo pipefail
-IFS=$'\n\t'
-
-# The required version of clang-format.
-CLANG_FORMAT_VERSION=5.0
-CLANG_FORMAT_VERSION_STRING="clang-format version $CLANG_FORMAT_VERSION"
-
-die() {
-  while [ "$#" -gt 0 ]; do
-    echo >&2 "$1"; shift
-  done
-  exit 1
-}
-
-check_clang_format_version() {
-  cmd="$1"
-  [ -e "$(type -P "$cmd")" ] && \
-    "$cmd" --version 2> /dev/null | grep -q "$CLANG_FORMAT_VERSION_STRING"
-}
-
-clang_format=
-
-if [ -n "${CLANG_FORMAT_PATH:-}" ]; then
-  check_clang_format_version "$CLANG_FORMAT_PATH" || \
-    die "CLANG_FORMAT_PATH does not have required version $CLANG_FORMAT_VERSION" \
-        "CLANG_FORMAT_PATH points to $CLANG_FORMAT_PATH"
-  clang_format="$CLANG_FORMAT_PATH"
-else
-  for cmd in "clang-format-$CLANG_FORMAT_VERSION" clang-format; do
-    if check_clang_format_version "$cmd"; then
-      clang_format="$cmd"
-      break
-    fi
-  done
-fi
-
-if [ -z "$clang_format" ]; then
-  die "clang-format version $CLANG_FORMAT_VERSION not found" \
-      "Install LLVM version $CLANG_FORMAT_VERSION and rerun."
-fi
-
-test_mode=
-
-while [ "$#" -gt 0 ]; do
-  arg="$1"
-  case "$arg" in
-    --test) test_mode=true; shift ;;
-    --*) die "Unknown argument: $arg" "Usage: $0 [--test]" ;;
-    *) break ;;
-  esac
-done
-
-if [ "$#" -gt 0 ]; then
-  "$clang_format" --style=file -i "$@"
-else
-  find . -name "*.[ch]" -or -name "*.cpp" | xargs "$clang_format" --style=file -i
-fi
-
-if [ "$test_mode" = true ]; then
-  git diff --quiet --exit-code || \
-    die "C/C++ code formatting changes detected" \
-        "Run \`$0\` to reformat."
-fi
diff --git a/scripts/gyb.py b/scripts/gyb.py
deleted file mode 100755
index 7a03d5798..000000000
--- a/scripts/gyb.py
+++ /dev/null
@@ -1,1128 +0,0 @@
-#!/usr/bin/env python
-# GYB: Generate Your Boilerplate (improved names welcome; at least
-# this one's short).  See -h output for instructions
-
-from __future__ import print_function
-
-import os
-import re
-try:
-    from cStringIO import StringIO
-except ImportError:
-    from io import StringIO
-import textwrap
-import tokenize
-
-from bisect import bisect
-
-
-def get_line_starts(s):
-    """Return a list containing the start index of each line in s.
-
-    The list also contains a sentinel index for the end of the string,
-    so there will be one more element in the list than there are lines
-    in the string
-    """
-    starts = [0]
-
-    for line in s.split('\n'):
-        starts.append(starts[-1] + len(line) + 1)
-
-    starts[-1] -= 1
-    return starts
-
-
-def strip_trailing_nl(s):
-    """If s ends with a newline, drop it; else return s intact"""
-    return s[:-1] if s.endswith('\n') else s
-
-
-def split_lines(s):
-    """Split s into a list of lines, each of which has a trailing newline
-
-    If the lines are later concatenated, the result is s, possibly
-    with a single appended newline.
-    """
-    return [l + '\n' for l in s.split('\n')]
-
-# text on a line up to the first '$$', '${', or '%%'
-literalText = r'(?: [^$\n%] | \$(?![${]) | %(?!%) )*'
-
-# The part of an '%end' line that follows the '%' sign
-linesClose = r'[\ \t]* end [\ \t]* (?: \# .* )? $'
-
-# Note: Where "# Absorb" appears below, the regexp attempts to eat up
-# through the end of ${...} and %{...}% constructs.  In reality we
-# handle this with the Python tokenizer, which avoids mis-detections
-# due to nesting, comments and strings.  This extra absorption in the
-# regexp facilitates testing the regexp on its own, by preventing the
-# interior of some of these constructs from being treated as literal
-# text.
-tokenize_re = re.compile(
-    r'''
-# %-lines and %{...}-blocks
-    # \n? # absorb one preceding newline
-    ^
-    (?:
-      (?P<gybLines>
-        (?P<_indent> [\ \t]* % (?! [{%] ) [\ \t]* ) (?! [\ \t] | ''' + linesClose + r''' ) .*
-        ( \n (?P=_indent) (?! ''' + linesClose + r''' ) .* ) *
-      )
-      | (?P<gybLinesClose> [\ \t]* % [ \t]* ''' + linesClose + r''' )
-      | [\ \t]* (?P<gybBlockOpen> %\{ )
-        (?: [^}]| \} (?!%) )* \}% # Absorb
-    )
-    \n? # absorb one trailing newline
-
-# Substitutions
-| (?P<substitutionOpen> \$\{ )
-  [^}]* \} # Absorb
-
-# %% and $$ are literal % and $ respectively
-| (?P<symbol>[$%]) (?P=symbol)
-
-# Literal text
-| (?P<literal> ''' + literalText + r'''
-    (?:
-      # newline that doesn't precede space+%
-      (?: \n (?! [\ \t]* %[^%] ) )
-      ''' + literalText + r'''
-    )*
-    \n?
-  )
-''', re.VERBOSE | re.MULTILINE)
-
-gyb_block_close = re.compile('\}%[ \t]*\n?')
-
-
-def token_pos_to_index(token_pos, start, line_starts):
-    """Translate a tokenize (line, column) pair into an absolute
-    position in source text given the position where we started
-    tokenizing and a list that maps lines onto their starting
-    character indexes.
-    """
-    relative_token_line_plus1, token_col = token_pos
-
-    # line number where we started tokenizing
-    start_line_num = bisect(line_starts, start) - 1
-
-    # line number of the token in the whole text
-    abs_token_line = relative_token_line_plus1 - 1 + start_line_num
-
-    # if found in the first line, adjust the end column to account
-    # for the extra text
-    if relative_token_line_plus1 == 1:
-        token_col += start - line_starts[start_line_num]
-
-    # Sometimes tokenizer errors report a line beyond the last one
-    if abs_token_line >= len(line_starts):
-        return line_starts[-1]
-
-    return line_starts[abs_token_line] + token_col
-
-
-def tokenize_python_to_unmatched_close_curly(source_text, start, line_starts):
-    """Apply Python's tokenize to source_text starting at index start
-    while matching open and close curly braces.  When an unmatched
-    close curly brace is found, return its index.  If not found,
-    return len(source_text).  If there's a tokenization error, return
-    the position of the error.
- """ - stream = StringIO(source_text) - stream.seek(start) - nesting = 0 - - try: - for kind, text, token_start, token_end, line_text \ - in tokenize.generate_tokens(stream.readline): - - if text == '{': - nesting += 1 - elif text == '}': - nesting -= 1 - if nesting < 0: - return token_pos_to_index(token_start, start, line_starts) - - except tokenize.TokenError as error: - (message, error_pos) = error.args - return token_pos_to_index(error_pos, start, line_starts) - - return len(source_text) - - -def tokenize_template(template_text): - r"""Given the text of a template, returns an iterator over - (tokenType, token, match) tuples. - - **Note**: this is template syntax tokenization, not Python - tokenization. - - When a non-literal token is matched, a client may call - iter.send(pos) on the iterator to reset the position in - template_text at which scanning will resume. - - This function provides a base level of tokenization which is - then refined by ParseContext.token_generator. - - >>> from pprint import * - >>> pprint(list((kind, text) for kind, text, _ in tokenize_template( - ... '%for x in range(10):\n% print x\n%end\njuicebox'))) - [('gybLines', '%for x in range(10):\n% print x'), - ('gybLinesClose', '%end'), - ('literal', 'juicebox')] - - >>> pprint(list((kind, text) for kind, text, _ in tokenize_template( - ... '''Nothing - ... % if x: - ... % for i in range(3): - ... ${i} - ... % end - ... % else: - ... THIS SHOULD NOT APPEAR IN THE OUTPUT - ... '''))) - [('literal', 'Nothing\n'), - ('gybLines', '% if x:\n% for i in range(3):'), - ('substitutionOpen', '${'), - ('literal', '\n'), - ('gybLinesClose', '% end'), - ('gybLines', '% else:'), - ('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n')] - - >>> for kind, text, _ in tokenize_template(''' - ... This is $some$ literal stuff containing a ${substitution} - ... followed by a %{...} block: - ... %{ - ... # Python code - ... }% - ... and here $${are} some %-lines: - ... % x = 1 - ... % y = 2 - ... % if z == 3: - ... % print '${hello}' - ... % end - ... % for x in zz: - ... % print x - ... % # different indentation - ... % twice - ... and some lines that literally start with a %% token - ... %% first line - ... %% second line - ... '''): - ... print (kind, text.strip().split('\n',1)[0]) - ('literal', 'This is $some$ literal stuff containing a') - ('substitutionOpen', '${') - ('literal', 'followed by a %{...} block:') - ('gybBlockOpen', '%{') - ('literal', 'and here ${are} some %-lines:') - ('gybLines', '% x = 1') - ('gybLinesClose', '% end') - ('gybLines', '% for x in zz:') - ('gybLines', '% # different indentation') - ('gybLines', '% twice') - ('literal', 'and some lines that literally start with a % token') - """ - pos = 0 - end = len(template_text) - - saved_literal = [] - literal_first_match = None - - while pos < end: - m = tokenize_re.match(template_text, pos, end) - - # pull out the one matched key (ignoring internal patterns starting - # with _) - ((kind, text), ) = ( - (kind, text) for (kind, text) in m.groupdict().items() - if text is not None and kind[0] != '_') - - if kind in ('literal', 'symbol'): - if len(saved_literal) == 0: - literal_first_match = m - # literals and symbols get batched together - saved_literal.append(text) - pos = None - else: - # found a non-literal. First yield any literal we've accumulated - if saved_literal != []: - yield 'literal', ''.join(saved_literal), literal_first_match - saved_literal = [] - - # Then yield the thing we found. 
-            # the place to resume tokenizing
-            pos = yield kind, text, m
-
-            # If we were not sent a new position by our client, resume
-            # tokenizing at the end of this match.
-            if pos is None:
-                pos = m.end(0)
-            else:
-                # Client is not yet ready to process next token
-                yield
-
-    if saved_literal != []:
-        yield 'literal', ''.join(saved_literal), literal_first_match
-
-
-def split_gyb_lines(source_lines):
-    r"""Return a list of lines at which to split the incoming source
-
-    These positions represent the beginnings of python line groups that
-    will require a matching %end construct if they are to be closed.
-
-    >>> src = split_lines('''\
-    ... if x:
-    ...     print x
-    ... if y: # trailing comment
-    ...     print z
-    ... if z: # another comment\
-    ... ''')
-    >>> s = split_gyb_lines(src)
-    >>> len(s)
-    2
-    >>> src[s[0]]
-    '    print z\n'
-    >>> s[1] - len(src)
-    0
-
-    >>> src = split_lines('''\
-    ... if x:
-    ...     if y: print 1
-    ...     if z:
-    ...         print 2
-    ...     pass\
-    ... ''')
-    >>> s = split_gyb_lines(src)
-    >>> len(s)
-    1
-    >>> src[s[0]]
-    '    if y: print 1\n'
-
-    >>> src = split_lines('''\
-    ... if x:
-    ...     if y:
-    ...         print 1
-    ...         print 2
-    ... ''')
-    >>> s = split_gyb_lines(src)
-    >>> len(s)
-    2
-    >>> src[s[0]]
-    '    if y:\n'
-    >>> src[s[1]]
-    '        print 1\n'
-    """
-    last_token_text, last_token_kind = None, None
-    unmatched_indents = []
-
-    dedents = 0
-    try:
-        for token_kind, token_text, token_start, \
-                (token_end_line, token_end_col), line_text \
-                in tokenize.generate_tokens(lambda i=iter(source_lines):
-                                            next(i)):
-
-            if token_kind in (tokenize.COMMENT, tokenize.ENDMARKER):
-                continue
-
-            if token_text == '\n' and last_token_text == ':':
-                unmatched_indents.append(token_end_line)
-
-            # The tokenizer appends dedents at EOF; don't consider
-            # those as matching indentations.  Instead just save them
-            # up...
-            if last_token_kind == tokenize.DEDENT:
-                dedents += 1
-            # And count them later, when we see something real.
-            if token_kind != tokenize.DEDENT and dedents > 0:
-                unmatched_indents = unmatched_indents[:-dedents]
-                dedents = 0
-
-            last_token_text, last_token_kind = token_text, token_kind
-
-    except tokenize.TokenError:
-        # Let the later compile() call report the error
-        return []
-
-    if last_token_text == ':':
-        unmatched_indents.append(len(source_lines))
-
-    return unmatched_indents
-
-
-def code_starts_with_dedent_keyword(source_lines):
-    r"""Return True iff the incoming Python source_lines begin with "else",
-    "elif", "except", or "finally".
-
-    Initial comments and whitespace are ignored.
-
-    >>> code_starts_with_dedent_keyword(split_lines('if x in y: pass'))
-    False
-    >>> code_starts_with_dedent_keyword(split_lines('except ifSomethingElse:'))
-    True
-    >>> code_starts_with_dedent_keyword(
-        split_lines('\n# comment\nelse: # yes'))
-    True
-    """
-    token_text = None
-    for token_kind, token_text, _, _, _ \
-            in tokenize.generate_tokens(lambda i=iter(source_lines): next(i)):
-
-        if token_kind != tokenize.COMMENT and token_text.strip() != '':
-            break
-
-    return token_text in ('else', 'elif', 'except', 'finally')
-
-
-class ParseContext(object):
-    """State carried through a parse of a template"""
-
-    filename = ''
-    template = ''
-    line_starts = []
-    code_start_line = -1
-    code_text = None
-    tokens = None  # The rest of the tokens
-    close_lines = False
-
-    def __init__(self, filename, template=None):
-        self.filename = os.path.abspath(filename)
-        if template is None:
-            with open(filename) as f:
-                self.template = f.read()
-        else:
-            self.template = template
-        self.line_starts = get_line_starts(self.template)
-        self.tokens = self.token_generator(tokenize_template(self.template))
-        self.next_token()
-
-    def pos_to_line(self, pos):
-        return bisect(self.line_starts, pos) - 1
-
-    def token_generator(self, base_tokens):
-        r"""Given an iterator over (kind, text, match) triples (see
-        tokenize_template above), return a refined iterator over
-        token_kinds.
-
-        Among other adjustments to the elements found by base_tokens,
-        this refined iterator tokenizes python code embedded in
-        template text to help determine its true extent.  The
-        expression "base_tokens.send(pos)" is used to reset the index at
-        which base_tokens resumes scanning the underlying text.
-
-        >>> ctx = ParseContext('dummy', '''
-        ... %for x in y:
-        ... %    print x
-        ... % end
-        ... literally
-        ... ''')
-        >>> while ctx.token_kind:
-        ...     print (ctx.token_kind, ctx.code_text or ctx.token_text)
-        ...     ignored = ctx.next_token()
-        ('literal', '\n')
-        ('gybLinesOpen', 'for x in y:\n')
-        ('gybLines', '    print x\n')
-        ('gybLinesClose', '% end')
-        ('literal', 'literally\n')
-
-        >>> ctx = ParseContext('dummy',
-        ... '''Nothing
-        ... % if x:
-        ... %    for i in range(3):
-        ... ${i}
-        ... % end
-        ... % else:
-        ... THIS SHOULD NOT APPEAR IN THE OUTPUT
-        ... ''')
-        >>> while ctx.token_kind:
-        ...     print (ctx.token_kind, ctx.code_text or ctx.token_text)
-        ...     ignored = ctx.next_token()
-        ('literal', 'Nothing\n')
-        ('gybLinesOpen', 'if x:\n')
-        ('gybLinesOpen', '   for i in range(3):\n')
-        ('substitutionOpen', 'i')
-        ('literal', '\n')
-        ('gybLinesClose', '% end')
-        ('gybLinesOpen', 'else:\n')
-        ('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n')
-
-        >>> ctx = ParseContext('dummy',
-        ... '''% for x in [1, 2, 3]:
-        ... %   if x == 1:
-        ... literal1
-        ... %   elif x > 1:  # add an output line after this line to fix bug
-        ... %       if x == 2:
-        ... literal2
-        ... %       end
-        ... %   end
-        ... % end
-        ... ''')
-        >>> while ctx.token_kind:
-        ...     print (ctx.token_kind, ctx.code_text or ctx.token_text)
-        ...     ignored = ctx.next_token()
-        ('gybLinesOpen', 'for x in [1, 2, 3]:\n')
-        ('gybLinesOpen', '  if x == 1:\n')
-        ('literal', 'literal1\n')
-        ('gybLinesOpen', 'elif x > 1:  # add output line here to fix bug\n')
-        ('gybLinesOpen', '    if x == 2:\n')
-        ('literal', 'literal2\n')
-        ('gybLinesClose', '% end')
-        ('gybLinesClose', '% end')
-        ('gybLinesClose', '% end')
-        """
-        for self.token_kind, self.token_text, self.token_match in base_tokens:
-            kind = self.token_kind
-            self.code_text = None
-
-            # Do we need to close the current lines?
-            self.close_lines = kind == 'gybLinesClose'
-
-            # %{...}% and ${...} constructs
-            if kind.endswith('Open'):
-
-                # Tokenize text that follows as Python up to an unmatched '}'
-                code_start = self.token_match.end(kind)
-                self.code_start_line = self.pos_to_line(code_start)
-
-                close_pos = tokenize_python_to_unmatched_close_curly(
-                    self.template, code_start, self.line_starts)
-                self.code_text = self.template[code_start:close_pos]
-                yield kind
-
-                if (kind == 'gybBlockOpen'):
-                    # Absorb any '}% \n'
-                    m2 = gyb_block_close.match(self.template, close_pos)
-                    if not m2:
-                        raise ValueError("Invalid block closure")
-                    next_pos = m2.end(0)
-                else:
-                    assert kind == 'substitutionOpen'
-                    # skip past the closing '}'
-                    next_pos = close_pos + 1
-
-                # Resume tokenizing after the end of the code.
-                base_tokens.send(next_pos)
-
-            elif kind == 'gybLines':
-
-                self.code_start_line = self.pos_to_line(
-                    self.token_match.start('gybLines'))
-                indentation = self.token_match.group('_indent')
-
-                # Strip off the leading indentation and %-sign
-                source_lines = re.split(
-                    '^' + re.escape(indentation),
-                    self.token_match.group('gybLines') + '\n',
-                    flags=re.MULTILINE)[1:]
-
-                if code_starts_with_dedent_keyword(source_lines):
-                    self.close_lines = True
-
-                last_split = 0
-                for line in split_gyb_lines(source_lines):
-                    self.token_kind = 'gybLinesOpen'
-                    self.code_text = ''.join(source_lines[last_split:line])
-                    yield self.token_kind
-                    last_split = line
-                    self.code_start_line += line - last_split
-                    self.close_lines = False
-
-                self.code_text = ''.join(source_lines[last_split:])
-                if self.code_text:
-                    self.token_kind = 'gybLines'
-                    yield self.token_kind
-            else:
-                yield self.token_kind
-
-    def next_token(self):
-        """Move to the next token"""
-        for kind in self.tokens:
-            return self.token_kind
-
-        self.token_kind = None
-
-
-class ExecutionContext(object):
-    """State we pass around during execution of a template"""
-
-    def __init__(self, line_directive='// ###sourceLocation',
-                 **local_bindings):
-        self.local_bindings = local_bindings
-        self.line_directive = line_directive
-        self.local_bindings['__context__'] = self
-        self.result_text = []
-        self.last_file_line = None
-
-    def append_text(self, text, file, line):
-        # see if we need to inject a line marker
-        if self.line_directive:
-            if (file, line) != self.last_file_line:
-                # We can only insert the line directive at a line break
-                if len(self.result_text) == 0 \
-                        or self.result_text[-1].endswith('\n'):
-                    self.result_text.append('%s(file: "%s", line: %d)\n' % (
-                        self.line_directive, file, line + 1))
-                # But if the new text contains any line breaks, we can create
-                # one
-                elif '\n' in text:
-                    i = text.find('\n')
-                    self.result_text.append(text[:i + 1])
-                    self.last_file_line = (
-                        self.last_file_line[0], self.last_file_line[1] + 1)
-                    # and try again
-                    self.append_text(text[i + 1:], file, line)
-                    return
-
-        self.result_text.append(text)
-        self.last_file_line = (file, line + text.count('\n'))
-
-
-class ASTNode(object):
-    """Abstract base class for template AST nodes"""
-
-    def __init__(self):
-        raise NotImplementedError("ASTNode.__init__ is not implemented.")
-
-    def execute(self, context):
-        raise NotImplementedError("ASTNode.execute is not implemented.")
-
-    def __str__(self, indent=''):
-        raise NotImplementedError("ASTNode.__str__ is not implemented.")
-
-    def format_children(self, indent):
-        if not self.children:
-            return ' []'
-
-        return '\n'.join(
-            ['', indent + '['] +
-            [x.__str__(indent + 4 * ' ') for x in self.children] +
-            [indent + ']'])
-
-
-class Block(ASTNode):
-    """A sequence of other AST nodes, to be executed in order"""
-
-    children = []
-
-    def __init__(self, context):
-        self.children = []
-
-        while context.token_kind and not context.close_lines:
-            if context.token_kind == 'literal':
-                node = Literal
-            else:
-                node = Code
-            self.children.append(node(context))
-
-    def execute(self, context):
-        for x in self.children:
-            x.execute(context)
-
-    def __str__(self, indent=''):
-        return indent + 'Block:' + self.format_children(indent)
-
-
-class Literal(ASTNode):
-    """An AST node that generates literal text"""
-
-    def __init__(self, context):
-        self.text = context.token_text
-        start_position = context.token_match.start(context.token_kind)
-        self.start_line_number = context.pos_to_line(start_position)
-        self.filename = context.filename
-        context.next_token()
-
-    def execute(self, context):
-        context.append_text(self.text, self.filename, self.start_line_number)
-
-    def __str__(self, indent=''):
-        return '\n'.join(
-            [indent + x for x in ['Literal:'] +
-             strip_trailing_nl(self.text).split('\n')])
-
-
-class Code(ASTNode):
-    """An AST node that is evaluated as Python"""
-
-    code = None
-    children = ()
-    kind = None
-
-    def __init__(self, context):
-
-        source = ''
-        source_line_count = 0
-
-        def accumulate_code():
-            s = source + (context.code_start_line - source_line_count) * '\n' \
-                + textwrap.dedent(context.code_text)
-            line_count = context.code_start_line + \
-                context.code_text.count('\n')
-            context.next_token()
-            return s, line_count
-
-        eval_exec = 'exec'
-        if context.token_kind.startswith('substitution'):
-            eval_exec = 'eval'
-            source, source_line_count = accumulate_code()
-            source = '(' + source.strip() + ')'
-
-        else:
-            while context.token_kind == 'gybLinesOpen':
-                source, source_line_count = accumulate_code()
-                source += '    __children__[%d].execute(__context__)\n' % len(
-                    self.children)
-                source_line_count += 1
-
-                self.children += (Block(context),)
-
-            if context.token_kind == 'gybLinesClose':
-                context.next_token()
-
-            if context.token_kind == 'gybLines':
-                source, source_line_count = accumulate_code()
-
-            # Only handle a substitution as part of this code block if
-            # we don't already have some %-lines.
-            elif context.token_kind == 'gybBlockOpen':
-
-                # Opening ${...} and %{...}% constructs
-                source, source_line_count = accumulate_code()
-
-        self.filename = context.filename
-        self.start_line_number = context.code_start_line
-        self.code = compile(source, context.filename, eval_exec)
-        self.source = source
-
-    def execute(self, context):
-        # Save __children__ from the local bindings
-        save_children = context.local_bindings.get('__children__')
-        # Execute the code with our __children__ in scope
-        context.local_bindings['__children__'] = self.children
-        result = eval(self.code, context.local_bindings)
-
-        if context.local_bindings['__children__'] is not self.children:
-            raise ValueError("The code is not allowed to mutate __children__")
-        # Restore the bindings
-        context.local_bindings['__children__'] = save_children
-
-        # If we got a result, the code was an expression, so append
-        # its value
-        if result is not None and result != '':
-            context.append_text(
-                str(result), self.filename, self.start_line_number)
-
-    def __str__(self, indent=''):
-        source_lines = re.sub(r'^\n', '', strip_trailing_nl(
-            self.source), flags=re.MULTILINE).split('\n')
-        if len(source_lines) == 1:
-            s = indent + 'Code: {' + source_lines[0] + '}'
-        else:
-            s = indent + 'Code:\n' + indent + '{\n' + '\n'.join(
-                indent + 4 * ' ' + l for l in source_lines
-            ) + '\n' + indent + '}'
-        return s + self.format_children(indent)
-
-
-def parse_template(filename, text=None):
-    r"""Return an AST corresponding to the given template file.
-
-    If text is supplied, it is assumed to be the contents of the file,
-    as a string.
-
-    >>> print parse_template('dummy.file', text=
-    ... '''% for x in [1, 2, 3]:
-    ... %   if x == 1:
-    ... literal1
-    ... %   elif x > 1:  # add an output line after this line to fix the bug
-    ... %       if x == 2:
-    ... literal2
-    ... %       end
-    ... %   end
-    ... % end
-    ... ''')
-    Block:
-    [
-        Code:
-        {
-            for x in [1, 2, 3]:
-                __children__[0].execute(__context__)
-        }
-        [
-            Block:
-            [
-                Code:
-                {
-                    if x == 1:
-                        __children__[0].execute(__context__)
-                    elif x > 1:  # add output line after this line to fix bug
-                        __children__[1].execute(__context__)
-                }
-                [
-                    Block:
-                    [
-                        Literal:
-                        literal1
-                    ]
-                    Block:
-                    [
-                        Code:
-                        {
-                            if x == 2:
-                                __children__[0].execute(__context__)
-                        }
-                        [
-                            Block:
-                            [
-                                Literal:
-                                literal2
-                            ]
-                        ]
-                    ]
-                ]
-            ]
-        ]
-    ]
-
-    >>> print parse_template(
-    >>> 'dummy.file', text='%for x in range(10):\n%    print x\n%end\njuicebox')
-    Block:
-    [
-        Code:
-        {
-            for x in range(10):
-                __children__[0].execute(__context__)
-        }
-        [
-            Block:
-            [
-                Code: {print x} []
-            ]
-        ]
-        Literal:
-        juicebox
-    ]
-
-    >>> print parse_template('/dummy.file', text=
-    ... '''Nothing
-    ... % if x:
-    ... %    for i in range(3):
-    ... ${i}
-    ... % end
-    ... % else:
-    ... THIS SHOULD NOT APPEAR IN THE OUTPUT
-    ... ''')
-    Block:
-    [
-        Literal:
-        Nothing
-        Code:
-        {
-            if x:
-                __children__[0].execute(__context__)
-            else:
-                __children__[1].execute(__context__)
-        }
-        [
-            Block:
-            [
-                Code:
-                {
-                    for i in range(3):
-                        __children__[0].execute(__context__)
-                }
-                [
-                    Block:
-                    [
-                        Code: {(i)} []
-                        Literal:
-
-                    ]
-                ]
-            ]
-            Block:
-            [
-                Literal:
-                THIS SHOULD NOT APPEAR IN THE OUTPUT
-            ]
-        ]
-    ]
-
-    >>> print parse_template('dummy.file', text='''%
-    ... %for x in y:
-    ... %    print y
-    ... ''')
-    Block:
-    [
-        Code:
-        {
-            for x in y:
-                __children__[0].execute(__context__)
-        }
-        [
-            Block:
-            [
-                Code: {print y} []
-            ]
-        ]
-    ]
-
-    >>> print parse_template('dummy.file', text='''%
-    ... %if x:
-    ... %    print y
-    ... AAAA
-    ... %else:
-    ... BBBB
-    ... ''')
-    Block:
-    [
-        Code:
-        {
-            if x:
-                __children__[0].execute(__context__)
-            else:
-                __children__[1].execute(__context__)
-        }
-        [
-            Block:
-            [
-                Code: {print y} []
-                Literal:
-                AAAA
-            ]
-            Block:
-            [
-                Literal:
-                BBBB
-            ]
-        ]
-    ]
-
-    >>> print parse_template('dummy.file', text='''%
-    ... %if x:
-    ... %    print y
-    ... AAAA
-    ... %# This is a comment
-    ... %else:
-    ... BBBB
-    ... ''')
-    Block:
-    [
-        Code:
-        {
-            if x:
-                __children__[0].execute(__context__)
-            # This is a comment
-            else:
-                __children__[1].execute(__context__)
-        }
-        [
-            Block:
-            [
-                Code: {print y} []
-                Literal:
-                AAAA
-            ]
-            Block:
-            [
-                Literal:
-                BBBB
-            ]
-        ]
-    ]
-
-    >>> print parse_template('dummy.file', text='''\
-    ... %for x in y:
-    ... AAAA
-    ... %if x:
-    ... BBBB
-    ... %end
-    ... CCCC
-    ... ''')
-    Block:
-    [
-        Code:
-        {
-            for x in y:
-                __children__[0].execute(__context__)
-        }
-        [
-            Block:
-            [
-                Literal:
-                AAAA
-                Code:
-                {
-                    if x:
-                        __children__[0].execute(__context__)
-                }
-                [
-                    Block:
-                    [
-                        Literal:
-                        BBBB
-                    ]
-                ]
-                Literal:
-                CCCC
-            ]
-        ]
-    ]
-    """
-    return Block(ParseContext(filename, text))
-
-
-def execute_template(ast, line_directive='', **local_bindings):
-    r"""Return the text generated by executing the given template AST.
-
-    Keyword arguments become local variable bindings in the execution context
-
-    >>> ast = parse_template('/dummy.file', text=
-    ... '''Nothing
-    ... % if x:
-    ... %    for i in range(3):
-    ... ${i}
-    ... % end
-    ... % else:
-    ... THIS SHOULD NOT APPEAR IN THE OUTPUT
-    ... ''')
-    >>> print execute_template(ast, line_directive='//#sourceLocation', x=1),
-    //#sourceLocation(file: "/dummy.file", line: 1)
-    Nothing
-    //#sourceLocation(file: "/dummy.file", line: 4)
-    0
-    //#sourceLocation(file: "/dummy.file", line: 4)
-    1
-    //#sourceLocation(file: "/dummy.file", line: 4)
-    2
-
-    >>> ast = parse_template('/dummy.file', text=
-    ... '''Nothing
-    ... % a = []
-    ... % for x in range(3):
-    ... %    a.append(x)
-    ... % end
-    ... ${a}
-    ... ''')
-    >>> print execute_template(ast, line_directive='//#sourceLocation', x=1),
-    //#sourceLocation(file: "/dummy.file", line: 1)
-    Nothing
-    //#sourceLocation(file: "/dummy.file", line: 6)
-    [0, 1, 2]
-    """
-    execution_context = ExecutionContext(
-        line_directive=line_directive, **local_bindings)
-    ast.execute(execution_context)
-    return ''.join(execution_context.result_text)
-
-
-def main():
-    import argparse
-    import sys
-
-    parser = argparse.ArgumentParser(
-        formatter_class=argparse.RawDescriptionHelpFormatter,
-        description='Generate Your Boilerplate!', epilog='''
-    A GYB template consists of the following elements:
-
-      - Literal text which is inserted directly into the output
-
-      - %% or $$ in literal text, which insert literal '%' and '$'
-        symbols respectively.
-
-      - Substitutions of the form ${<python-expression>}.  The Python
-        expression is converted to a string and the result is inserted
-        into the output.
-
-      - Python code delimited by %{...}%.  Typically used to inject
-        definitions (functions, classes, variable bindings) into the
-        evaluation context of the template.  Common indentation is
-        stripped, so you can add as much indentation to the beginning
-        of this code as you like
-
-      - Lines beginning with optional whitespace followed by a single
-        '%' and Python code.  %-lines allow you to nest other
-        constructs inside them.  To close a level of nesting, use the
-        "%end" construct.
-
-      - Lines beginning with optional whitespace and followed by a
-        single '%' and the token "end", which close open constructs in
-        %-lines.
-
-    Example template:
-
-      - Hello -
-      %{
-         x = 42
-         def succ(a):
-             return a+1
-      }%
-
-      I can assure you that ${x} < ${succ(x)}
-
-      % if int(y) > 7:
-      %    for i in range(3):
-      y is greater than seven!
-      %    end
-      % else:
-      y is less than or equal to seven
-      % end
-
-      - The End. -
-
-    When run with "gyb -Dy=9", the output is
-
-      - Hello -
-
-      I can assure you that 42 < 43
-
-      y is greater than seven!
-      y is greater than seven!
-      y is greater than seven!
-
-      - The End. -
-'''
-    )
-    parser.add_argument(
-        '-D', action='append', dest='defines', metavar='NAME=VALUE',
-        default=[],
-        help='''Bindings to be set in the template's execution context''')
-
-    parser.add_argument(
-        'file', type=argparse.FileType(),
-        help='Path to GYB template file (defaults to stdin)', nargs='?',
-        default=sys.stdin)
-    parser.add_argument(
-        '-o', dest='target', type=argparse.FileType('w'),
-        help='Output file (defaults to stdout)', default=sys.stdout)
-    parser.add_argument(
-        '--test', action='store_true',
-        default=False, help='Run a self-test')
-    parser.add_argument(
-        '--verbose-test', action='store_true',
-        default=False, help='Run a verbose self-test')
-    parser.add_argument(
-        '--dump', action='store_true',
-        default=False, help='Dump the parsed template to stdout')
-    parser.add_argument(
-        '--line-directive', default='// ###sourceLocation',
-        help='Line directive prefix; empty => no line markers')
-
-    args = parser.parse_args(sys.argv[1:])
-
-    if args.test or args.verbose_test:
-        import doctest
-        if doctest.testmod(verbose=args.verbose_test).failed:
-            sys.exit(1)
-
-    bindings = dict(x.split('=', 1) for x in args.defines)
-    ast = parse_template(args.file.name, args.file.read())
-    if args.dump:
-        print(ast)
-    # Allow the template to import .py files from its own directory
-    sys.path = [os.path.split(args.file.name)[0] or '.'] + sys.path
-
-    args.target.write(execute_template(ast, args.line_directive, **bindings))
-
-if __name__ == '__main__':
-    main()
diff --git a/scripts/javalib.py b/scripts/javalib.py
deleted file mode 100644
index f5a3ba8cb..000000000
--- a/scripts/javalib.py
+++ /dev/null
@@ -1,22 +0,0 @@
-"""
-    Utility that lists all non-implementation specific classes in javalib.
-
-    It must be run from the root of the Scala Native checkout.
-""" - -import subprocess,os - -cwd = os.getcwd() - -target = cwd + "/javalib/target/scala-2.11/classes/" - -paths = subprocess.check_output(["find", target, "-name", "*.nir"]) - -classes = sorted(list(set( - line.replace(target, "").replace(".nir", "").lstrip("/").rstrip("$").replace("/", ".") - for line in paths.split("\n") - if "$$anon" not in line and "java/" in line -))) - -for cls in classes: - print("* ``{}``".format(cls)) diff --git a/scripts/nix-run b/scripts/nix-run deleted file mode 100755 index 7f11a9264..000000000 --- a/scripts/nix-run +++ /dev/null @@ -1,5 +0,0 @@ -#!/usr/bin/env bash - -HERE="`dirname $0`" - -nix-shell $HERE/scala-native.nix -A clangEnv diff --git a/scripts/release b/scripts/release deleted file mode 100755 index 383eaab20..000000000 --- a/scripts/release +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -ex - -sbt rebuild test-all -sbt "tools/mimaReportBinaryIssues" -sbt publishSigned -sbt "project nscplugin" ++2.11.12 publishSigned -sbt "project nscplugin" ++2.11.11 publishSigned -sbt "project nscplugin" ++2.11.8 publishSigned -sbt ^^1.0.4 sbtScalaNative/publishSigned nir/publishSigned tools/publishSigned util/publishSigned testRunner/publishSigned diff --git a/scripts/scala-native.nix b/scripts/scala-native.nix deleted file mode 100644 index 9371b4988..000000000 --- a/scripts/scala-native.nix +++ /dev/null @@ -1,23 +0,0 @@ -let - pkgs = import {}; - stdenv = pkgs.stdenv; -in rec { - clangEnv = stdenv.mkDerivation rec { - name = "clang-env"; - shellHook = '' - alias cls=clear - ''; - CLANG_PATH = pkgs.clang + "/bin/clang"; - CLANGPP_PATH = pkgs.clang + "/bin/clang++"; - buildInputs = with pkgs; [ - stdenv - sbt - openjdk - boehmgc - libunwind - re2 - clang - zlib - ]; - }; -} diff --git a/scripts/scalafmt b/scripts/scalafmt deleted file mode 100755 index 92a912609..000000000 --- a/scripts/scalafmt +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash - -# set -x - -HERE="`dirname $0`" -VERSION="1.2.0" -COURSIER="$HERE/.coursier" -SCALAFMT="$HERE/.scalafmt-$VERSION" - -if [ ! -f $COURSIER ]; then - curl -L -o $COURSIER https://git.io/vgvpD - chmod +x $COURSIER -fi - -if [ ! -f $SCALAFMT ]; then - $COURSIER bootstrap com.geirsson:scalafmt-cli_2.11:$VERSION --main org.scalafmt.cli.Cli -o $SCALAFMT - chmod +x $SCALAFMT -fi - -$SCALAFMT "$@" diff --git a/scripts/travis_setup.sh b/scripts/travis_setup.sh deleted file mode 100755 index 18751dd24..000000000 --- a/scripts/travis_setup.sh +++ /dev/null @@ -1,42 +0,0 @@ -#!/usr/bin/env bash - -# Enable strict mode and fail the script on non-zero exit code, -# unresolved variable or pipe failure. -set -euo pipefail -IFS=$'\n\t' - -if [ "$(uname)" == "Darwin" ]; then - - brew update - brew install sbt - brew install bdw-gc - brew link bdw-gc - brew install jq - brew install re2 - brew install llvm@4 - export PATH="/usr/local/opt/llvm@4/bin:$PATH" - -else - - sudo apt-get update - - # Remove pre-bundled libunwind - sudo find /usr -name "*libunwind*" -delete - - # Use pre-bundled clang - export PATH=/usr/local/clang-5.0.0/bin:$PATH - export CXX=clang++ - - # Install Boehm GC and libunwind - sudo apt-get install libgc-dev libunwind8-dev - - # Build and install re2 from source - git clone https://code.googlesource.com/re2 - pushd re2 - git checkout 2017-03-01 - make -j4 test - sudo make install prefix=/usr - make testinstall prefix=/usr - popd - -fi