From 11b97221831138bf3881a38a07ded03328c0f118 Mon Sep 17 00:00:00 2001
From: exoego
Date: Sun, 12 May 2019 22:30:21 +0900
Subject: [PATCH 01/10] Add Scala Native support

---
 .travis.yml                                   | 10 ++++--
 README.markdown                               | 18 ++++++++++-
 src/main/scala/sbt/ProjectMatrix.scala        | 31 ++++++++++++++++++-
 src/sbt-test/projectMatrix/native/build.sbt   | 17 ++++++++++
 .../native/core/src/main/scala/Core.scala     |  6 ++++
 .../projectMatrix/native/project/plugins.sbt  |  6 ++++
 src/sbt-test/projectMatrix/native/test        |  5 +++
 7 files changed, 89 insertions(+), 4 deletions(-)
 create mode 100644 src/sbt-test/projectMatrix/native/build.sbt
 create mode 100644 src/sbt-test/projectMatrix/native/core/src/main/scala/Core.scala
 create mode 100644 src/sbt-test/projectMatrix/native/project/plugins.sbt
 create mode 100644 src/sbt-test/projectMatrix/native/test

diff --git a/.travis.yml b/.travis.yml
index 475db2df4..d56744c46 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,13 +1,19 @@
-dist: trusty
+dist: xenial
 group: stable
 
 language: scala
-jdk: oraclejdk8
+jdk: openjdk11
 
 before_script:
   - export JVM_OPTS="-Dfile.encoding=UTF-8 -Xmx1G -Xms1G -server -XX:ReservedCodeCacheSize=128M"
   - unset _JAVA_OPTIONS
 
+addons:
+  apt:
+    packages:
+      - clang
+      - libunwind-dev
+
 script: sbt scripted
 
 cache:
diff --git a/README.markdown b/README.markdown
index c21d25970..4e3158ecc 100644
--- a/README.markdown
+++ b/README.markdown
@@ -74,7 +74,7 @@ This is an example where `core` builds against Scala 2.11 and 2.12, but app only
 
 ### Scala.js support
 
-Scala.js support was added in sbt-projectmatrix 0.2.0.
+[Scala.js](http://scala-js.org/) support was added in sbt-projectmatrix 0.2.0.
 To use this, you need to setup sbt-scalajs as well:
 
 ```scala
@@ -87,6 +87,22 @@ lazy val core = (projectMatrix in file("core"))
 
 This will create subprojects `coreJS2_11` and `coreJS2_12`.
 
+### Scala Native support
+
+[Scala Native](http://scala-native.org) support will be added in an upcoming release.
+To use this, you need to setup sbt-scala-native as well:
+
+```scala
+lazy val core = (projectMatrix in file("core"))
+  .settings(
+    name := "core"
+  )
+  .nativePlatform(scalaVersions = Seq("2.12.8", "2.11.12"))
+```
+
+This will create subprojects `coreNative2_11` and `coreNative2_12`.
+
+
 ### parallel cross-library building
 
 The rows can also be used for parallel cross-library building.
diff --git a/src/main/scala/sbt/ProjectMatrix.scala b/src/main/scala/sbt/ProjectMatrix.scala
index 6563457fe..3fb590c7e 100644
--- a/src/main/scala/sbt/ProjectMatrix.scala
+++ b/src/main/scala/sbt/ProjectMatrix.scala
@@ -69,6 +69,10 @@ sealed trait ProjectMatrix extends CompositeProject {
   def jsPlatform(scalaVersions: Seq[String], settings: Seq[Setting[_]]): ProjectMatrix
   def js: ProjectFinder
 
+  def nativePlatform(scalaVersions: Seq[String]): ProjectMatrix
+  def nativePlatform(scalaVersions: Seq[String], settings: Seq[Setting[_]]): ProjectMatrix
+  def native: ProjectFinder
+
   def crossLibrary(scalaVersions: Seq[String], suffix: String, settings: Seq[Setting[_]]): ProjectMatrix
   def crossLib(suffix: String): ProjectFinder
 
@@ -93,6 +97,8 @@ object ProjectMatrix {
   val jvmDirectorySuffix: String = "-jvm"
   val jsIdSuffix: String = "JS"
   val jsDirectorySuffix: String = "-js"
+  val nativeIdSuffix: String = "NATIVE"
+  val nativeDirectorySuffix: String = "-native"
 
   /** A row in the project matrix, typically representing a platform.
    */
@@ -209,7 +215,7 @@ object ProjectMatrix {
 
     override def jsPlatform(scalaVersions: Seq[String]): ProjectMatrix =
       jsPlatform(scalaVersions, Nil)
-
+
     override def jsPlatform(scalaVersions: Seq[String], settings: Seq[Setting[_]]): ProjectMatrix =
       custom(jsIdSuffix, jsDirectorySuffix, scalaVersions,
         { _
@@ -228,6 +234,29 @@ object ProjectMatrix {
       }
     }
 
+    override def native: ProjectFinder = new SuffixBaseProjectFinder(nativeIdSuffix)
+
+    override def nativePlatform(scalaVersions: Seq[String]): ProjectMatrix =
+      nativePlatform(scalaVersions, Nil)
+
+    override def nativePlatform(scalaVersions: Seq[String], settings: Seq[Setting[_]]): ProjectMatrix =
+      custom(nativeIdSuffix, nativeDirectorySuffix, scalaVersions,
+        { _
+          .enablePlugins(nativePlugin(this.getClass.getClassLoader).getOrElse(
+            sys.error("""Scala Native plugin was not found. Add the sbt-scala-native plugin into project/plugins.sbt:
+              | addSbtPlugin("org.scala-native" % "sbt-scala-native" % "x.y.z")
+              |""".stripMargin)
+          ))
+          .settings(settings)
+        })
+
+    def nativePlugin(classLoader: ClassLoader): Try[AutoPlugin] = {
+      import sbtprojectmatrix.ReflectionUtil._
+      withContextClassloader(classLoader) { loader =>
+        getSingletonObject[AutoPlugin](loader, "scala.scalanative.sbtplugin.ScalaNativePlugin$")
+      }
+    }
+
     override def js: ProjectFinder = new SuffixBaseProjectFinder(jsIdSuffix)
 
     override def crossLibrary(scalaVersions: Seq[String], suffix: String, settings: Seq[Setting[_]]): ProjectMatrix =
diff --git a/src/sbt-test/projectMatrix/native/build.sbt b/src/sbt-test/projectMatrix/native/build.sbt
new file mode 100644
index 000000000..83c4087ae
--- /dev/null
+++ b/src/sbt-test/projectMatrix/native/build.sbt
@@ -0,0 +1,17 @@
+// lazy val root = (project in file("."))
+//   .aggregate(core.projectRefs ++ app.projectRefs: _*)
+//   .settings(
+//   )
+
+lazy val core = (projectMatrix in file("core"))
+  .settings(
+    name := "core"
+  )
+  .nativePlatform(scalaVersions = Seq("2.11.12", "2.11.11"))
+
+lazy val app = (projectMatrix in file("app"))
+  .dependsOn(core)
+  .settings(
+    name := "app"
+  )
+  .nativePlatform(scalaVersions = Seq("2.11.12"))
diff --git a/src/sbt-test/projectMatrix/native/core/src/main/scala/Core.scala b/src/sbt-test/projectMatrix/native/core/src/main/scala/Core.scala
new file mode 100644
index 000000000..274e01225
--- /dev/null
+++ b/src/sbt-test/projectMatrix/native/core/src/main/scala/Core.scala
@@ -0,0 +1,6 @@
+package a
+
+class Core {
+}
+
+object Core extends Core
diff --git a/src/sbt-test/projectMatrix/native/project/plugins.sbt b/src/sbt-test/projectMatrix/native/project/plugins.sbt
new file mode 100644
index 000000000..2efd7a57d
--- /dev/null
+++ b/src/sbt-test/projectMatrix/native/project/plugins.sbt
@@ -0,0 +1,6 @@
+sys.props.get("plugin.version") match {
+  case Some(x) => addSbtPlugin("com.eed3si9n" % "sbt-projectmatrix" % x)
+  case _ => sys.error("""|The system property 'plugin.version' is not defined.
+ |Specify this property using the scriptedLaunchOpts -D.""".stripMargin) +} +addSbtPlugin("org.scala-native" % "sbt-scala-native" % "0.3.9") diff --git a/src/sbt-test/projectMatrix/native/test b/src/sbt-test/projectMatrix/native/test new file mode 100644 index 000000000..1bdaa1968 --- /dev/null +++ b/src/sbt-test/projectMatrix/native/test @@ -0,0 +1,5 @@ +> nativeLink + +$ exists app/target/native-2.12/app-out +$ exists core/target/native-2.12/core-out +$ exists core/target/native-2.11/core-out From 278aeeb11f6c841aa008554f947efefe28f9b68e Mon Sep 17 00:00:00 2001 From: exoego Date: Mon, 13 May 2019 08:52:35 +0900 Subject: [PATCH 02/10] Add main classes --- .../projectMatrix/native/app/src/main/scala/App.scala | 8 ++++++++ src/sbt-test/projectMatrix/native/build.sbt | 6 ++++-- .../native/core/src/main/scala/CoreMain.scala | 6 ++++++ 3 files changed, 18 insertions(+), 2 deletions(-) create mode 100644 src/sbt-test/projectMatrix/native/app/src/main/scala/App.scala create mode 100644 src/sbt-test/projectMatrix/native/core/src/main/scala/CoreMain.scala diff --git a/src/sbt-test/projectMatrix/native/app/src/main/scala/App.scala b/src/sbt-test/projectMatrix/native/app/src/main/scala/App.scala new file mode 100644 index 000000000..2cbcd3db7 --- /dev/null +++ b/src/sbt-test/projectMatrix/native/app/src/main/scala/App.scala @@ -0,0 +1,8 @@ +package a + +object App { + def main(args: Array[String]): Unit = { + val a = new Core + println(s"Hello, world! ${a}") + } +} diff --git a/src/sbt-test/projectMatrix/native/build.sbt b/src/sbt-test/projectMatrix/native/build.sbt index 83c4087ae..eaab3000d 100644 --- a/src/sbt-test/projectMatrix/native/build.sbt +++ b/src/sbt-test/projectMatrix/native/build.sbt @@ -5,13 +5,15 @@ lazy val core = (projectMatrix in file("core")) .settings( - name := "core" + name := "core", + mainClass in (Compile, run) := Some("a.CoreMain") ) .nativePlatform(scalaVersions = Seq("2.11.12", "2.11.11")) lazy val app = (projectMatrix in file("app")) .dependsOn(core) .settings( - name := "app" + name := "app", + mainClass in (Compile, run) := Some("a.App") ) .nativePlatform(scalaVersions = Seq("2.11.12")) diff --git a/src/sbt-test/projectMatrix/native/core/src/main/scala/CoreMain.scala b/src/sbt-test/projectMatrix/native/core/src/main/scala/CoreMain.scala new file mode 100644 index 000000000..718013861 --- /dev/null +++ b/src/sbt-test/projectMatrix/native/core/src/main/scala/CoreMain.scala @@ -0,0 +1,6 @@ +package a + +object CoreMain { + def main(args: Array[String]): Unit = { + } +} From f71bd9163c3ca46672c12cc3bd50a94e939a570d Mon Sep 17 00:00:00 2001 From: exoego Date: Tue, 14 May 2019 11:15:10 +0900 Subject: [PATCH 03/10] Use travis_setup.sh from scala-native --- .travis.yml | 10 +++--- scripts/clangfmt | 74 +++++++++++++++++++++++++++++++++++++++++ scripts/scalafmt | 20 +++++++++++ scripts/travis_setup.sh | 42 +++++++++++++++++++++++ 4 files changed, 140 insertions(+), 6 deletions(-) create mode 100755 scripts/clangfmt create mode 100755 scripts/scalafmt create mode 100755 scripts/travis_setup.sh diff --git a/.travis.yml b/.travis.yml index d56744c46..ab0611e8d 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,19 +1,17 @@ dist: xenial group: stable +sudo: required language: scala jdk: openjdk11 +before_install: + - bash scripts/travis_setup.sh + before_script: - export JVM_OPTS="-Dfile.encoding=UTF-8 -Xmx1G -Xms1G -server -XX:ReservedCodeCacheSize=128M" - unset _JAVA_OPTIONS -addons: - apt: - packages: - - clang - - libunwind-dev - script: sbt scripted cache: diff --git 
a/scripts/clangfmt b/scripts/clangfmt new file mode 100755 index 000000000..afb123c3e --- /dev/null +++ b/scripts/clangfmt @@ -0,0 +1,74 @@ +#!/bin/bash +# +# Format C/C++ code using clang-format. +# +# To ensure reproducible formatting the script checks that clang-format +# is from the most recent version of LLVM supported by Scala Native. +# +# Usage: $0 [--test] +# +# Set CLANG_FORMAT_PATH to configure path to clang-format. + +set -euo pipefail +IFS=$'\n\t' + +# The required version of clang-format. +CLANG_FORMAT_VERSION=5.0 +CLANG_FORMAT_VERSION_STRING="clang-format version $CLANG_FORMAT_VERSION" + +die() { + while [ "$#" -gt 0 ]; do + echo >&2 "$1"; shift + done + exit 1 +} + +check_clang_format_version() { + cmd="$1" + [ -e "$(type -P "$cmd")" ] && \ + "$cmd" --version 2> /dev/null | grep -q "$CLANG_FORMAT_VERSION_STRING" +} + +clang_format= + +if [ -n "${CLANG_FORMAT_PATH:-}" ]; then + check_clang_format_version "$CLANG_FORMAT_PATH" || \ + die "CLANG_FORMAT_PATH does not have required version $CLANG_FORMAT_VERSION" \ + "CLANG_FORMAT_PATH points to $CLANG_FORMAT_PATH" + clang_format="$CLANG_FORMAT_PATH" +else + for cmd in "clang-format-$CLANG_FORMAT_VERSION" clang-format; do + if check_clang_format_version "$cmd"; then + clang_format="$cmd" + break + fi + done +fi + +if [ -z "$clang_format" ]; then + die "clang-format version $CLANG_FORMAT_VERSION not found" \ + "Install LLVM version $CLANG_FORMAT_VERSION and rerun." +fi + +test_mode= + +while [ "$#" -gt 0 ]; do + arg="$1" + case "$arg" in + --test) test_mode=true; shift ;; + --*) die "Unknown argument: $arg" "Usage: $0 [--test]" ;; + *) break ;; + esac +done + +if [ "$#" -gt 0 ]; then + "$clang_format" --style=file -i "$@" +else + find . -name "*.[ch]" -or -name "*.cpp" | xargs "$clang_format" --style=file -i +fi + +if [ "$test_mode" = true ]; then + git diff --quiet --exit-code || \ + die "C/C++ code formatting changes detected" \ + "Run \`$0\` to reformat." +fi diff --git a/scripts/scalafmt b/scripts/scalafmt new file mode 100755 index 000000000..92a912609 --- /dev/null +++ b/scripts/scalafmt @@ -0,0 +1,20 @@ +#!/usr/bin/env bash + +# set -x + +HERE="`dirname $0`" +VERSION="1.2.0" +COURSIER="$HERE/.coursier" +SCALAFMT="$HERE/.scalafmt-$VERSION" + +if [ ! -f $COURSIER ]; then + curl -L -o $COURSIER https://git.io/vgvpD + chmod +x $COURSIER +fi + +if [ ! -f $SCALAFMT ]; then + $COURSIER bootstrap com.geirsson:scalafmt-cli_2.11:$VERSION --main org.scalafmt.cli.Cli -o $SCALAFMT + chmod +x $SCALAFMT +fi + +$SCALAFMT "$@" diff --git a/scripts/travis_setup.sh b/scripts/travis_setup.sh new file mode 100755 index 000000000..18751dd24 --- /dev/null +++ b/scripts/travis_setup.sh @@ -0,0 +1,42 @@ +#!/usr/bin/env bash + +# Enable strict mode and fail the script on non-zero exit code, +# unresolved variable or pipe failure. 
+set -euo pipefail +IFS=$'\n\t' + +if [ "$(uname)" == "Darwin" ]; then + + brew update + brew install sbt + brew install bdw-gc + brew link bdw-gc + brew install jq + brew install re2 + brew install llvm@4 + export PATH="/usr/local/opt/llvm@4/bin:$PATH" + +else + + sudo apt-get update + + # Remove pre-bundled libunwind + sudo find /usr -name "*libunwind*" -delete + + # Use pre-bundled clang + export PATH=/usr/local/clang-5.0.0/bin:$PATH + export CXX=clang++ + + # Install Boehm GC and libunwind + sudo apt-get install libgc-dev libunwind8-dev + + # Build and install re2 from source + git clone https://code.googlesource.com/re2 + pushd re2 + git checkout 2017-03-01 + make -j4 test + sudo make install prefix=/usr + make testinstall prefix=/usr + popd + +fi From f749d7d896e8a90fc9b9dab3c0c77f6d2ac6bb92 Mon Sep 17 00:00:00 2001 From: exoego Date: Tue, 14 May 2019 11:38:28 +0900 Subject: [PATCH 04/10] Add openjdk8 since JDK11 is still not widely-supported in Scala ecosystem --- .travis.yml | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index ab0611e8d..42aad12c9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -3,7 +3,16 @@ group: stable sudo: required language: scala -jdk: openjdk11 +matrix: + include: + - jdk: openjdk8 + before_install: + - PATH=$(echo "$PATH" | sed -e 's/:\/usr\/local\/lib\/jvm\/openjdk11\/bin//') + - JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64 + apt: + packages: + - openjdk8 + - jdk: openjdk11 before_install: - bash scripts/travis_setup.sh From 7ff385627972e6c89d1754877053f431f0531b25 Mon Sep 17 00:00:00 2001 From: exoego Date: Tue, 14 May 2019 21:04:52 +0900 Subject: [PATCH 05/10] Paste all scripts from scala-native --- scripts/gyb.py | 1128 ++++++++++++++++++++++++++++++++++++++ scripts/javalib.py | 22 + scripts/nix-run | 5 + scripts/release | 11 + scripts/scala-native.nix | 23 + 5 files changed, 1189 insertions(+) create mode 100755 scripts/gyb.py create mode 100644 scripts/javalib.py create mode 100755 scripts/nix-run create mode 100755 scripts/release create mode 100644 scripts/scala-native.nix diff --git a/scripts/gyb.py b/scripts/gyb.py new file mode 100755 index 000000000..7a03d5798 --- /dev/null +++ b/scripts/gyb.py @@ -0,0 +1,1128 @@ +#!/usr/bin/env python +# GYB: Generate Your Boilerplate (improved names welcome; at least +# this one's short). See -h output for instructions + +from __future__ import print_function + +import os +import re +try: + from cStringIO import StringIO +except ImportError: + from io import StringIO +import textwrap +import tokenize + +from bisect import bisect + + +def get_line_starts(s): + """Return a list containing the start index of each line in s. + + The list also contains a sentinel index for the end of the string, + so there will be one more element in the list than there are lines + in the string + """ + starts = [0] + + for line in s.split('\n'): + starts.append(starts[-1] + len(line) + 1) + + starts[-1] -= 1 + return starts + + +def strip_trailing_nl(s): + """If s ends with a newline, drop it; else return s intact""" + return s[:-1] if s.endswith('\n') else s + + +def split_lines(s): + """Split s into a list of lines, each of which has a trailing newline + + If the lines are later concatenated, the result is s, possibly + with a single appended newline. 
+ """ + return [l + '\n' for l in s.split('\n')] + +# text on a line up to the first '$$', '${', or '%%' +literalText = r'(?: [^$\n%] | \$(?![${]) | %(?!%) )*' + +# The part of an '%end' line that follows the '%' sign +linesClose = r'[\ \t]* end [\ \t]* (?: \# .* )? $' + +# Note: Where "# Absorb" appears below, the regexp attempts to eat up +# through the end of ${...} and %{...}% constructs. In reality we +# handle this with the Python tokenizer, which avoids mis-detections +# due to nesting, comments and strings. This extra absorption in the +# regexp facilitates testing the regexp on its own, by preventing the +# interior of some of these constructs from being treated as literal +# text. +tokenize_re = re.compile( + r''' +# %-lines and %{...}-blocks + # \n? # absorb one preceding newline + ^ + (?: + (?P + (?P<_indent> [\ \t]* % (?! [{%] ) [\ \t]* ) (?! [\ \t] | ''' + linesClose + r''' ) .* + ( \n (?P=_indent) (?! ''' + linesClose + r''' ) .* ) * + ) + | (?P [\ \t]* % [ \t]* ''' + linesClose + r''' ) + | [\ \t]* (?P %\{ ) + (?: [^}]| \} (?!%) )* \}% # Absorb + ) + \n? # absorb one trailing newline + +# Substitutions +| (?P \$\{ ) + [^}]* \} # Absorb + +# %% and $$ are literal % and $ respectively +| (?P[$%]) (?P=symbol) + +# Literal text +| (?P ''' + literalText + r''' + (?: + # newline that doesn't precede space+% + (?: \n (?! [\ \t]* %[^%] ) ) + ''' + literalText + r''' + )* + \n? + ) +''', re.VERBOSE | re.MULTILINE) + +gyb_block_close = re.compile('\}%[ \t]*\n?') + + +def token_pos_to_index(token_pos, start, line_starts): + """Translate a tokenize (line, column) pair into an absolute + position in source text given the position where we started + tokenizing and a list that maps lines onto their starting + character indexes. + """ + relative_token_line_plus1, token_col = token_pos + + # line number where we started tokenizing + start_line_num = bisect(line_starts, start) - 1 + + # line number of the token in the whole text + abs_token_line = relative_token_line_plus1 - 1 + start_line_num + + # if found in the first line, adjust the end column to account + # for the extra text + if relative_token_line_plus1 == 1: + token_col += start - line_starts[start_line_num] + + # Sometimes tokenizer errors report a line beyond the last one + if abs_token_line >= len(line_starts): + return line_starts[-1] + + return line_starts[abs_token_line] + token_col + + +def tokenize_python_to_unmatched_close_curly(source_text, start, line_starts): + """Apply Python's tokenize to source_text starting at index start + while matching open and close curly braces. When an unmatched + close curly brace is found, return its index. If not found, + return len(source_text). If there's a tokenization error, return + the position of the error. + """ + stream = StringIO(source_text) + stream.seek(start) + nesting = 0 + + try: + for kind, text, token_start, token_end, line_text \ + in tokenize.generate_tokens(stream.readline): + + if text == '{': + nesting += 1 + elif text == '}': + nesting -= 1 + if nesting < 0: + return token_pos_to_index(token_start, start, line_starts) + + except tokenize.TokenError as error: + (message, error_pos) = error.args + return token_pos_to_index(error_pos, start, line_starts) + + return len(source_text) + + +def tokenize_template(template_text): + r"""Given the text of a template, returns an iterator over + (tokenType, token, match) tuples. + + **Note**: this is template syntax tokenization, not Python + tokenization. 
+ + When a non-literal token is matched, a client may call + iter.send(pos) on the iterator to reset the position in + template_text at which scanning will resume. + + This function provides a base level of tokenization which is + then refined by ParseContext.token_generator. + + >>> from pprint import * + >>> pprint(list((kind, text) for kind, text, _ in tokenize_template( + ... '%for x in range(10):\n% print x\n%end\njuicebox'))) + [('gybLines', '%for x in range(10):\n% print x'), + ('gybLinesClose', '%end'), + ('literal', 'juicebox')] + + >>> pprint(list((kind, text) for kind, text, _ in tokenize_template( + ... '''Nothing + ... % if x: + ... % for i in range(3): + ... ${i} + ... % end + ... % else: + ... THIS SHOULD NOT APPEAR IN THE OUTPUT + ... '''))) + [('literal', 'Nothing\n'), + ('gybLines', '% if x:\n% for i in range(3):'), + ('substitutionOpen', '${'), + ('literal', '\n'), + ('gybLinesClose', '% end'), + ('gybLines', '% else:'), + ('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n')] + + >>> for kind, text, _ in tokenize_template(''' + ... This is $some$ literal stuff containing a ${substitution} + ... followed by a %{...} block: + ... %{ + ... # Python code + ... }% + ... and here $${are} some %-lines: + ... % x = 1 + ... % y = 2 + ... % if z == 3: + ... % print '${hello}' + ... % end + ... % for x in zz: + ... % print x + ... % # different indentation + ... % twice + ... and some lines that literally start with a %% token + ... %% first line + ... %% second line + ... '''): + ... print (kind, text.strip().split('\n',1)[0]) + ('literal', 'This is $some$ literal stuff containing a') + ('substitutionOpen', '${') + ('literal', 'followed by a %{...} block:') + ('gybBlockOpen', '%{') + ('literal', 'and here ${are} some %-lines:') + ('gybLines', '% x = 1') + ('gybLinesClose', '% end') + ('gybLines', '% for x in zz:') + ('gybLines', '% # different indentation') + ('gybLines', '% twice') + ('literal', 'and some lines that literally start with a % token') + """ + pos = 0 + end = len(template_text) + + saved_literal = [] + literal_first_match = None + + while pos < end: + m = tokenize_re.match(template_text, pos, end) + + # pull out the one matched key (ignoring internal patterns starting + # with _) + ((kind, text), ) = ( + (kind, text) for (kind, text) in m.groupdict().items() + if text is not None and kind[0] != '_') + + if kind in ('literal', 'symbol'): + if len(saved_literal) == 0: + literal_first_match = m + # literals and symbols get batched together + saved_literal.append(text) + pos = None + else: + # found a non-literal. First yield any literal we've accumulated + if saved_literal != []: + yield 'literal', ''.join(saved_literal), literal_first_match + saved_literal = [] + + # Then yield the thing we found. If we get a reply, it's + # the place to resume tokenizing + pos = yield kind, text, m + + # If we were not sent a new position by our client, resume + # tokenizing at the end of this match. + if pos is None: + pos = m.end(0) + else: + # Client is not yet ready to process next token + yield + + if saved_literal != []: + yield 'literal', ''.join(saved_literal), literal_first_match + + +def split_gyb_lines(source_lines): + r"""Return a list of lines at which to split the incoming source + + These positions represent the beginnings of python line groups that + will require a matching %end construct if they are to be closed. + + >>> src = split_lines('''\ + ... if x: + ... print x + ... if y: # trailing comment + ... print z + ... if z: # another comment\ + ... 
''') + >>> s = split_gyb_lines(src) + >>> len(s) + 2 + >>> src[s[0]] + ' print z\n' + >>> s[1] - len(src) + 0 + + >>> src = split_lines('''\ + ... if x: + ... if y: print 1 + ... if z: + ... print 2 + ... pass\ + ... ''') + >>> s = split_gyb_lines(src) + >>> len(s) + 1 + >>> src[s[0]] + ' if y: print 1\n' + + >>> src = split_lines('''\ + ... if x: + ... if y: + ... print 1 + ... print 2 + ... ''') + >>> s = split_gyb_lines(src) + >>> len(s) + 2 + >>> src[s[0]] + ' if y:\n' + >>> src[s[1]] + ' print 1\n' + """ + last_token_text, last_token_kind = None, None + unmatched_indents = [] + + dedents = 0 + try: + for token_kind, token_text, token_start, \ + (token_end_line, token_end_col), line_text \ + in tokenize.generate_tokens(lambda i=iter(source_lines): + next(i)): + + if token_kind in (tokenize.COMMENT, tokenize.ENDMARKER): + continue + + if token_text == '\n' and last_token_text == ':': + unmatched_indents.append(token_end_line) + + # The tokenizer appends dedents at EOF; don't consider + # those as matching indentations. Instead just save them + # up... + if last_token_kind == tokenize.DEDENT: + dedents += 1 + # And count them later, when we see something real. + if token_kind != tokenize.DEDENT and dedents > 0: + unmatched_indents = unmatched_indents[:-dedents] + dedents = 0 + + last_token_text, last_token_kind = token_text, token_kind + + except tokenize.TokenError: + # Let the later compile() call report the error + return [] + + if last_token_text == ':': + unmatched_indents.append(len(source_lines)) + + return unmatched_indents + + +def code_starts_with_dedent_keyword(source_lines): + r"""Return True iff the incoming Python source_lines begin with "else", + "elif", "except", or "finally". + + Initial comments and whitespace are ignored. + + >>> code_starts_with_dedent_keyword(split_lines('if x in y: pass')) + False + >>> code_starts_with_dedent_keyword(split_lines('except ifSomethingElse:')) + True + >>> code_starts_with_dedent_keyword( + split_lines('\n# comment\nelse: # yes')) + True + """ + token_text = None + for token_kind, token_text, _, _, _ \ + in tokenize.generate_tokens(lambda i=iter(source_lines): next(i)): + + if token_kind != tokenize.COMMENT and token_text.strip() != '': + break + + return token_text in ('else', 'elif', 'except', 'finally') + + +class ParseContext(object): + """State carried through a parse of a template""" + + filename = '' + template = '' + line_starts = [] + code_start_line = -1 + code_text = None + tokens = None # The rest of the tokens + close_lines = False + + def __init__(self, filename, template=None): + self.filename = os.path.abspath(filename) + if template is None: + with open(filename) as f: + self.template = f.read() + else: + self.template = template + self.line_starts = get_line_starts(self.template) + self.tokens = self.token_generator(tokenize_template(self.template)) + self.next_token() + + def pos_to_line(self, pos): + return bisect(self.line_starts, pos) - 1 + + def token_generator(self, base_tokens): + r"""Given an iterator over (kind, text, match) triples (see + tokenize_template above), return a refined iterator over + token_kinds. + + Among other adjustments to the elements found by base_tokens, + this refined iterator tokenizes python code embedded in + template text to help determine its true extent. The + expression "base_tokens.send(pos)" is used to reset the index at + which base_tokens resumes scanning the underlying text. + + >>> ctx = ParseContext('dummy', ''' + ... %for x in y: + ... % print x + ... % end + ... 
literally + ... ''') + >>> while ctx.token_kind: + ... print (ctx.token_kind, ctx.code_text or ctx.token_text) + ... ignored = ctx.next_token() + ('literal', '\n') + ('gybLinesOpen', 'for x in y:\n') + ('gybLines', ' print x\n') + ('gybLinesClose', '% end') + ('literal', 'literally\n') + + >>> ctx = ParseContext('dummy', + ... '''Nothing + ... % if x: + ... % for i in range(3): + ... ${i} + ... % end + ... % else: + ... THIS SHOULD NOT APPEAR IN THE OUTPUT + ... ''') + >>> while ctx.token_kind: + ... print (ctx.token_kind, ctx.code_text or ctx.token_text) + ... ignored = ctx.next_token() + ('literal', 'Nothing\n') + ('gybLinesOpen', 'if x:\n') + ('gybLinesOpen', ' for i in range(3):\n') + ('substitutionOpen', 'i') + ('literal', '\n') + ('gybLinesClose', '% end') + ('gybLinesOpen', 'else:\n') + ('literal', 'THIS SHOULD NOT APPEAR IN THE OUTPUT\n') + + >>> ctx = ParseContext('dummy', + ... '''% for x in [1, 2, 3]: + ... % if x == 1: + ... literal1 + ... % elif x > 1: # add an output line after this line to fix bug + ... % if x == 2: + ... literal2 + ... % end + ... % end + ... % end + ... ''') + >>> while ctx.token_kind: + ... print (ctx.token_kind, ctx.code_text or ctx.token_text) + ... ignored = ctx.next_token() + ('gybLinesOpen', 'for x in [1, 2, 3]:\n') + ('gybLinesOpen', ' if x == 1:\n') + ('literal', 'literal1\n') + ('gybLinesOpen', 'elif x > 1: # add output line here to fix bug\n') + ('gybLinesOpen', ' if x == 2:\n') + ('literal', 'literal2\n') + ('gybLinesClose', '% end') + ('gybLinesClose', '% end') + ('gybLinesClose', '% end') + """ + for self.token_kind, self.token_text, self.token_match in base_tokens: + kind = self.token_kind + self.code_text = None + + # Do we need to close the current lines? + self.close_lines = kind == 'gybLinesClose' + + # %{...}% and ${...} constructs + if kind.endswith('Open'): + + # Tokenize text that follows as Python up to an unmatched '}' + code_start = self.token_match.end(kind) + self.code_start_line = self.pos_to_line(code_start) + + close_pos = tokenize_python_to_unmatched_close_curly( + self.template, code_start, self.line_starts) + self.code_text = self.template[code_start:close_pos] + yield kind + + if (kind == 'gybBlockOpen'): + # Absorb any '}% \n' + m2 = gyb_block_close.match(self.template, close_pos) + if not m2: + raise ValueError("Invalid block closure") + next_pos = m2.end(0) + else: + assert kind == 'substitutionOpen' + # skip past the closing '}' + next_pos = close_pos + 1 + + # Resume tokenizing after the end of the code. 
+ base_tokens.send(next_pos) + + elif kind == 'gybLines': + + self.code_start_line = self.pos_to_line( + self.token_match.start('gybLines')) + indentation = self.token_match.group('_indent') + + # Strip off the leading indentation and %-sign + source_lines = re.split( + '^' + re.escape(indentation), + self.token_match.group('gybLines') + '\n', + flags=re.MULTILINE)[1:] + + if code_starts_with_dedent_keyword(source_lines): + self.close_lines = True + + last_split = 0 + for line in split_gyb_lines(source_lines): + self.token_kind = 'gybLinesOpen' + self.code_text = ''.join(source_lines[last_split:line]) + yield self.token_kind + last_split = line + self.code_start_line += line - last_split + self.close_lines = False + + self.code_text = ''.join(source_lines[last_split:]) + if self.code_text: + self.token_kind = 'gybLines' + yield self.token_kind + else: + yield self.token_kind + + def next_token(self): + """Move to the next token""" + for kind in self.tokens: + return self.token_kind + + self.token_kind = None + + +class ExecutionContext(object): + """State we pass around during execution of a template""" + + def __init__(self, line_directive='// ###sourceLocation', + **local_bindings): + self.local_bindings = local_bindings + self.line_directive = line_directive + self.local_bindings['__context__'] = self + self.result_text = [] + self.last_file_line = None + + def append_text(self, text, file, line): + # see if we need to inject a line marker + if self.line_directive: + if (file, line) != self.last_file_line: + # We can only insert the line directive at a line break + if len(self.result_text) == 0 \ + or self.result_text[-1].endswith('\n'): + self.result_text.append('%s(file: "%s", line: %d)\n' % ( + self.line_directive, file, line + 1)) + # But if the new text contains any line breaks, we can create + # one + elif '\n' in text: + i = text.find('\n') + self.result_text.append(text[:i + 1]) + self.last_file_line = ( + self.last_file_line[0], self.last_file_line[1] + 1) + # and try again + self.append_text(text[i + 1:], file, line) + return + + self.result_text.append(text) + self.last_file_line = (file, line + text.count('\n')) + + +class ASTNode(object): + """Abstract base class for template AST nodes""" + + def __init__(self): + raise NotImplementedError("ASTNode.__init__ is not implemented.") + + def execute(self, context): + raise NotImplementedError("ASTNode.execute is not implemented.") + + def __str__(self, indent=''): + raise NotImplementedError("ASTNode.__str__ is not implemented.") + + def format_children(self, indent): + if not self.children: + return ' []' + + return '\n'.join( + ['', indent + '['] + + [x.__str__(indent + 4 * ' ') for x in self.children] + + [indent + ']']) + + +class Block(ASTNode): + """A sequence of other AST nodes, to be executed in order""" + + children = [] + + def __init__(self, context): + self.children = [] + + while context.token_kind and not context.close_lines: + if context.token_kind == 'literal': + node = Literal + else: + node = Code + self.children.append(node(context)) + + def execute(self, context): + for x in self.children: + x.execute(context) + + def __str__(self, indent=''): + return indent + 'Block:' + self.format_children(indent) + + +class Literal(ASTNode): + """An AST node that generates literal text""" + + def __init__(self, context): + self.text = context.token_text + start_position = context.token_match.start(context.token_kind) + self.start_line_number = context.pos_to_line(start_position) + self.filename = context.filename + 
context.next_token() + + def execute(self, context): + context.append_text(self.text, self.filename, self.start_line_number) + + def __str__(self, indent=''): + return '\n'.join( + [indent + x for x in ['Literal:'] + + strip_trailing_nl(self.text).split('\n')]) + + +class Code(ASTNode): + """An AST node that is evaluated as Python""" + + code = None + children = () + kind = None + + def __init__(self, context): + + source = '' + source_line_count = 0 + + def accumulate_code(): + s = source + (context.code_start_line - source_line_count) * '\n' \ + + textwrap.dedent(context.code_text) + line_count = context.code_start_line + \ + context.code_text.count('\n') + context.next_token() + return s, line_count + + eval_exec = 'exec' + if context.token_kind.startswith('substitution'): + eval_exec = 'eval' + source, source_line_count = accumulate_code() + source = '(' + source.strip() + ')' + + else: + while context.token_kind == 'gybLinesOpen': + source, source_line_count = accumulate_code() + source += ' __children__[%d].execute(__context__)\n' % len( + self.children) + source_line_count += 1 + + self.children += (Block(context),) + + if context.token_kind == 'gybLinesClose': + context.next_token() + + if context.token_kind == 'gybLines': + source, source_line_count = accumulate_code() + + # Only handle a substitution as part of this code block if + # we don't already have some %-lines. + elif context.token_kind == 'gybBlockOpen': + + # Opening ${...} and %{...}% constructs + source, source_line_count = accumulate_code() + + self.filename = context.filename + self.start_line_number = context.code_start_line + self.code = compile(source, context.filename, eval_exec) + self.source = source + + def execute(self, context): + # Save __children__ from the local bindings + save_children = context.local_bindings.get('__children__') + # Execute the code with our __children__ in scope + context.local_bindings['__children__'] = self.children + result = eval(self.code, context.local_bindings) + + if context.local_bindings['__children__'] is not self.children: + raise ValueError("The code is not allowed to mutate __children__") + # Restore the bindings + context.local_bindings['__children__'] = save_children + + # If we got a result, the code was an expression, so append + # its value + if result is not None and result != '': + context.append_text( + str(result), self.filename, self.start_line_number) + + def __str__(self, indent=''): + source_lines = re.sub(r'^\n', '', strip_trailing_nl( + self.source), flags=re.MULTILINE).split('\n') + if len(source_lines) == 1: + s = indent + 'Code: {' + source_lines[0] + '}' + else: + s = indent + 'Code:\n' + indent + '{\n' + '\n'.join( + indent + 4 * ' ' + l for l in source_lines + ) + '\n' + indent + '}' + return s + self.format_children(indent) + + +def parse_template(filename, text=None): + r"""Return an AST corresponding to the given template file. + + If text is supplied, it is assumed to be the contents of the file, + as a string. + + >>> print parse_template('dummy.file', text= + ... '''% for x in [1, 2, 3]: + ... % if x == 1: + ... literal1 + ... % elif x > 1: # add an output line after this line to fix the bug + ... % if x == 2: + ... literal2 + ... % end + ... % end + ... % end + ... 
''') + Block: + [ + Code: + { + for x in [1, 2, 3]: + __children__[0].execute(__context__) + } + [ + Block: + [ + Code: + { + if x == 1: + __children__[0].execute(__context__) + elif x > 1: # add output line after this line to fix bug + __children__[1].execute(__context__) + } + [ + Block: + [ + Literal: + literal1 + ] + Block: + [ + Code: + { + if x == 2: + __children__[0].execute(__context__) + } + [ + Block: + [ + Literal: + literal2 + ] + ] + ] + ] + ] + ] + ] + + >>> print parse_template( + >>> 'dummy.file', text='%for x in range(10):\n% print x\n%end\njuicebox') + Block: + [ + Code: + { + for x in range(10): + __children__[0].execute(__context__) + } + [ + Block: + [ + Code: {print x} [] + ] + ] + Literal: + juicebox + ] + + >>> print parse_template('/dummy.file', text= + ... '''Nothing + ... % if x: + ... % for i in range(3): + ... ${i} + ... % end + ... % else: + ... THIS SHOULD NOT APPEAR IN THE OUTPUT + ... ''') + Block: + [ + Literal: + Nothing + Code: + { + if x: + __children__[0].execute(__context__) + else: + __children__[1].execute(__context__) + } + [ + Block: + [ + Code: + { + for i in range(3): + __children__[0].execute(__context__) + } + [ + Block: + [ + Code: {(i)} [] + Literal: + + ] + ] + ] + Block: + [ + Literal: + THIS SHOULD NOT APPEAR IN THE OUTPUT + ] + ] + ] + + >>> print parse_template('dummy.file', text='''% + ... %for x in y: + ... % print y + ... ''') + Block: + [ + Code: + { + for x in y: + __children__[0].execute(__context__) + } + [ + Block: + [ + Code: {print y} [] + ] + ] + ] + + >>> print parse_template('dummy.file', text='''% + ... %if x: + ... % print y + ... AAAA + ... %else: + ... BBBB + ... ''') + Block: + [ + Code: + { + if x: + __children__[0].execute(__context__) + else: + __children__[1].execute(__context__) + } + [ + Block: + [ + Code: {print y} [] + Literal: + AAAA + ] + Block: + [ + Literal: + BBBB + ] + ] + ] + + >>> print parse_template('dummy.file', text='''% + ... %if x: + ... % print y + ... AAAA + ... %# This is a comment + ... %else: + ... BBBB + ... ''') + Block: + [ + Code: + { + if x: + __children__[0].execute(__context__) + # This is a comment + else: + __children__[1].execute(__context__) + } + [ + Block: + [ + Code: {print y} [] + Literal: + AAAA + ] + Block: + [ + Literal: + BBBB + ] + ] + ] + + >>> print parse_template('dummy.file', text='''\ + ... %for x in y: + ... AAAA + ... %if x: + ... BBBB + ... %end + ... CCCC + ... ''') + Block: + [ + Code: + { + for x in y: + __children__[0].execute(__context__) + } + [ + Block: + [ + Literal: + AAAA + Code: + { + if x: + __children__[0].execute(__context__) + } + [ + Block: + [ + Literal: + BBBB + ] + ] + Literal: + CCCC + ] + ] + ] + """ + return Block(ParseContext(filename, text)) + + +def execute_template(ast, line_directive='', **local_bindings): + r"""Return the text generated by executing the given template AST. + + Keyword arguments become local variable bindings in the execution context + + >>> ast = parse_template('/dummy.file', text= + ... '''Nothing + ... % if x: + ... % for i in range(3): + ... ${i} + ... % end + ... % else: + ... THIS SHOULD NOT APPEAR IN THE OUTPUT + ... ''') + >>> print execute_template(ast, line_directive='//#sourceLocation', x=1), + //#sourceLocation(file: "/dummy.file", line: 1) + Nothing + //#sourceLocation(file: "/dummy.file", line: 4) + 0 + //#sourceLocation(file: "/dummy.file", line: 4) + 1 + //#sourceLocation(file: "/dummy.file", line: 4) + 2 + + >>> ast = parse_template('/dummy.file', text= + ... '''Nothing + ... % a = [] + ... 
% for x in range(3): + ... % a.append(x) + ... % end + ... ${a} + ... ''') + >>> print execute_template(ast, line_directive='//#sourceLocation', x=1), + //#sourceLocation(file: "/dummy.file", line: 1) + Nothing + //#sourceLocation(file: "/dummy.file", line: 6) + [0, 1, 2] + """ + execution_context = ExecutionContext( + line_directive=line_directive, **local_bindings) + ast.execute(execution_context) + return ''.join(execution_context.result_text) + + +def main(): + import argparse + import sys + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description='Generate Your Boilerplate!', epilog=''' + A GYB template consists of the following elements: + + - Literal text which is inserted directly into the output + + - %% or $$ in literal text, which insert literal '%' and '$' + symbols respectively. + + - Substitutions of the form ${}. The Python + expression is converted to a string and the result is inserted + into the output. + + - Python code delimited by %{...}%. Typically used to inject + definitions (functions, classes, variable bindings) into the + evaluation context of the template. Common indentation is + stripped, so you can add as much indentation to the beginning + of this code as you like + + - Lines beginning with optional whitespace followed by a single + '%' and Python code. %-lines allow you to nest other + constructs inside them. To close a level of nesting, use the + "%end" construct. + + - Lines beginning with optional whitespace and followed by a + single '%' and the token "end", which close open constructs in + %-lines. + + Example template: + + - Hello - + %{ + x = 42 + def succ(a): + return a+1 + }% + + I can assure you that ${x} < ${succ(x)} + + % if int(y) > 7: + % for i in range(3): + y is greater than seven! + % end + % else: + y is less than or equal to seven + % end + + - The End. - + + When run with "gyb -Dy=9", the output is + + - Hello - + + I can assure you that 42 < 43 + + y is greater than seven! + y is greater than seven! + y is greater than seven! + + - The End. 
- +''' + ) + parser.add_argument( + '-D', action='append', dest='defines', metavar='NAME=VALUE', + default=[], + help='''Bindings to be set in the template's execution context''') + + parser.add_argument( + 'file', type=argparse.FileType(), + help='Path to GYB template file (defaults to stdin)', nargs='?', + default=sys.stdin) + parser.add_argument( + '-o', dest='target', type=argparse.FileType('w'), + help='Output file (defaults to stdout)', default=sys.stdout) + parser.add_argument( + '--test', action='store_true', + default=False, help='Run a self-test') + parser.add_argument( + '--verbose-test', action='store_true', + default=False, help='Run a verbose self-test') + parser.add_argument( + '--dump', action='store_true', + default=False, help='Dump the parsed template to stdout') + parser.add_argument( + '--line-directive', default='// ###sourceLocation', + help='Line directive prefix; empty => no line markers') + + args = parser.parse_args(sys.argv[1:]) + + if args.test or args.verbose_test: + import doctest + if doctest.testmod(verbose=args.verbose_test).failed: + sys.exit(1) + + bindings = dict(x.split('=', 1) for x in args.defines) + ast = parse_template(args.file.name, args.file.read()) + if args.dump: + print(ast) + # Allow the template to import .py files from its own directory + sys.path = [os.path.split(args.file.name)[0] or '.'] + sys.path + + args.target.write(execute_template(ast, args.line_directive, **bindings)) + +if __name__ == '__main__': + main() diff --git a/scripts/javalib.py b/scripts/javalib.py new file mode 100644 index 000000000..f5a3ba8cb --- /dev/null +++ b/scripts/javalib.py @@ -0,0 +1,22 @@ +""" + Utility that lists all non-implementation specific classes in javalib. + + It must be run from the root of the Scala Native checkout. 
+""" + +import subprocess,os + +cwd = os.getcwd() + +target = cwd + "/javalib/target/scala-2.11/classes/" + +paths = subprocess.check_output(["find", target, "-name", "*.nir"]) + +classes = sorted(list(set( + line.replace(target, "").replace(".nir", "").lstrip("/").rstrip("$").replace("/", ".") + for line in paths.split("\n") + if "$$anon" not in line and "java/" in line +))) + +for cls in classes: + print("* ``{}``".format(cls)) diff --git a/scripts/nix-run b/scripts/nix-run new file mode 100755 index 000000000..7f11a9264 --- /dev/null +++ b/scripts/nix-run @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +HERE="`dirname $0`" + +nix-shell $HERE/scala-native.nix -A clangEnv diff --git a/scripts/release b/scripts/release new file mode 100755 index 000000000..383eaab20 --- /dev/null +++ b/scripts/release @@ -0,0 +1,11 @@ +#!/bin/bash + +set -ex + +sbt rebuild test-all +sbt "tools/mimaReportBinaryIssues" +sbt publishSigned +sbt "project nscplugin" ++2.11.12 publishSigned +sbt "project nscplugin" ++2.11.11 publishSigned +sbt "project nscplugin" ++2.11.8 publishSigned +sbt ^^1.0.4 sbtScalaNative/publishSigned nir/publishSigned tools/publishSigned util/publishSigned testRunner/publishSigned diff --git a/scripts/scala-native.nix b/scripts/scala-native.nix new file mode 100644 index 000000000..9371b4988 --- /dev/null +++ b/scripts/scala-native.nix @@ -0,0 +1,23 @@ +let + pkgs = import {}; + stdenv = pkgs.stdenv; +in rec { + clangEnv = stdenv.mkDerivation rec { + name = "clang-env"; + shellHook = '' + alias cls=clear + ''; + CLANG_PATH = pkgs.clang + "/bin/clang"; + CLANGPP_PATH = pkgs.clang + "/bin/clang++"; + buildInputs = with pkgs; [ + stdenv + sbt + openjdk + boehmgc + libunwind + re2 + clang + zlib + ]; + }; +} From 42fa30a9d23f1096588b01b4b2f576e93d402de6 Mon Sep 17 00:00:00 2001 From: exoego Date: Tue, 14 May 2019 21:58:58 +0900 Subject: [PATCH 06/10] Use trusty and jdk 8 as same as scala-native --- .travis.yml | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 42aad12c9..c97f953cb 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,7 +1,10 @@ -dist: xenial +dist: trusty group: stable sudo: required +env: + - SCALANATIVE_GC=boehm + language: scala matrix: include: @@ -12,7 +15,7 @@ matrix: apt: packages: - openjdk8 - - jdk: openjdk11 +# - jdk: openjdk11 before_install: - bash scripts/travis_setup.sh From 75a992fa24a0768d9b528504760e1c61d21fb74a Mon Sep 17 00:00:00 2001 From: exoego Date: Tue, 14 May 2019 22:14:39 +0900 Subject: [PATCH 07/10] Add Scala versions --- src/sbt-test/projectMatrix/native/build.sbt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sbt-test/projectMatrix/native/build.sbt b/src/sbt-test/projectMatrix/native/build.sbt index eaab3000d..ab770adb6 100644 --- a/src/sbt-test/projectMatrix/native/build.sbt +++ b/src/sbt-test/projectMatrix/native/build.sbt @@ -8,7 +8,7 @@ lazy val core = (projectMatrix in file("core")) name := "core", mainClass in (Compile, run) := Some("a.CoreMain") ) - .nativePlatform(scalaVersions = Seq("2.11.12", "2.11.11")) + .nativePlatform(scalaVersions = Seq("2.11.12", "2.12.4")) lazy val app = (projectMatrix in file("app")) .dependsOn(core) From 604b8524740855ea140e2eeeb0ef1d68a74906c7 Mon Sep 17 00:00:00 2001 From: exoego Date: Tue, 14 May 2019 22:20:47 +0900 Subject: [PATCH 08/10] Use preinstalled JDK --- .travis.yml | 6 ------ 1 file changed, 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index c97f953cb..9cdbcba60 100644 --- a/.travis.yml +++ b/.travis.yml @@ -9,12 +9,6 @@ 
language: scala
 matrix:
   include:
     - jdk: openjdk8
-      before_install:
-      - PATH=$(echo "$PATH" | sed -e 's/:\/usr\/local\/lib\/jvm\/openjdk11\/bin//')
-      - JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64
-      apt:
-        packages:
-        - openjdk8
 #    - jdk: openjdk11
 before_install:
   - bash scripts/travis_setup.sh
From a985d4f07940a69e0a92a20430d3902bcd037ed9 Mon Sep 17 00:00:00 2001
From: Eugene Yokota
Date: Tue, 25 Jun 2019 01:06:16 -0400
Subject: [PATCH 09/10] limit to Scala 2.11

---
 src/sbt-test/projectMatrix/native/build.sbt | 2 +-
 src/sbt-test/projectMatrix/native/test      | 3 +--
 2 files changed, 2 insertions(+), 3 deletions(-)

diff --git a/src/sbt-test/projectMatrix/native/build.sbt b/src/sbt-test/projectMatrix/native/build.sbt
index ab770adb6..b9a3aab87 100644
--- a/src/sbt-test/projectMatrix/native/build.sbt
+++ b/src/sbt-test/projectMatrix/native/build.sbt
@@ -8,7 +8,7 @@ lazy val core = (projectMatrix in file("core"))
     name := "core",
     mainClass in (Compile, run) := Some("a.CoreMain")
   )
-  .nativePlatform(scalaVersions = Seq("2.11.12", "2.12.4"))
+  .nativePlatform(scalaVersions = Seq("2.11.12"))
 
 lazy val app = (projectMatrix in file("app"))
   .dependsOn(core)
diff --git a/src/sbt-test/projectMatrix/native/test b/src/sbt-test/projectMatrix/native/test
index 1bdaa1968..d4d53c054 100644
--- a/src/sbt-test/projectMatrix/native/test
+++ b/src/sbt-test/projectMatrix/native/test
@@ -1,5 +1,4 @@
 > nativeLink
 
-$ exists app/target/native-2.12/app-out
-$ exists core/target/native-2.12/core-out
+$ exists app/target/native-2.11/app-out
 $ exists core/target/native-2.11/core-out
From e37ce28e8116505d8cb7614240b3453a387aae7f Mon Sep 17 00:00:00 2001
From: Eugene Yokota
Date: Tue, 25 Jun 2019 01:21:43 -0400
Subject: [PATCH 10/10] replace NATIVE with Native

---
 src/main/scala/sbt/ProjectMatrix.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/main/scala/sbt/ProjectMatrix.scala b/src/main/scala/sbt/ProjectMatrix.scala
index 3fb590c7e..3bc51aca0 100644
--- a/src/main/scala/sbt/ProjectMatrix.scala
+++ b/src/main/scala/sbt/ProjectMatrix.scala
@@ -97,7 +97,7 @@ object ProjectMatrix {
   val jvmDirectorySuffix: String = "-jvm"
   val jsIdSuffix: String = "JS"
   val jsDirectorySuffix: String = "-js"
-  val nativeIdSuffix: String = "NATIVE"
+  val nativeIdSuffix: String = "Native"
   val nativeDirectorySuffix: String = "-native"
 
   /** A row in the project matrix, typically representing a platform.