Merge pull request #1389 from sbt/wip/only-push-scaladoc

Remove nested site generation in favor of just creating Scaladoc/Sxr doc...
This commit is contained in:
eugene yokota 2014-06-27 12:07:33 -04:00
commit 3dbf0ced7d
153 changed files with 5 additions and 22580 deletions

View File

@ -12,44 +12,19 @@
// Build definition for generating and publishing the sbt documentation site.
// NOTE(review): this span comes from a diff view — it interleaves the removed
// Sphinx-based `settings` body with its replacement (the bare
// `ghPagesSettings` expression), so it is not one coherent revision.
object Docs
{
// Extra files (see `rootFiles :=` below: CNAME, robots.txt) copied to the site root.
val rootFiles = SettingKey[Seq[File]]("root-files", "Location of file that will be copied to the website root.")
// When true, the version being published is also linked as the latest release.
val latestRelease = SettingKey[Boolean]("latest-release")
// Generated files that must never be copied into the published site.
val siteExcludes = Set(".buildinfo", "objects.inv")
def siteInclude(f: File) = !siteExcludes.contains(f.getName)
// Base URL of the Sphinx sources for the given source version.
def siteSourceBase(siteSourceVersion: String) = s"https://github.com/sbt/sbt/raw/$siteSourceVersion/src/sphinx/"
val sbtSiteBase = uri("http://www.scala-sbt.org/")
// Well-known path segments on the published site.
val SnapshotPath = "snapshot"
val ReleasePath = "release"
val DocsPath = "docs"
val IndexHtml = "index.html"
val HomeHtml = "home.html"
// Matches explicit version directory names such as 0.13.0 or 0.13.0-RC1
// (major.minor.patch plus an optional -qualifier).
val VersionPattern = """(\d+)\.(\d+)\.(\d+)(-.+)?""".r.pattern
// NOTE(review): diff artifact — the Sphinx/Scaladoc/Sxr chain below was
// removed by this commit and replaced by the single `ghPagesSettings` line.
def settings: Seq[Setting[_]] =
site.settings ++
site.sphinxSupport(DocsPath) ++
site.includeScaladoc("api") ++
siteIncludeSxr("sxr") ++
ghPagesSettings ++
Seq(
SphinxSupport.sphinxEnv in SphinxSupport.Sphinx <<= sphinxEnvironmentVariables,
SphinxSupport.sphinxIncremental in SphinxSupport.Sphinx := true,
// TODO: set to true with newer sphinx plugin release
SphinxSupport.enableOutput in SphinxSupport.generatePdf := false
)
ghPagesSettings
// Settings wiring the sbt-ghpages plugin to the sbt.github.com repository.
def ghPagesSettings = ghpages.settings ++ Seq(
git.remoteRepo := "git@github.com:sbt/sbt.github.com.git",
localRepoDirectory,
// Our custom synchronization replaces the plugin's default synchLocal.
ghkeys.synchLocal <<= synchLocalImpl,
rootFiles := {
val base = (sourceDirectory in SphinxSupport.Sphinx).value
Seq("CNAME", "robots.txt").map(base / _)
},
// Off by default; flipped on by the `latest-release-docs` command below.
latestRelease in ThisBuild := false,
commands += setLatestRelease,
GitKeys.gitBranch in ghkeys.updatedRepository := Some("master")
)
@ -66,146 +41,14 @@ object Docs
site.addMappingsToSiteDir(mappings in sxr, prefix)
)
// Environment variables handed to the Sphinx build so the documentation
// templates can substitute sbt and Scala version strings.
def sphinxEnvironmentVariables = (scalaVersion, version, isSnapshot) map { (scalaV, sbtV, snap) =>
// sphinx's terminology: major.minor
def release(v: String): String = CrossVersion.partialVersion(v) match {
case Some((major,minor)) => major + "." + minor
case None => v
}
// Strip any qualifier (everything from the first '-', e.g. -SNAPSHOT, -RC1).
val siteVersion = sbtV.takeWhile(_ != '-')
// Snapshots link their sources at the major.minor branch rather than a tag.
val siteSourceVersion = if(snap) release(siteVersion) else siteVersion
Map[String,String](
"sbt.full.version" -> sbtV,
"sbt.partial.version" -> release(sbtV),
"sbt.site.version" -> siteVersion,
"sbt.site.source.base" -> siteSourceBase(siteSourceVersion),
"sbt.binary.version" -> CrossVersion.binarySbtVersion(sbtV),
"scala.full.version" -> scalaV,
"scala.partial.version" -> release(scalaV),
"scala.binary.version" -> CrossVersion.binaryScalaVersion(scalaV)
)
}
// Copies the generated site into the local ghpages checkout for this version.
// NOTE(review): diff artifact — both the old signature (with isSnapshot,
// latestRelease and rootFiles) and the new reduced one appear below; the body
// still references the old parameters (snap, latest, roots), so only one of
// the two signatures belongs to the final revision.
def synchLocalImpl = (ghkeys.privateMappings, ghkeys.updatedRepository, version, isSnapshot, latestRelease, streams, rootFiles) map {
(mappings, repo, v, snap, latest, s, roots) =>
def synchLocalImpl = (ghkeys.privateMappings, ghkeys.updatedRepository, version, streams) map {
(mappings, repo, v, s) =>
val versioned = repo / v
// NOTE(review): also diff residue — the old code deleted the whole
// versioned directory; the new code deletes only the sxr/api subtrees.
IO.delete(versioned)
IO.delete(versioned / "sxr")
IO.delete(versioned / "api")
// Copy every site mapping except excluded generated files (siteInclude).
val toCopy = for( (file, target) <- mappings if siteInclude(file) ) yield (file, versioned / target)
IO.copy(toCopy)
// Root-level files (CNAME, robots.txt) go next to, not under, the version dir.
for(f <- roots)
IO.copyFile(f, repo / f.getName)
// .nojekyll stops GitHub Pages from running Jekyll over the content.
IO.touch(repo / ".nojekyll")
// Regenerate the version switcher data from the version directories present.
IO.write(repo / "versions.js", versionsJs(sortVersions(collectVersions(repo))))
// A non-snapshot latest release also becomes the site's root index.html.
if(!snap && latest)
RootIndex(versioned / DocsPath / "home.html", repo / IndexHtml)
// Point the snapshot/release alias at this version via a symlink.
if(snap || latest)
linkSite(repo, v, if(snap) SnapshotPath else ReleasePath, s.log)
s.log.info("Copied site to " + versioned)
// Only the latest release refreshes the sitemap files.
if(latest) {
val (index, siteMaps) = SiteMap.generate(repo, sbtSiteBase, gzip=true, siteEntry(v), s.log)
s.log.info(s"Generated site map index: $index")
s.log.debug(s"Generated site maps: ${siteMaps.mkString("\n\t", "\n\t", "")}")
}
repo
}
// Assigns each published file a sitemap entry (change frequency, priority)
// based on its site-relative path.
// `CurrentVersion` is deliberately capitalized: inside the patterns below it
// is a stable identifier, so `docs(CurrentVersion)` matches only paths whose
// version segment equals the current version (same trick with `ReleasePath`).
def siteEntry(CurrentVersion: String)(file: File, relPath: String): Option[Entry] =
{
val apiOrSxr = """([^/]+)/(api|sxr)/.*""".r
val docs = """([^/]+)/docs/.*""".r
val old077 = """0\.7\.7/.*""".r
// NOTE(review): `manualRedirects` is defined but never used in the match
// below; such paths fall through to the catch-all `case x` (priority 0.0).
val manualRedirects = """[^/]+\.html""".r
val snapshot = """(.+-SNAPSHOT|snapshot)/.+/.*""".r
// highest priority is the home page
// X/docs/ are higher priority than X/(api|sxr)/
// release/ is slighty higher priority than <releaseVersion>/
// non-current releases are low priority
// 0.7.7 documentation is very low priority
// snapshots docs are very low priority
// the manual redirects from the old version of the site have no priority at all
relPath match {
case "index.html" => Some(Entry("weekly", 1.0))
case docs(ReleasePath) => Some( Entry("weekly", 0.9) )
case docs(CurrentVersion) => Some( Entry("weekly", 0.8) )
case apiOrSxr(ReleasePath, _) => Some( Entry("weekly", 0.6) )
case apiOrSxr(CurrentVersion, _) => Some( Entry("weekly", 0.5) )
case snapshot(_) => Some( Entry("weekly", 0.02) )
case old077() => Some( Entry("never", 0.01) )
case docs(_) => Some( Entry("never", 0.2) )
case apiOrSxr(_, _) => Some( Entry("never", 0.1) )
case x => Some( Entry("never", 0.0) )
}
}
def versionsJs(vs: Seq[String]): String = "var availableDocumentationVersions = " + vs.mkString("['", "', '", "']")
// Names of all directories under `base` that are explicit version numbers
// (as recognized by `versionFilter`).
def collectVersions(base: File): Seq[String] = {
  val versionDirectories = (base * versionFilter).get
  versionDirectories.map(_.getName)
}
def sortVersions(vs: Seq[String]): Seq[String] = vs.sortBy(versionComponents).reverse
// Parses a version string into (major, minor, patch, optional qualifier),
// or None when it does not match `VersionPattern`.
def versionComponents(v: String): Option[(Int,Int,Int,Option[String])] = {
  val matcher = VersionPattern.matcher(v)
  if (!matcher.matches()) None
  else {
    // Group 4 (the "-qualifier" suffix) may be absent, hence Option(...).
    val qualifier = Option(matcher.group(4))
    Some((matcher.group(1).toInt, matcher.group(2).toInt, matcher.group(3).toInt, qualifier))
  }
}
def versionFilter = new PatternFilter(VersionPattern) && DirectoryFilter
// Points the alias directory `from` (e.g. "release" or "snapshot") at the
// versioned site directory `to` via a symbolic link under `base`.
def linkSite(base: File, to: String, from: String, log: Logger) {
val current = base / to
// Fail fast if the versioned site was not copied into place first.
assert(current.isDirectory, "Versioned site not present at " + current.getAbsolutePath)
val symlinkDir = base / from
// Remove any stale link before re-creating it (File.delete removes a
// symlink or empty entry only — NOTE(review): a non-empty real directory
// here would survive this delete; confirm that cannot occur).
symlinkDir.delete()
symlink(path = to, file = symlinkDir, log = log)
}
// TODO: platform independence/use symlink from Java 7
// Creates a relative symbolic link at `file` pointing to `path` by shelling
// out to `ln -s`, logging process output to `log`; any non-zero exit code
// aborts the build.
// NOTE(review): the exit code is bound as `code` but not included in the
// error message.
def symlink(path: String, file: File, log: Logger): Unit =
"ln" :: "-s" :: path :: file.getAbsolutePath :: Nil ! log match {
case 0 => ()
case code => error("Could not create symbolic link '" + file.getAbsolutePath + "' with path " + path)
}
// Command `latest-release-docs`: flips `latestRelease in ThisBuild` to true
// for the rest of the session so the next site sync links this version as
// the latest release.
def setLatestRelease = Command.command("latest-release-docs") { state =>
  val extracted = Project.extract(state)
  extracted.append(Seq(latestRelease in ThisBuild := true), state)
}
}
// Turns the versioned documentation home page into the site's root
// index.html, retargeting its relative links into the release docs directory.
object RootIndex
{
import Docs._
import org.jsoup._
// Parses `versionIndex` as HTML, rewrites its links, removes elements with
// class "search", and writes the result to `to`.
def apply(versionIndex: File, to: File)
{
val doc = Jsoup.parse(versionIndex, "UTF-8")
rewriteLinks(doc)
removeSearch(doc)
IO.write(to, doc.outerHtml)
}
// Relative links (other than in-page "#" anchors) are redirected into
// `release/docs/`; absolute URLs pass through unchanged.
def retargetIndexLink(original: String): String =
if(isAbsolute(original) || original.startsWith("#"))
original
else
ReleasePath + "/docs/" + original
def isAbsolute(s: String): Boolean = (new java.net.URI(s)).isAbsolute
// Rewrites href attributes on all elements and src attributes on scripts.
def rewriteLinks(doc: nodes.Document)
{
rewriteLinks(doc, "*", "href")
rewriteLinks(doc, "script", "src")
}
// Retargets attribute `attrName` on every `elemName` element carrying it
// (the "[attr]" selector restricts to elements where the attribute exists).
def rewriteLinks(doc: nodes.Document, elemName: String, attrName: String): Unit =
for(elem <- select(doc, elemName + "[" + attrName + "]"))
elem.attr(attrName, retargetIndexLink(elem.attr(attrName)))
// Drops elements with class "search" — presumably the page's search box,
// which would not work on the relocated root page; confirm against the site.
def removeSearch(doc: nodes.Document): Unit =
doc.select(".search").remove()
// jsoup returns a Java collection; expose the matches as a Scala iterator.
def select(doc: nodes.Document, s: String) =
{
import collection.JavaConverters._
doc.select(s).iterator.asScala
}
}

View File

@ -1,5 +0,0 @@
=================
Command Engine
=================
Placeholder for command engine details.

View File

@ -1,125 +0,0 @@
=================
Core Principles
=================
This document details the core principles overarching sbt's design and code style. Sbt's core principles can
be stated quite simply:
1. Everything should have a ``Type``, enforced as much as is practical.
2. Dependencies should be **explicit**.
3. Once learned, a concept should hold throughout **all** parts of sbt.
4. Parallel is the default.
With these principles in mind, let's walk through the core design of sbt.
Introduction to build state
===========================
This is the first piece you hit when starting sbt. Sbt's command engine is the means by which
it processes user requests using the build state. The command engine is essentially a means of applying
**state transformations** on the build state, to execute user requests.
In sbt, commands are functions that take the current build state (``sbt.State``) and produce the next state. In
other words, they are essentially functions of ``sbt.State => sbt.State``. However, in reality, Commands are
actually string processors which take some string input and act on it, returning the next build state.
The details of the command engine are covered in :doc:`the command engine section <Command-Engine>`.
So, the entirety of sbt is driven off the ``sbt.State`` class. Since this class needs to be resilient in the
face of custom code and plugins, it needs a mechanism to store the state from any potential client. In
dynamic languages, this can be done directly on objects.
A naive approach in Scala is to use a ``Map[String, Any]``. However, this violates tenet #1: Everything should have a `Type`.
So, sbt defines a new type of map called an ``AttributeMap``. An ``AttributeMap`` is a key-value storage mechanism where
keys are both strings *and* expected `Type`s for their value.
Here is what the typesafe ``AttributeKey`` key looks like ::
sealed trait AttributeKey[T] {
/** The label is the identifier for the key and is camelCase by convention. */
def label: String
/** The runtime evidence for `T` */
def manifest: Manifest[T]
}
These keys store both a `label` (``string``) and some runtime type information (``manifest``). To put or get something on
the AttributeMap, we first need to construct one of these keys. Let's look at the basic definition of the ``AttributeMap`` ::
trait AttributeMap {
/** Gets the value of type `T` associated with the key `k` or `None` if no value is associated.
* If a key with the same label but a different type is defined, this method will return `None`. */
def get[T](k: AttributeKey[T]): Option[T]
/** Adds the mapping `k -> value` to this map, replacing any existing mapping for `k`.
* Any mappings for keys with the same label but different types are unaffected. */
def put[T](k: AttributeKey[T], value: T): AttributeMap
}
Now that there's a definition of what build state is, there needs to be a way to dynamically construct it. In sbt, this is
done through the ``Setting[_]`` sequence.
Settings Architecture
=====================
A Setting represents the means of constructing the value of one particular ``AttributeKey[_]`` in the ``AttributeMap`` of build state. A setting consists of two pieces:
1. The ``AttributeKey[T]`` where the value of the setting should be assigned.
2. An ``Initialize[T]`` object which is able to construct the value for this setting.
Sbt's initialization time is basically just taking a sequence of these ``Setting[_]`` objects and running their initialization objects and then storing the value into the ``AttributeMap``. This means overwriting an existing value at a key is as easy as appending a
``Setting[_]`` to the end of the sequence which does so.
Where it gets interesting is that ``Initialize[T]`` can depend on other ``AttributeKey[_]``s in the build state. Each ``Initialize[_]``
can pull values from any ``AttributeKey[_]`` in the build state's ``AttributeMap`` to compute its value. Sbt ensures a few things
when it comes to ``Initialize[_]`` dependencies:
1. There can be no circular dependencies
2. If one ``Initialize[_]`` depends on another ``Initialize[_]`` key, then *all* associated ``Initialize[_]`` blocks for that key must
have run before we load the value.
Let's look at what gets stored for the setting ::
normalizedName := normalize(name.value)
.. Note: This image comes from a google drawing: https://docs.google.com/a/typesafe.com/drawings/d/1hvE89XVrQiXdSBsgaQgQGTmcO44EBZPg4_0WxKXU7Pw/edit
.. Feel free to request access to modify as appropriate.
.. image:: overview-setting-example.png
Here, a ``Setting[_]`` is constructed that understands it depends on the value in the ``name`` AttributeKey. Its initialize block first grabs the value of the ``name`` key, then runs the function normalize on it to compute its value.
This represents the core mechanism of how to construct sbt's build state. Conceptually, at some point we have a graph of dependencies
and initialization functions which we can use to construct the first build state. Once this is completed, we can then start to process
user requests.
Task Architecture
=================
The next layer in sbt is around these user requests, or tasks. When a user configures a build, they are defining a set of repeatable
tasks that they can run on their project. Things like ``compile`` or ``test``. These tasks *also* have a dependency graph, where
e.g. the ``test`` task requires that ``compile`` has run before it can successfully execute.
Sbt defines a class ``Task[T]``. The ``T`` type parameter represents the type of data returned by a task. Remember the tenets of
sbt? "All things have types" and "Dependencies are explicit" both hold true for tasks. Sbt promotes a style of task dependencies that
is closer to functional programming: Return data for your users rather than using shared mutable state.
Most build tools communicate over the filesystem, and indeed sbt, by necessity, does some of this. However, for stable parallelization it is far better to keep tasks isolated on the filesystem and communicate directly through types.
Similarly to how a ``Setting[_]`` stores both dependencies and an initialization function, a ``Task[_]`` stores both its
``Task[_]``dependencies and its behavior (a function).
TODO - More on ``Task[_]``
TODO - Transition into ``InputTask[_]``, rehash Command
TODO - Transition into Scope.

View File

@ -1,131 +0,0 @@
======================
Setting Initialization
======================
This page outlines the mechanisms by which sbt loads settings for a particular build, including the hooks where
users can control the ordering of everything.
As stated elsewhere, sbt constructs its initialization graph and task graph via ``Setting[_]`` objects. A setting
is something which can take the values stored at other Keys in the build state, and generates a new value for
a particular build key. Sbt converts all registered ``Setting[_]`` objects into a giant linear sequence and
*compiles* them into a task graph. This task graph is then used to execute your build.
All of sbt's loading semantics are contained within the `Load.scala <../../sxr/sbt/Load.scala.html>` file. It is approximately the following:
.. Note: This image comes from a google drawing: https://docs.google.com/a/typesafe.com/drawings/d/1Aj_IkOaJpRXJNhrVtVJaS8m-YRcKsympVOj3M2sUz7E/edit
.. Feel free to request access to modify as appropriate.
.. image:: settings-initialization-load-ordering.png
The blue circles represent actions happening when sbt loads a project. We can see that sbt performs the following actions in load:
1. Compile the user-level project (``~/.sbt/<version>/``)
a. Load any plugins defined by this project (``~/.sbt/<version>/plugins/*.sbt`` and ``~/.sbt/<version>/plugins/project/*.scala``)
b. Load all settings defined (``~/.sbt/<version>/*.sbt`` and ``~/.sbt/<version>/plugins/*.scala``)
2. Compile the current project (``<working-directory/project``)
a. Load all defined plugins (``project/plugins.sbt`` and ``project/project/*.scala``)
b. Load/Compile the project (``project/*.scala``)
3. Load project *.sbt files (``build.sbt`` and friends).
Each of these loads defines several sequences of settings. The diagram shows the two most important:
* ``buildSettings`` - These are settings defined to be ``in ThisBuild`` or directly against the ``Build`` object. They are initialized *once* for the build.
You can add these, e.g. in ``project/build.scala`` ::
object MyBuild extends Build {
override val settings = Seq(foo := "hi")
}
or in a ``build.sbt`` file ::
foo in ThisBuild := "hi"
* ``projectSettings`` - These are settings specific to a project. They are specific to a *particular sub project* in the build. A
plugin may be contributing its settings to more than one project, in which case the values are duplicated for each project.
You add project specific settings, eg. in ``project/build.scala`` ::
object MyBuild extends Build {
val test = project.in(file(".")).settings(...)
}
After loading/compiling all the build definitions, sbt has a series of Seq[Setting[_]] that it must order. As shown in the diagram,
the default inclusion order for sbt is:
1. All AutoPlugin settings
2. All settings defined in ``project/Build.scala``
3. All settings defined in the user directory (``~/.sbt/<version>/*.sbt``)
4. All local configurations (``build.sbt``)
Controlling Initialization
==========================
The order which sbt uses to load settings is configurable at a *project* level. This means that we can't control
the order of settings added to Build/Global namespace, but we can control how each project loads, e.g. plugins and ``.sbt`` files.
To do so, use the ``AddSettings`` class ::
import sbt._
import Keys._
import AddSettings._
object MyOwnOrder extends Build {
// here we load config from a txt file.
lazy val root = project.in(file(".")).settingSets( autoPlugins, buildScalaFiles, sbtFiles(file("silly.txt")) )
}
In the above project, we've modified the order of settings to be:
1. All AutoPlugin settings.
2. All settings defined in the ``project/Build.scala`` file (shown above).
3. All settings found in the ``silly.txt`` file.
What we've excluded:
* All settings from the user directory (``~/.sbt/<version>``)
* All ``*.sbt`` settings.
The AddSettings object provides the following "groups" of settings you can use for ordering:
``autoPlugins``
All the ordered settings of plugins after they've gone through dependency resolution
``buildScalaFiles``
The full sequence of settings defined directly in ``project/*.scala`` builds.
``sbtFiles(*)``
Specifies the exact setting DSL files to include (files must use the ``.sbt`` file format)
``userSettings``
All the settings defined in the user directory ``~/.sbt/<version>/``.
``defaultSbtFiles``
Include all local ``*.sbt`` file settings.
*Note: Be very careful when reordering settings. It's easy to accidentally remove core functionality.*
For example, let's see what happens if we move the ``build.sbt`` files *before* the ``buildScalaFile``.
Let's create an example project with the following definition:
`project/build.scala` ::
object MyTestBuild extends Build {
val testProject = project.in(file(".")).settingSets(autoPlugins, defaultSbtFiles, buildScalaFile).settings(
version := scalaBinaryVersion.value match {
case "2.10" => "1.0-SNAPSHOT"
case v => s"1.0-for-${v}-SNAPSHOT"
}
)
}
This build defines a version string which appends the scala version if the current scala version is not in the ``2.10.x`` series.
Now, when issuing a release we want to lock down the version. Most tools assume this can happen by writing a ``version.sbt`` file:
`version.sbt` ::
version := "1.0.0"
However, when we load this new build, we find that the ``version`` in ``version.sbt`` has been **overridden** by the one defined
in ``project/Build.scala`` because of the order we defined for settings, so the new ``version.sbt`` file has no effect.

View File

@ -1,5 +0,0 @@
=================
Task Engine
=================
Placeholder for task engine design details.

View File

@ -1,16 +0,0 @@
==============
Architecture
==============
This is the set of documentation about the Architecture of sbt. This covers all the core components of
sbt as well as the general notion of how they all work together. This documentation is suitable for those who wish to
have a deeper understanding of sbt's core, but already understand the fundamentals of ``Setting[_]``, ``Task[_]`` and
constructing builds.
.. toctree::
:maxdepth: 2
Core-Principles
Setting-Initialization
Task-Engine
Command-Engine

Binary file not shown.

Before

Width:  |  Height:  |  Size: 29 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 81 KiB

View File

@ -1 +0,0 @@
www.scala-sbt.org

View File

@ -1,150 +0,0 @@
===================
Bintray For Plugins
===================
**This is currently in Beta mode.**
sbt hosts their community plugin repository on `Bintray <http://bintray.com/sbt>`_. Bintray is a repository hosting site, similar to github, which allows users to contribute their own plugins, while sbt can aggregate them together in a common repository.
This document walks you through the means to create your own repository for hosting your sbt plugins and then linking them into the sbt shared repository. This will make your plugins available for all sbt users without additional configuration (besides declaring a dependency on your plugin).
To do this, we need to perform the following steps:
Create an account on Bintray
============================
First, go to http://bintray.com. Click on the sign in link on the top left, and then the sign up button.
*Note: If you had an account on repo.scala-sbt.org previously, please use the same email address when you create this account.*
Create a repository for your sbt plugins
========================================
Now, we'll create a repository to host our personal sbt plugins. In bintray, create a generic repository called ``sbt-plugins``.
First, go to your user page and click on the ``new repository`` link:
.. image:: bintray-new-repo-link.png
You should see the following dialog:
.. image:: bintray-new-repo-dialog.png
Fill it out similarly to the above image, the settings are:
Name
sbt-plugins
Type
Generic
Desc
My sbt plugins
Tags
sbt
Once this is done, you can begin to configure your sbt-plugins to publish to bintray.
Add the bintray-sbt plugin to your build.
=========================================
First, add the bintray-sbt to your plugin build.
First, create a ``project/bintray.sbt`` file ::
resolvers += Resolver.url(
"bintray-sbt-plugin-releases",
url("http://dl.bintray.com/content/sbt/sbt-plugin-releases"))(
Resolver.ivyStylePatterns)
addSbtPlugin("me.lessis" % "bintray-sbt" % "0.1.1")
Next, make sure your ``build.sbt`` file has the following settings ::
import bintray.Keys._
sbtPlugin := true
name := "<YOUR PLUGIN HERE>"
organization := "<INSERT YOUR ORG HERE>"
version := "<YOUR PLUGIN VERSION HERE>"
publishMavenStyle := false
bintrayPublishSettings
repository in bintray := "sbt-plugins"
// This is an example. bintray-sbt requires licenses to be specified
// (using a canonical name).
licenses += ("Apache-2.0", url("http://www.apache.org/licenses/LICENSE-2.0.html"))
bintrayOrganization in bintray := None
Make sure your project has a valid license specified, as well as unique name and organization.
Make a release
==============
*Note: bintray does not support snapshots. We recommend using `git-revisions supplied by the sbt-git plugin <https://github.com/sbt/sbt-git#versioning-with-git>`_.*
Once your build is configured, open the sbt console in your build and run:
.. code-block:: console
sbt> publish
The plugin will ask you for your credentials. If you don't know where they are, you can find them on `Bintray <http://bintray.com>`_.
1. Login to the website with your credentials.
2. Click on your username
3. Click on edit profile
4. Click on API Key
This will get you your password. The bintray-sbt plugin will save your API key for future use.
*NOTE: We have to do this before we can link our package to the sbt org.*
Linking your package to the sbt organization
============================================
Now that your plugin is packaged on bintray, you can include it in the community sbt repository. To do so, go to the `Community sbt repository <https://bintray.com/sbt/sbt-plugin-releases>`_ screen.
1. Click the green ``include my package`` button and select your plugin.
.. image:: bintray-include-my-package.png
2. Search for your plugin by name and click on the link.
.. image:: bintray-link-plugin-search.png
3. Your request should be automatically filled out, just click send
.. image:: bintray-include-package-form.png
4. Shortly, one of the sbt repository admins will approve your link request.
From here on, any releases of your plugin will automatically appear in the community sbt repository. Congratulations and thank you so much for your contributions!
Linking your package to the sbt organization (sbt org admins)
=============================================================
If you're a member of the sbt organization on bintray, you can link your package to the sbt organization, but via a different means. To do so, first navigate to the plugin you wish to include and click on the link button:
.. image:: bintray-org-member-link-button.png
After clicking this you should see a link like the following:
.. image:: bintray-org-member-link-dialog.png
Click on the sbt/sbt-plugin-releases repository and you're done! Any future releases will be included in the sbt-plugin repository.
Summary
=======
After setting up the repository, all new releases will automatically be included in the sbt-plugin-releases repository, available for all users. When you create a new plugin, after the initial release you'll have to link it to the sbt community repository, but the rest of the setup should already be completed. Thanks for your contributions and happy hacking.

View File

@ -1,275 +0,0 @@
==============
0.12.0 Changes
==============
Features, fixes, changes with compatibility implications (incomplete, please help)
----------------------------------------------------------------------------------
- The cross versioning convention has changed for Scala versions 2.10
and later as well as for sbt plugins.
- When invoked directly, 'update' will always perform an update (gh-335)
- The sbt plugins repository is added by default for plugins and plugin definitions. gh-380
- Plugin configuration directory precedence has changed (see details
section below)
- Source dependencies have been fixed, but the fix required changes
(see details section below)
- Aggregation has changed to be more flexible (see details section
below)
- Task axis syntax has changed from key(for task) to task::key (see
details section below)
- The organization for sbt has changed to `org.scala-sbt` (was:
org.scala-tools.sbt). This affects users of the scripted plugin in
particular.
- `artifactName` type has changed to
`(ScalaVersion, Artifact, ModuleID) => String`
- `javacOptions` is now a task
- `session save` overwrites settings in `build.sbt` (when appropriate). gh-369
- scala-library.jar is now required to be on the classpath in order to
compile Scala code. See the `scala-library.jar` section at the
bottom of the page for details.
Features
--------
- Support for forking tests (gh-415)
- `test-quick` (see details section below)
- Support globally overriding repositories (gh-472)
- Added `print-warnings` task that will print unchecked and
deprecation warnings from the previous compilation without needing to
recompile (Scala 2.10+ only)
- Support for loading an ivy settings file from a URL.
- `projects add/remove <URI>` for temporarily working with other builds
- Enhanced control over parallel execution (see details section below)
- `inspect tree <key>` for calling `inspect` command recursively (gh-274)
Fixes
-----
- Delete a symlink and not its contents when recursively deleting a directory.
- Fix detection of ancestors for java sources
- Fix the resolvers used for `update-sbt-classifiers` (gh-304)
- Fix auto-imports of plugins (gh-412)
- Argument quoting (see details section below)
- Properly reset JLine after being stopped by Ctrl+z (unix only). gh-394
Improvements
------------
- The launcher can launch all released sbt versions back to 0.7.0.
- A more refined hint to run 'last' is given when a stack trace is suppressed.
- Use java 7 Redirect.INHERIT to inherit input stream of subprocess (gh-462,\ gh-327).
This should fix issues when forking interactive programs. (@vigdorchik)
- Mirror ivy 'force' attribute (gh-361)
- Various improvements to `help` and `tasks` commands as well as
new `settings` command (gh-315)
- Bump jsch version to 0.1.46. (gh-403)
- Improved help commands: `help`, `tasks`, `settings`.
- Bump to JLine 1.0 (see details section below)
- Global repository setting (see details section below)
- Other fixes/improvements: gh-368, gh-377, gh-378, gh-386, gh-387, gh-388, gh-389
Experimental or In-progress
---------------------------
- API for embedding incremental compilation. This interface is subject
to change, but already being used in `a branch of the
scala-maven-plugin <https://github.com/davidB/scala-maven-plugin/tree/feature/sbt-inc>`_.
- Experimental support for keeping the Scala compiler resident. Enable
by passing `-Dsbt.resident.limit=n` to sbt, where `n` is an
integer indicating the maximum number of compilers to keep around.
- The `Howto pages <http://www.scala-sbt.org/howto.html>`_ on the `new
site <http://www.scala-sbt.org>`_ are at least readable now. There is
more content to write and more formatting improvements are needed, so
`pull requests are welcome <https://github.com/sbt/sbt.github.com>`_.
Details of major changes from 0.11.2 to 0.12.0
----------------------------------------------
Plugin configuration directory
------------------------------
In 0.11.0, plugin configuration moved from `project/plugins/` to just
`project/`, with `project/plugins/` being deprecated. Only 0.11.2
had a deprecation message, but in all of 0.11.x, the presence of the old
style `project/plugins/` directory took precedence over the new style.
In 0.12.0, the new style takes precedence. Support for the old style
won't be removed until 0.13.0.
1. Ideally, a project should ensure there is never a conflict. Both
styles are still supported; only the behavior when there is a
conflict has changed.
2. In practice, switching from an older branch of a project to a new
branch would often leave an empty `project/plugins/` directory that
would cause the old style to be used, despite there being no
configuration there.
3. Therefore, the intention is that this change is strictly an
improvement for projects transitioning to the new style and isn't
noticed by other projects.
Parsing task axis
-----------------
There is an important change related to parsing the task axis for
settings and tasks that fixes gh-202
1. The syntax before 0.12 has been
`{build}project/config:key(for task)`
2. The proposed (and implemented) change for 0.12 is
`{build}project/config:task::key`
3. By moving the task axis before the key, it allows for easier
discovery (via tab completion) of keys in plugins.
4. It is not planned to support the old syntax.
Aggregation
-----------
Aggregation has been made more flexible. This is along the direction
that has been previously discussed on the mailing list.
1. Before 0.12, a setting was parsed according to the current project
and only the exact setting parsed was aggregated.
2. Also, tab completion did not account for aggregation.
3. This meant that if the setting/task didn't exist on the current
project, parsing failed even if an aggregated project contained the
setting/task.
4. Additionally, if compile:package existed for the current project,
\*:package existed for an aggregated project, and the user requested
'package' to run (without specifying the configuration), \*:package
wouldn't be run on the aggregated project (because it isn't the same
as the compile:package key that existed on the current project).
5. In 0.12, both of these situations result in the aggregated settings
being selected. For example,
1. Consider a project `root` that aggregates a subproject `sub`.
2. `root` defines `*:package`.
3. `sub` defines `compile:package` and `compile:compile`.
4. Running `root/package` will run `root/*:package` and
`sub/compile:package`
5. Running `root/compile` will run `sub/compile:compile`
6. This change was made possible in part by the change to task axis
parsing.
Parallel Execution
------------------
Fine control over parallel execution is supported as described here:
:doc:`/Detailed-Topics/Parallel-Execution`
1. The default behavior should be the same as before, including the
`parallelExecution` settings.
2. The new capabilities of the system should otherwise be considered
experimental.
3. Therefore, `parallelExecution` won't be deprecated at this time.
Source dependencies
-------------------
A fix for issue gh-329 is included in 0.12.0. This fix ensures that only one version of a plugin
is loaded across all projects. There are two parts to this.
1. The version of a plugin is fixed by the first build to load it. In
particular, the plugin version used in the root build (the one in
which sbt is started in) always overrides the version used in
dependencies.
2. Plugins from all builds are loaded in the same class loader.
Additionally, Sanjin's patches to add support for hg and svn URIs are
included.
1. sbt uses subversion to retrieve URIs beginning with `svn` or
`svn+ssh`. An optional fragment identifies a specific revision to
checkout.
2. Because a URI for mercurial doesn't have a mercurial-specific scheme,
sbt requires the URI to be prefixed with `hg:` to identify it as a
mercurial repository.
3. Also, URIs that end with `.git` are now handled properly.
Cross building
--------------
The cross version suffix is shortened to only include the major and
minor version for Scala versions starting with the 2.10 series and for
sbt versions starting with the 0.12 series. For example,
`sbinary_2.10` for a normal library or `sbt-plugin_2.10_0.12` for an
sbt plugin. This requires forward and backward binary compatibility
across incremental releases for both Scala and sbt.
1. This change has been a long time coming, but it requires everyone
publishing an open source project to switch to 0.12 to publish for
2.10 or adjust the cross versioned prefix in their builds
appropriately.
2. Obviously, using 0.12 to publish a library for 2.10 requires 0.12.0
to be released before projects publish for 2.10.
3. There is now the concept of a binary version. This is a subset of the
full version string that represents binary compatibility. That is,
equal binary versions implies binary compatibility. All Scala
versions prior to 2.10 use the full version for the binary version to
reflect previous sbt behavior. For 2.10 and later, the binary version
is `<major>.<minor>`.
4. The cross version behavior for published artifacts is configured by
the crossVersion setting. It can be configured for dependencies by
using the `cross` method on `ModuleID` or by the traditional %%
dependency construction variant. By default, a dependency has cross
versioning disabled when constructed with a single % and uses the
binary Scala version when constructed with %%.
5. The artifactName function now accepts a type ScalaVersion as its
first argument instead of a String. The full type is now
`(ScalaVersion, ModuleID, Artifact) => String`. ScalaVersion
contains both the full Scala version (such as 2.10.0) as well as the
binary Scala version (such as 2.10).
6. The flexible version mapping added by Indrajit has been merged into
the `cross` method and the %% variants accepting more than one
argument have been deprecated. See :doc:`/Detailed-Topics/Cross-Build` for details.
Global repository setting
-------------------------
Define the repositories to use by putting a standalone
`[repositories]` section (see the
:doc:`/Detailed-Topics/Launcher` page) in
`~/.sbt/repositories` and pass `-Dsbt.override.build.repos=true` to
sbt. Only the repositories in that file will be used by the launcher for
retrieving sbt and Scala and by sbt when retrieving project
dependencies. (@jsuereth)
test-quick
----------
`test-quick` (gh-393) runs the tests specified as arguments (or all tests if no arguments are
given) that:
1. have not been run yet OR
2. failed the last time they were run OR
3. had any transitive dependencies recompiled since the last successful
run
Argument quoting
----------------
Argument quoting (gh-396) from the interactive mode works like Scala string literals.
1. `> command "arg with spaces,\n escapes interpreted"`
2. `> command """arg with spaces,\n escapes not interpreted"""`
3. For the first variant, note that paths on Windows use backslashes and
need to be escaped (`\\`). Alternatively, use the second variant,
which does not interpret escapes.
4. For using either variant in batch mode, note that a shell will
generally require the double quotes themselves to be escaped.
scala-library.jar
-----------------
sbt versions prior to 0.12.0 provided the location of scala-library.jar
to scalac even if scala-library.jar wasn't on the classpath. This
allowed compiling Scala code without scala-library as a dependency, for
example, but this was a misfeature. Instead, the Scala library should be
declared as `provided`:
::
// Don't automatically add the scala-library dependency
// in the 'compile' configuration
autoScalaLibrary := false
libraryDependencies += "org.scala-lang" % "scala-library" % "2.9.2" % "provided"

View File

@ -1,238 +0,0 @@
==============
0.13.0 Changes
==============
Overview
========
Features, fixes, changes with compatibility implications (incomplete, please help)
----------------------------------------------------------------------------------
- Moved to Scala 2.10 for sbt and build definitions.
- Support for plugin configuration in `project/plugins/` has been removed. It was deprecated since 0.11.2.
- Dropped support for tab completing the right side of a setting for the `set` command. The new task macros make this tab completion obsolete.
- The convention for keys is now camelCase only. Details below.
- Fixed the default classifier for tests to be `tests` for proper Maven compatibility.
- The global settings and plugins directories are now versioned. Global settings go in `~/.sbt/0.13/` and global plugins in `~/.sbt/0.13/plugins/` by default. Explicit overrides, such as via the `sbt.global.base` system property, are still respected. (gh-735)
- sbt no longer canonicalizes files passed to scalac. (gh-723)
- sbt now enforces that each project must have a unique `target` directory.
- sbt no longer overrides the Scala version in dependencies. This allows independent configurations to depend on different Scala versions and treats Scala dependencies other than scala-library as normal dependencies. However, it can result in resolved versions other than `scalaVersion` for those other Scala libraries.
- JLine is now configured differently for Cygwin. See :doc:`/Getting-Started/Setup`.
- Jline and Ansi codes work better on Windows now. CI servers might have to explicitly disable Ansi codes via `-Dsbt.log.format=false`.
- JLine now tries to respect `~/.inputrc`.
- Forked tests and runs now use the project's base directory as the current working directory.
- `compileInputs` is now defined in `(Compile,compile)` instead of just `Compile`
- The result of running tests is now `Tests.Output <../../api/#sbt.Tests$$Output>`_.
Features
--------
- Use the repositories in boot.properties as the default project resolvers. Add `bootOnly` to a repository in boot.properties to specify that it should not be used by projects by default. (Josh S., gh-608)
- Support vals and defs in .sbt files. Details below.
- Support defining Projects in .sbt files: vals of type Project are added to the Build. Details below.
- New syntax for settings, tasks, and input tasks. Details below.
- Automatically link to external API scaladocs of dependencies by setting `autoAPIMappings := true`. This requires at least Scala 2.10.1 and for dependencies to define `apiURL` for their scaladoc location. Mappings may be manually added to the `apiMappings` task as well.
- Support setting Scala home directory temporarily using the switch command: `++ scala-version=/path/to/scala/home`. The scala-version part is optional, but is used as the version for any managed dependencies.
- Add `publishM2` task for publishing to `~/.m2/repository`. (gh-485)
- Use a default root project aggregating all projects if no root is defined. (gh-697)
- New API for getting tasks and settings from multiple projects and configurations. See the new section :ref:`getting values from multiple scopes <multiple-scopes>`.
- Enhanced test interface for better support of test framework features. (Details pending.)
- `export` command
* For tasks, prints the contents of the 'export' stream. By convention, this should be the equivalent command line(s) representation. `compile`, `doc`, and `console` show the approximate command lines for their execution. Classpath tasks print the classpath string suitable for passing as an option.
* For settings, directly prints the value of a setting instead of going through the logger
Fixes
-----
- sbt no longer tries to warn on dependency conflicts. Configure a :ref:`conflict manager <conflict-management>` instead. (gh-709)
- Run test Cleanup and Setup when forking. The test ClassLoader is not available because it is in another jvm.
Improvements
------------
- Run the API extraction phase after the compiler's `pickler` phase instead of `typer` to allow compiler plugins after `typer`. (Adriaan M., gh-609)
- Record defining source position of settings. `inspect` shows the definition location of all settings contributing to a defined value.
- Allow the root project to be specified explicitly in `Build.rootProject`.
- Tasks that need a directory for storing cache information can now use the `cacheDirectory` method on `streams`. This supersedes the `cacheDirectory` setting.
- The environment variables used when forking `run` and `test` may be set via `envVars`, which is a `Task[Map[String,String]]`. (gh-665)
- Restore class files after an unsuccessful compilation. This is useful when an error occurs in a later incremental step that requires a fix in the originally changed files.
- Better auto-generated IDs for default projects. (gh-554)
- Fork run directly with 'java' to avoid additional class loader from 'scala' command. (gh-702)
- Make autoCompilerPlugins support compiler plugins defined in an internal dependency (only if `exportJars := true` due to scalac limitations)
- Track ancestors of non-private templates and use this information to require fewer, smaller intermediate incremental compilation steps.
- `autoCompilerPlugins` now supports compiler plugins defined in an internal dependency. The plugin project must define `exportJars := true`. Depend on the plugin with `...dependsOn(... % Configurations.CompilerPlugin)`.
- Add utilities for debugging API representation extracted by the incremental compiler. (Grzegorz K., gh-677, gh-793)
- `consoleProject` unifies the syntax for getting the value of a setting and executing a task. See :doc:`/Detailed-Topics/Console-Project`.
Other
-----
- The source layout for the sbt project itself follows the package name to accommodate to Eclipse users. (Grzegorz K., gh-613)
Details of major changes
========================
camelCase Key names
-------------------
The convention for key names is now camelCase only instead of camelCase for Scala identifiers and hyphenated, lower-case on the command line. camelCase is accepted for existing hyphenated key names and the hyphenated form will still be accepted on the command line for those existing tasks and settings declared with hyphenated names. Only camelCase will be shown for tab completion, however.
New key definition methods
--------------------------
There are new methods that help avoid duplicating key names by declaring keys as:
::
val myTask = taskKey[Int]("A (required) description of myTask.")
The name will be picked up from the val identifier by the implementation of the taskKey macro so there is no reflection needed or runtime overhead. Note that a description is mandatory and the method `taskKey` begins with a lowercase `t`. Similar methods exist for keys for settings and input tasks: `settingKey` and `inputKey`.
New task/setting syntax
-----------------------
First, the old syntax is still supported with the intention of allowing conversion to the new syntax at your leisure. There may be some incompatibilities and some may be unavoidable, but please report any issues you have with an existing build.
The new syntax is implemented by making `:=`, `+=`, and `++=` macros and making these the only required assignment methods. To refer to the value of other settings or tasks, use the `value` method on settings and tasks. This method is a stub that is removed at compile time by the macro, which will translate the implementation of the task/setting to the old syntax.
For example, the following declares a dependency on `scala-reflect` using the value of the `scalaVersion` setting:
::
libraryDependencies += "org.scala-lang" % "scala-reflect" % scalaVersion.value
The `value` method is only allowed within a call to `:=`, `+=`, or `++=`. To construct a setting or task outside of these methods, use `Def.task` or `Def.setting`. For example,
::
val reflectDep = Def.setting { "org.scala-lang" % "scala-reflect" % scalaVersion.value }
libraryDependencies += reflectDep.value
A similar method `parsed` is defined on `Parser[T]`, `Initialize[Parser[T]]` (a setting that provides a parser), and `Initialize[State => Parser[T]]` (a setting that uses the current `State` to provide a `Parser[T]`). This method can be used when defining an input task to get the result of user input.
::
myInputTask := {
// Define the parser, which is the standard space-delimited arguments parser.
val args = Def.spaceDelimited("<args>").parsed
// Demonstrates using a setting value and a task result:
println("Project name: " + name.value)
println("Classpath: " + (fullClasspath in Compile).value.map(_.file))
println("Arguments:")
for(arg <- args) println(" " + arg)
}
For details, see :doc:`/Extending/Input-Tasks`.
To expect a task to fail and get the failing exception, use the `failure` method instead of `value`. This provides an `Incomplete` value, which wraps the exception. To get the result of a task whether or not it succeeds, use `result`, which provides a `Result[T]`.
Dynamic settings and tasks (`flatMap`) have been cleaned up. Use the `Def.taskDyn` and `Def.settingDyn` methods to define them (better name suggestions welcome). These methods expect the result to be a task and setting, respectively.
.sbt format enhancements
------------------------
vals and defs are now allowed in .sbt files. They must follow the same rules as settings concerning blank lines, although multiple definitions may be grouped together. For example,
::
val n = "widgets"
val o = "org.example"
name := n
organization := o
All definitions are compiled before settings, but it will probably be best practice to put definitions together.
Currently, the visibility of definitions is restricted to the .sbt file it is defined in.
They are not visible in `consoleProject` or the `set` command at this time, either.
Use Scala files in `project/` for visibility in all .sbt files.
vals of type `Project` are added to the `Build` so that multi-project builds can be defined entirely in .sbt files now.
For example,
::
lazy val a = Project("a", file("a")).dependsOn(b)
lazy val b = Project("b", file("sub")).settings(
version := "1.0"
)
Currently, it only makes sense to define these in the root project's .sbt files.
A shorthand for defining Projects is provided by a new macro called `project`.
This requires the constructed Project to be directly assigned to a `val`.
The name of this val is used for the project ID and base directory.
The base directory can be changed with the `in` method.
The previous example can also be written as:
::
lazy val a = project.dependsOn(b)
lazy val b = project in file("sub") settings(
version := "1.0"
)
This macro is also available for use in Scala files.
Control over automatically added settings
-----------------------------------------
sbt loads settings from a few places in addition to the settings explicitly defined by the `Project.settings` field.
These include plugins, global settings, and .sbt files.
The new `Project.autoSettings` method configures these sources: whether to include them for the project and in what order.
`Project.autoSettings` accepts a sequence of values of type `AddSettings`.
Instances of `AddSettings` are constructed from methods in the `AddSettings` companion object.
The configurable settings are per-user settings (from ~/.sbt, for example), settings from .sbt files, and plugin settings (project-level only).
The order in which these instances are provided to `autoSettings` determines the order in which they are appended to the settings explicitly provided in `Project.settings`.
For .sbt files, `AddSettings.defaultSbtFiles` adds the settings from all .sbt files in the project's base directory as usual.
The alternative method `AddSettings.sbtFiles` accepts a sequence of `Files` that will be loaded according to the standard .sbt format.
Relative files are resolved against the project's base directory.
Plugin settings may be included on a per-Plugin basis by using the `AddSettings.plugins` method and passing a `Plugin => Boolean`.
The settings controlled here are only the automatic per-project settings.
Per-build and global settings will always be included.
Settings that plugins require to be manually added still need to be added manually.
For example,
::
import AddSettings._
lazy val root = Project("root", file(".")) autoSettings(
userSettings, allPlugins, sbtFiles(file("explicit/a.txt"))
)
lazy val sub = Project("sub", file("Sub")) autoSettings(
defaultSbtFiles, plugins(includePlugin)
)
def includePlugin(p: Plugin): Boolean =
p.getClass.getName.startsWith("org.example.")
Resolving Scala dependencies
----------------------------
Scala dependencies (like scala-library and scala-compiler) are now resolved via the normal `update` task. This means:
1. Scala jars won't be copied to the boot directory, except for those needed to run sbt.
2. Scala SNAPSHOTs behave like normal SNAPSHOTs. In particular, running `update` will properly re-resolve the dynamic revision.
3. Scala jars are resolved using the same repositories and configuration as other dependencies.
4. Scala dependencies are not resolved via `update` when `scalaHome` is set, but are instead obtained from the configured directory.
5. The Scala version for sbt will still be resolved via the repositories configured for the launcher.
sbt still needs access to the compiler and its dependencies in order to run `compile`, `console`, and other Scala-based tasks. So, the Scala compiler jar and dependencies (like scala-reflect.jar and scala-library.jar) are defined and resolved in the `scala-tool` configuration (unless `scalaHome` is defined). By default, this configuration and the dependencies in it are automatically added by sbt. This occurs even when dependencies are configured in a `pom.xml` or `ivy.xml` and so it means that the version of Scala defined for your project must be resolvable by the resolvers configured for your project.
If you need to manually configure where sbt gets the Scala compiler and library used for compilation, the REPL, and other Scala tasks, do one of the following:
1. Set `scalaHome` to use the existing Scala jars in a specific directory. If `autoScalaLibrary` is true, the library jar found here will be added to the (unmanaged) classpath.
2. Set `managedScalaInstance := false` and explicitly define `scalaInstance`, which is of type `ScalaInstance`. This defines the compiler, library, and other jars comprising Scala. If `autoScalaLibrary` is true, the library jar from the defined `ScalaInstance` will be added to the (unmanaged) classpath.
The :doc:`/Detailed-Topics/Configuring-Scala` page provides full details.

File diff suppressed because it is too large Load Diff

View File

@ -1,288 +0,0 @@
=================
Community Plugins
=================
sbt Organization
================
The `sbt organization <http://github.com/sbt>`_ is available for use by any sbt plugin.
Developers who contribute their plugins into the community organization will still retain
control over their repository and its access. The goal of the sbt organization is to
organize sbt software into one central location.
A side benefit to using the sbt organization for projects is that you can use gh-pages to host websites in the http://scala-sbt.org domain.
Community Ivy Repository
========================
`Typesafe <http://www.typesafe.com>`_ has provided a freely available `Ivy Repository <http://repo.scala-sbt.org/scalasbt>`_ for sbt projects to use.
This ivy repository is mirrored from the freely available `Bintray service <http://bintray.com>`_. If you'd like to submit your plugin, please follow these instructions: `Bintray For Plugins <Bintray-For-Plugins.html>`_.
Available Plugins
=================
Please feel free to `submit a pull request <https://github.com/sbt/sbt/pulls>`_ that adds your plugin to the list.
Plugins for IDEs
~~~~~~~~~~~~~~~~
- IntelliJ IDEA
- sbt Plugin to generate IDEA project configuration:
https://github.com/mpeltonen/sbt-idea
- IDEA Plugin to embed an sbt Console into the IDE:
https://github.com/orfjackal/idea-sbt-plugin
- Netbeans: https://github.com/remeniuk/sbt-netbeans-plugin
- Eclipse: https://github.com/typesafehub/sbteclipse
- Sublime Text: https://github.com/orrsella/sbt-sublime
- Ensime: https://github.com/aemoncannon/ensime-sbt-cmd
- sbt-mode for Emacs: https://github.com/hvesalai/sbt-mode
- sbt-ctags (manage library dependency sources for vim, emacs, sublime)
https://github.com/kalmanb/sbt-ctags
Web Plugins
~~~~~~~~~~~
- xsbt-web-plugin: https://github.com/JamesEarlDouglas/xsbt-web-plugin
- xsbt-webstart: https://github.com/ritschwumm/xsbt-webstart
- sbt-appengine: https://github.com/sbt/sbt-appengine
- sbt-gwt-plugin: https://github.com/thunderklaus/sbt-gwt-plugin
- sbt-cloudbees-plugin:
https://github.com/timperrett/sbt-cloudbees-plugin
- sbt-jelastic-deploy: https://github.com/casualjim/sbt-jelastic-deploy
Test plugins
~~~~~~~~~~~~
- junit_xml_listener: https://github.com/ijuma/junit_xml_listener
- sbt-growl-plugin: https://github.com/softprops/sbt-growl-plugin
- sbt-teamcity-test-reporting-plugin:
https://github.com/guardian/sbt-teamcity-test-reporting-plugin
- xsbt-cucumber-plugin:
https://github.com/skipoleschris/xsbt-cucumber-plugin
- sbt-multi-jvm:
https://github.com/typesafehub/sbt-multi-jvm
- sbt-testng-interface:
https://github.com/sbt/sbt-testng-interface
- schoir (Distributed testing plugin):
https://github.com/typesafehub/schoir
Static Code Analysis plugins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- cpd4sbt: https://github.com/sbt/cpd4sbt (copy/paste detection,
works for Scala, too)
- findbugs4sbt: https://github.com/sbt/findbugs4sbt (FindBugs
only supports Java projects atm)
- scalastyle: https://github.com/scalastyle/scalastyle-sbt-plugin (Scalastyle - static code checker for Scala)
- sbt-stats: https://github.com/orrsella/sbt-stats (simple, extensible source code statistics)
One jar plugins
~~~~~~~~~~~~~~~
- sbt-assembly: https://github.com/sbt/sbt-assembly
- xsbt-proguard-plugin: https://github.com/adamw/xsbt-proguard-plugin
- sbt-deploy: https://github.com/reaktor/sbt-deploy
- sbt-appbundle (os x standalone): https://github.com/sbt/sbt-appbundle
- sbt-onejar (Packages your project using One-JAR™):
https://github.com/sbt/sbt-onejar
Frontend development plugins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- coffeescripted-sbt: https://github.com/softprops/coffeescripted-sbt
- less-sbt (for less-1.3.0): https://github.com/softprops/less-sbt
- sbt-less-plugin (it uses less-1.3.0):
https://github.com/btd/sbt-less-plugin
- sbt-emberjs: https://github.com/stefri/sbt-emberjs
- sbt-closure: https://github.com/eltimn/sbt-closure
- sbt-imagej: https://github.com/jpsacha/sbt-imagej
- sbt-yui-compressor: https://github.com/indrajitr/sbt-yui-compressor
- sbt-requirejs: https://github.com/scalatra/sbt-requirejs
- sbt-vaadin-plugin: https://github.com/henrikerola/sbt-vaadin-plugin
Game development plugins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- sbt-lwjgl-plugin (Light Weight Java Game Library): https://github.com/philcali/sbt-lwjgl-plugin
- sbt-scage-plugin (Scala Game Engine): https://github.com/mvallerie/sbt-scage-plugin
Release plugins
~~~~~~~~~~~~~~~
- sbt-aether-plugin (Published artifacts using Sonatype Aether):
https://github.com/arktekk/sbt-aether-deploy
- posterous-sbt: https://github.com/n8han/posterous-sbt
- sbt-signer-plugin: https://github.com/rossabaker/sbt-signer-plugin
- sbt-izpack (generates an IzPack installer):
http://software.clapper.org/sbt-izpack/
- sbt-ghpages-plugin (publishes generated site and api):
https://github.com/jsuereth/xsbt-ghpages-plugin
- sbt-pgp (PGP signing plugin, can generate keys too):
https://github.com/sbt/sbt-pgp
- sbt-release (customizable release process):
https://github.com/sbt/sbt-release
- sbt-unique-version (emulates unique snapshots):
https://github.com/sbt/sbt-unique-version
- sbt-install4j: https://github.com/jpsacha/sbt-install4j
- sbt-pack (generates packages with dependent jars and launch scripts):
https://github.com/xerial/sbt-pack
- sbt-start-script:
https://github.com/sbt/sbt-start-script
- sbt-native-packager:
https://github.com/sbt/sbt-native-packager
- sbt-sonatype-plugin (releases to Sonatype Nexus repository)
https://github.com/xerial/sbt-sonatype
- xitrum-package (collects dependency .jar files for standalone Scala programs):
https://github.com/ngocdaothanh/xitrum-package
System plugins
~~~~~~~~~~~~~~
- sbt-sh (executes shell commands):
https://github.com/steppenwells/sbt-sh
- cronish-sbt (interval sbt / shell command execution):
https://github.com/philcali/cronish-sbt
- git (executes git commands): https://github.com/sbt/sbt-git
- svn (execute svn commands): https://github.com/xuwei-k/sbtsvn
- sbt-groll (sbt plugin to navigate the Git history):
https://github.com/sbt/sbt-groll
- sbt-twt (twitter processor for sbt):
https://github.com/sbt/sbt-twt
Code generator plugins
~~~~~~~~~~~~~~~~~~~~~~
- sbt-scalabuff (Google Protocol Buffers with native scala support thru ScalaBuff):
https://github.com/sbt/sbt-scalabuff
- sbt-fmpp (FreeMarker Scala/Java Templating):
https://github.com/sbt/sbt-fmpp
- sbt-scalaxb (XSD and WSDL binding):
https://github.com/eed3si9n/scalaxb
- sbt-protobuf (Google Protocol Buffers):
https://github.com/sbt/sbt-protobuf
- sbt-avro (Apache Avro): https://github.com/cavorite/sbt-avro
- sbt-xjc (XSD binding, using `JAXB XJC <http://download.oracle.com/javase/6/docs/technotes/tools/share/xjc.html>`_):
https://github.com/sbt/sbt-xjc
- xsbt-scalate-generate (Generate/Precompile Scalate Templates):
https://github.com/backchatio/xsbt-scalate-generate
- sbt-antlr (Generate Java source code based on ANTLR3 grammars):
https://github.com/stefri/sbt-antlr
- sbt-antlr4 (Antlr4 runner for generating Java source code):
https://github.com/ihji/sbt-antlr4
- xsbt-reflect (Generate Scala source code for project name and
version): https://github.com/ritschwumm/xsbt-reflect
- sbt-buildinfo (Generate Scala source for any settings):
https://github.com/sbt/sbt-buildinfo
- lifty (Brings scaffolding to sbt): https://github.com/lifty/lifty
- sbt-thrift (Thrift Code Generation):
https://github.com/bigtoast/sbt-thrift
- xsbt-hginfo (Generate Scala source code for Mercurial repository
information): https://bitbucket.org/lukas\_pustina/xsbt-hginfo
- sbt-scalashim (Generate Scala shim like `sys.error`):
https://github.com/sbt/sbt-scalashim
- sbtend (Generate Java source code from
`xtend <http://www.eclipse.org/xtend/>`_ ):
https://github.com/xuwei-k/sbtend
- sbt-boilerplate (generating scala.Tuple/Function related boilerplate code):
https://github.com/sbt/sbt-boilerplate
- sbt-fxml (Generates controller classes for JavaFX FXML files): https://bitbucket.org/phdoerfler/sbt-fxml
Database plugins
~~~~~~~~~~~~~~~~
- flyway-sbt (Flyway - The agile database migration framework):
http://flywaydb.org/getstarted/firststeps/sbt.html
- sbt-liquibase (Liquibase RDBMS database migrations):
https://github.com/bigtoast/sbt-liquibase
- sbt-dbdeploy (dbdeploy, a database change management tool):
https://github.com/mr-ken/sbt-dbdeploy
Documentation plugins
~~~~~~~~~~~~~~~~~~~~~
- sbt-lwm (Convert lightweight markup files, e.g., Markdown and
Textile, to HTML): http://software.clapper.org/sbt-lwm/
- sbt-site (Site generation for SBT):
https://github.com/sbt/sbt-site
- Laika (Template-based site generation, Markdown, reStructuredText,
no external tools): http://planet42.github.io/Laika/
- literator-plugin (Converts sources into markdown documents):
https://github.com/laughedelic/literator
Library dependency plugins
~~~~~~~~~~~~~~~~~~~~~~~~~~
- ls-sbt (An sbt interface for ls.implicit.ly):
https://github.com/softprops/ls
- sbt-dependency-graph (Creates a graphml file of the dependency tree):
https://github.com/jrudolph/sbt-dependency-graph
- sbt-dirty-money (Cleans Ivy2 cache):
https://github.com/sbt/sbt-dirty-money
- sbt-updates (Checks Maven repos for dependency updates):
https://github.com/rtimush/sbt-updates
Utility plugins
~~~~~~~~~~~~~~~
- jot (Write down your ideas lest you forget them)
https://github.com/softprops/jot
- np (Dead simple new project directory generation):
https://github.com/softprops/np
- npt (Creates new project skeletons based on templates):
https://github.com/reikje/npt
- sbt-editsource (A poor man's *sed*\ (1), for sbt):
http://software.clapper.org/sbt-editsource/
- sbt-cross-building (Simplifies building your plugins for multiple
versions of sbt): https://github.com/jrudolph/sbt-cross-building
- sbt-revolver (Triggered restart, hot reloading):
https://github.com/spray/sbt-revolver
- sbt-scalaedit (Open and upgrade ScalaEdit (text editor)):
https://github.com/kjellwinblad/sbt-scalaedit-plugin
- sbt-man (Looks up scaladoc): https://github.com/sbt/sbt-man
- sbt-taglist (Looks for TODO-tags in the sources):
https://github.com/johanandren/sbt-taglist
- migration-manager:
https://github.com/typesafehub/migration-manager
- sbt-scalariform (adding support for source code formatting using Scalariform):
https://github.com/sbt/sbt-scalariform
- sbt-aspectj:
https://github.com/sbt/sbt-aspectj
- sbt-properties:
https://github.com/sbt/sbt-properties
- sbt-multi-publish (publish to more than one repository simultaneously):
https://github.com/davidharcombe/sbt-multi-publish
- sbt-about-plugins (shows some details about plugins loaded):
https://github.com/jozic/sbt-about-plugins
- sbt-one-log (make Log dependency easy):
https://github.com/zavakid/sbt-one-log
Code coverage plugins
~~~~~~~~~~~~~~~~~~~~~
- sbt-scct: https://github.com/dvc94ch/sbt-scct
- sbt-scoverage: https://github.com/scoverage/sbt-scoverage
- jacoco4sbt: https://github.com/sbt/jacoco4sbt
- xsbt-coveralls-plugin: https://github.com/theon/xsbt-coveralls-plugin
Android plugin
~~~~~~~~~~~~~~
- android-plugin: https://github.com/jberkel/android-plugin
- android-sdk-plugin: https://github.com/pfn/android-sdk-plugin
Build interoperability plugins
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- ant4sbt: https://github.com/sbt/ant4sbt
OSGi plugin
~~~~~~~~~~~
- sbtosgi: https://github.com/typesafehub/sbtosgi
Plugin bundles
~~~~~~~~~~~~~~
- tl-os-sbt-plugins (Version, Release, and Package Management, Play 2.0 and Git utilities):
https://github.com/trafficland/tl-os-sbt-plugins

View File

@ -1,26 +0,0 @@
=======
Credits
=======
See the `sbt contributors on GitHub <https://github.com/sbt/sbt/graphs/contributors>`_ and `sbt GitHub organization members <https://github.com/sbt?tab=members>`_.
Additionally, these people have contributed ideas, documentation, or code to sbt but are not recorded in either of the above:
- Josh Cough
- Nolan Darilek
- Nathan Hamblen
- Ismael Juma
- Viktor Klang
- David R. MacIver
- Ross McDonald
- Andrew O'Malley
- Jorge Ortiz
- Mikko Peltonen
- Ray Racine
- Stuart Roebuck
- Harshad RJ
- Tony Sloane
- Seth Tisue
- Francisco Treacy
- Vesa Vilhonen

View File

@ -1,26 +0,0 @@
==============
Nightly Builds
==============
The latest development versions of |version| are available as nightly builds on |typesafe-snapshots|_.
To use a nightly build, the instructions are the same for :doc:`normal manual setup </Getting-Started/Setup>` except:
1. Download the launcher jar from one of the subdirectories of |nightly-launcher|.
They should be listed in chronological order, so the most recent one will be last.
2. The version number is the name of the subdirectory and is of the form
`|version|.x-yyyyMMdd-HHmmss`. Use this in a `build.properties` file.
3. Call your script something like `sbt-nightly` to retain access to a
stable `sbt` launcher. The documentation will refer to the script as `sbt`, however.
Related to the third point, remember that an `sbt.version` setting in
`<build-base>/project/build.properties` determines the version of sbt
to use in a project. If it is not present, the default version
associated with the launcher is used. This means that you must set
`sbt.version=yyyyMMdd-HHmmss` in an existing
`<build-base>/project/build.properties`. You can verify the right
version of sbt is being used to build a project by running
`about`.
To reduce problems, it is recommended to not use a launcher jar for one
nightly version to launch a different nightly version of sbt.

View File

@ -1,93 +0,0 @@
=============
Opportunities
=============
Below is a running list of potential areas of contribution. This list
may become out of date quickly, so you may want to check on the `sbt-dev mailing list`_
if you are interested in a specific topic.
1. There are plenty of possible visualization and analysis
opportunities.
- 'compile' produces an Analysis of the source code containing
- Source dependencies
- Inter-project source dependencies
- Binary dependencies (jars + class files)
- data structure representing the
`API <https://github.com/sbt/sbt/tree/0.13/interface>`_ of
the source code There is some code already for generating dot
files that isn't hooked up, but graphing dependencies and
inheritance relationships is a general area of work.
- 'update' produces an :doc:`/Detailed-Topics/Update-Report` mapping
`Configuration/ModuleID/Artifact` to the retrieved `File`
- Ivy produces more detailed XML reports on dependencies. These come
with an XSL stylesheet to view them, but this does not scale to
large numbers of dependencies. Working on this is pretty
straightforward: the XML files are created in `~/.ivy2` and the
`.xsl` and `.css` are there as well, so you don't even need to
work with sbt. Other approaches described in `the email
thread <https://groups.google.com/group/simple-build-tool/browse_thread/thread/7761f8b2ce51f02c/129064ea836c9baf>`_
- Tasks are a combination of static and dynamic graphs and it would
be useful to view the graph of a run
- Settings are a static graph and there is code to generate the dot
files, but isn't hooked up anywhere.
2. There is support for dependencies on external projects, like on
GitHub. To be more useful, this should support being able to update
the dependencies. It is also easy to extend this to other ways of
retrieving projects. Support for svn and hg was a recent
contribution, for example.
3. Dependency management: see `adept`_
4. If you like parsers, sbt commands and input tasks are written using
custom parser combinators that provide tab completion and error
handling. Among other things, the efficiency could be improved.
5. The javap task hasn't been reintegrated
6. Implement enhanced 0.11-style warn/debug/info/error/trace commands.
Currently, you set it like any other setting:
.. code-block:: console
set logLevel := Level.Warn
or
set logLevel in Test := Level.Warn
You could make commands that wrap this, like:
.. code-block:: console
warn test:run
Also, trace is currently an integer, but should really be an abstract
data type.
7. Each sbt version has more aggressive incremental compilation and
reproducing bugs can be difficult. It would be helpful to
have a mode that generates a diff between successive
compilations and records the options passed to scalac. This could be
replayed or inspected to try to find the cause.
Documentation
=============
1. There's a lot to do with this documentation. If you check it out from
git, there's a directory called Dormant with some content that needs
going through.
2. the :doc:`main </index>` page mentions external project references (e.g. to a git
repo) but doesn't have anything to link to that explains how to use
those.
3. API docs are much needed.
4. Find useful answers or types/methods/values in the other docs, and
pull references to them up into :doc:`/faq` or :doc:`/Name-Index` so people can
find the docs. In general the :doc:`/faq` should feel a bit more like a
bunch of pointers into the regular docs, rather than an alternative
to the docs.
5. A lot of the pages could probably have better names, and/or little
2-4 word blurbs to the right of them in the sidebar.

View File

@ -1,21 +0,0 @@
=============================
Community Repository Policy
=============================
The community repository has the following guideline for artifacts
published to it:
1. All published artifacts are the author's own work or have an
appropriate license which grants distribution rights.
2. All published artifacts come from open source projects, that have an
open patch acceptance policy.
3. All published artifacts are placed under an organization in a DNS
domain for which you have the permission to use or are an owner
(scala-sbt.org is available for sbt plugins).
4. All published artifacts are signed by a committer of the project
(coming soon).

View File

@ -1,276 +0,0 @@
=======================
Deploying to Sonatype
=======================
Deploying to sonatype is easy! Just follow these simple steps:
First - PGP Signatures
----------------------
You'll need to PGP sign your artifacts for the Sonatype repository.
Don't worry, there's a `plugin for that <http://scala-sbt.org/sbt-pgp>`_.
Follow the instructions for the plugin and you'll have PGP signed artifacts in no time.
*Note: The plugin is a jvm-only solution to generate PGP keys and sign
artifacts. It can work with the GPG command line tool, but the command
line is not needed.*
If your PGP key has not yet been distributed to the keyserver pool, i.e.,
you've just generated it, you'll need to publish it. You can do so using
the `sbt-pgp <http://scala-sbt.org/sbt-pgp>`_ plugin:
::
pgp-cmd send-key keyname hkp://pool.sks-keyservers.net/
(where keyname is the name, email address used when creating the key or
hexadecimal identifier for the key.)
If you see no output from sbt-pgp then the key name specified was not found.
Second - Maven Publishing Settings
----------------------------------
To publish to a maven repository, you'll need to configure a few
settings so that the correct metadata is generated.
::
publishMavenStyle := true
is used to ensure POMs are generated and pushed. Next, you have to set
up the repositories you wish to push to. Luckily, Sonatype's OSSRH uses
the same URLs for everyone:
::
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
}
Another good idea is to not publish your test artifacts (this is the default):
::
publishArtifact in Test := false
Third - POM Metadata
--------------------
Now, we want to control what's available in the `pom.xml` file. This
file describes our project in the maven repository and is used by
indexing services for search and discovery. This means it's important
that `pom.xml` should have all information we wish to advertise as
well as required info!
First, let's make sure no repositories show up in the POM file. To
publish on maven-central, all *required* artifacts must also be hosted
on maven central. However, sometimes we have optional dependencies for
special features. If that's the case, let's remove the repositories for
optional dependencies in our artifact:
::
pomIncludeRepository := { _ => false }
Next, the POM metadata that isn't generated by sbt must be added. This
is done through the :key:`pomExtra` configuration option:
::
pomExtra := (
<url>http://jsuereth.com/scala-arm</url>
<licenses>
<license>
<name>BSD-style</name>
<url>http://www.opensource.org/licenses/bsd-license.php</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>git@github.com:jsuereth/scala-arm.git</url>
<connection>scm:git:git@github.com:jsuereth/scala-arm.git</connection>
</scm>
<developers>
<developer>
<id>jsuereth</id>
<name>Josh Suereth</name>
<url>http://jsuereth.com</url>
</developer>
</developers>)
Specifically, the `url`, `license`, `scm.url`, `scm.connection`
and `developer` sections are required. The above is an example from
the `scala-arm <http://jsuereth.com/scala-arm>`_ project.
*Note* that sbt will automatically inject `licenses` and `url` nodes
if they are already present in your build file. Thus an alternative to
the above :key:`pomExtra` is to include the following entries:
::
licenses := Seq("BSD-style" -> url("http://www.opensource.org/licenses/bsd-license.php"))
homepage := Some(url("http://jsuereth.com/scala-arm"))
This might be advantageous if those keys are used also by other plugins
(e.g. `ls`). You **cannot use both** the sbt :key:`licenses` key and the
`licenses` section in :key:`pomExtra` at the same time, as this will
produce duplicate entries in the final POM file, leading to a rejection
in Sonatype's staging process.
*The full format of a pom.xml file is `outlined
here <http://maven.apache.org/pom.html>`_.*
Fourth - Adding credentials
---------------------------
The credentials for your Sonatype OSSRH account need to be added
somewhere. Common convention is a :sublit:`|globalBase|/sonatype.sbt` file with the
following:
::
credentials += Credentials("Sonatype Nexus Repository Manager",
"oss.sonatype.org",
"<your username>",
"<your password>")
*Note: The first two strings must be
`"Sonatype Nexus Repository Manager"` and `"oss.sonatype.org"` for
Ivy to use the credentials.*
Finally - Publish
-----------------
In sbt, run `publishSigned` and you should see something like the following:
.. code-block:: console
> publishSigned
Please enter your PGP passphrase> ***********
[info] Packaging /home/josh/projects/typesafe/scala-arm/target/scala-2.9.1/scala-arm_2.9.1-1.2.jar ...
[info] Wrote /home/josh/projects/typesafe/scala-arm/target/scala-2.9.1/scala-arm_2.9.1-1.2.pom
[info] Packaging /home/josh/projects/typesafe/scala-arm/target/scala-2.9.1/scala-arm_2.9.1-1.2-javadoc.jar ...
[info] Packaging /home/josh/projects/typesafe/scala-arm/target/scala-2.9.1/scala-arm_2.9.1-1.2-sources.jar ...
[info] :: delivering :: com.jsuereth#scala-arm_2.9.1;1.2 :: 1.2 :: release :: Mon Jan 23 13:16:57 EST 2012
[info] Done packaging.
[info] Done packaging.
[info] Done packaging.
[info] delivering ivy file to /home/josh/projects/typesafe/scala-arm/target/scala-2.9.1/ivy-1.2.xml
[info] published scala-arm_2.9.1 to https://oss.sonatype.org/service/local/staging/deploy/maven2/com/jsuereth/scala-arm_2.9.1/1.2/scala-arm_2.9.1-1.2-sources.jar
[info] published scala-arm_2.9.1 to https://oss.sonatype.org/service/local/staging/deploy/maven2/com/jsuereth/scala-arm_2.9.1/1.2/scala-arm_2.9.1-1.2-javadoc.jar.asc
[info] published scala-arm_2.9.1 to https://oss.sonatype.org/service/local/staging/deploy/maven2/com/jsuereth/scala-arm_2.9.1/1.2/scala-arm_2.9.1-1.2-sources.jar.asc
[info] published scala-arm_2.9.1 to https://oss.sonatype.org/service/local/staging/deploy/maven2/com/jsuereth/scala-arm_2.9.1/1.2/scala-arm_2.9.1-1.2.jar
[info] published scala-arm_2.9.1 to https://oss.sonatype.org/service/local/staging/deploy/maven2/com/jsuereth/scala-arm_2.9.1/1.2/scala-arm_2.9.1-1.2.jar.asc
[info] published scala-arm_2.9.1 to https://oss.sonatype.org/service/local/staging/deploy/maven2/com/jsuereth/scala-arm_2.9.1/1.2/scala-arm_2.9.1-1.2.pom.asc
[info] published scala-arm_2.9.1 to https://oss.sonatype.org/service/local/staging/deploy/maven2/com/jsuereth/scala-arm_2.9.1/1.2/scala-arm_2.9.1-1.2.pom
[info] published scala-arm_2.9.1 to https://oss.sonatype.org/service/local/staging/deploy/maven2/com/jsuereth/scala-arm_2.9.1/1.2/scala-arm_2.9.1-1.2-javadoc.jar
[success] Total time: 9 s, completed Jan 23, 2012 1:17:03 PM
After publishing you have to follow the `Release workflow of
nexus <https://docs.sonatype.org/display/Repository/Sonatype+OSS+Maven+Repository+Usage+Guide#SonatypeOSSMavenRepositoryUsageGuide-8.ReleaseIt>`_.
`sbt-sonatype plugin <https://github.com/xerial/sbt-sonatype>`_ allows the
release workflow procedures to be performed directly from sbt.
*Note: Staged releases allow testing across large projects of
independent releases before pushing the full project.*
\ *Note:* An error message of
`PGPException: checksum mismatch at 0 of 20` indicates that you got
the passphrase wrong. We have found at least on OS X that there may be
issues with characters outside the 7-bit ASCII range (e.g. Umlauts). If
you are absolutely sure that you typed the right phrase and the error
doesn't disappear, try changing the passphrase.
Summary
-------
To get your project hosted on Sonatype (and Maven Central), you will
need to:
- Have GPG key pair, with published public key,
- An sbt file with your Sonatype credentials *that is not pushed to the VCS*,
- Add the `sbt-pgp plugin <http://scala-sbt.org/sbt-pgp>`_ to sign the artefacts,
- Modify `build.sbt` with the required elements in the generated POM.
Starting with a project that is not being published, you'll need to
install GPG, generate and publish your key. Switching to sbt, you'll
then need to:
~/.sbt/sonatype.sbt
^^^^^^^^^^^^^^^^^^^
This file (kept *outside the VCS*) contains the Sonatype credentials
settings:
::
credentials += Credentials("Sonatype Nexus Repository Manager",
"oss.sonatype.org",
"your-sonatype-username",
"your-sonatype-password")
~/.sbt/plugins/gpg.sbt
^^^^^^^^^^^^^^^^^^^^^^
The `sbt-pgp plugin <http://scala-sbt.org/sbt-pgp>`_ allows you to
sign and publish your artefacts by running `publishSigned` in sbt:
::
addSbtPlugin("com.typesafe.sbt" % "sbt-pgp" % "0.8")
build.sbt
^^^^^^^^^
Finally, you'll need to tweak the generated POM in your `build.sbt`.
The tweaks include specifying the project's authors, URL, SCM and many
others:
::
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
}
publishMavenStyle := true
publishArtifact in Test := false
pomIncludeRepository := { _ => false }
pomExtra := (
<url>http://your.project.url</url>
<licenses>
<license>
<name>BSD-style</name>
<url>http://www.opensource.org/licenses/bsd-license.php</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>git@github.com:your-account/your-project.git</url>
<connection>scm:git:git@github.com:your-account/your-project.git</connection>
</scm>
<developers>
<developer>
<id>you</id>
<name>Your Name</name>
<url>http://your.url</url>
</developer>
</developers>
)
.. |globalBase| replace:: ~/.sbt/|version|

Binary file not shown.

Before

Width:  |  Height:  |  Size: 106 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 65 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 35 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 95 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 182 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 57 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 42 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 182 KiB

View File

@ -1,18 +0,0 @@
=========
Community
=========
This part of the documentation has project "meta-information" such as where to
find source code and how to contribute.
See :ref:`Getting Help <getting-help>` for information on sbt on StackOverflow, the development mailing list, and irc.
.. toctree::
:maxdepth: 1
Credits
Community-Plugins
Using-Sonatype
Nightly-Builds
Changes
Opportunities

View File

@ -1,20 +0,0 @@
==========
Advanced
==========
This part of the documentation has pages documenting particular sbt topics in detail.
Before reading anything in here, you will need the information in the
:doc:`Getting Started Guide </Getting-Started/Welcome>` as a foundation.
.. toctree::
:maxdepth: 2
Launcher
Scripts
TaskInputs
Understanding-incremental-recompilation
/Extending/Build-Loaders
/Extending/Command-Line-Applications
/Extending/Settings-Core
Migrating-from-sbt-0.7.x-to-0.10.x

View File

@ -1,207 +0,0 @@
=========
Artifacts
=========
Selecting default artifacts
===========================
By default, the published artifacts are the main binary jar, a jar
containing the main sources and resources, and a jar containing the API
documentation. You can add artifacts for the test classes, sources, or
API or you can disable some of the main artifacts.
To add all test artifacts:
::
publishArtifact in Test := true
To add them individually:
::
// enable publishing the jar produced by `test:package`
publishArtifact in (Test, packageBin) := true
// enable publishing the test API jar
publishArtifact in (Test, packageDoc) := true
// enable publishing the test sources jar
publishArtifact in (Test, packageSrc) := true
To disable main artifacts individually:
::
// disable publishing the main jar produced by `package`
publishArtifact in (Compile, packageBin) := false
// disable publishing the main API jar
publishArtifact in (Compile, packageDoc) := false
// disable publishing the main sources jar
publishArtifact in (Compile, packageSrc) := false
Modifying default artifacts
===========================
Each built-in artifact has several configurable settings in addition to
:key:`publishArtifact`. The basic ones are :key:`artifact` (of type
`SettingKey[Artifact]`), :key:`mappings` (of type
`TaskKey[(File,String)]`), and :key:`artifactPath` (of type
`SettingKey[File]`). They are scoped by `(<config>, <task>)` as
indicated in the previous section.
To modify the type of the main artifact, for example:
::
artifact in (Compile, packageBin) := {
val previous: Artifact = (artifact in (Compile, packageBin)).value
previous.copy(`type` = "bundle")
}
The generated artifact name is determined by the :key:`artifactName`
setting. This setting is of type
`(ScalaVersion, ModuleID, Artifact) => String`. The ScalaVersion
argument provides the full Scala version String and the binary
compatible part of the version String. The String result is the name of
the file to produce. The default implementation is
`Artifact.artifactName _`. The function may be modified to produce
different local names for artifacts without affecting the published
name, which is determined by the :key:`artifact` definition combined with
the repository pattern.
For example, to produce a minimal name without a classifier or cross
path:
::
artifactName := { (sv: ScalaVersion, module: ModuleID, artifact: Artifact) =>
artifact.name + "-" + module.revision + "." + artifact.extension
}
(Note that in practice you rarely want to drop the classifier.)
Finally, you can get the `(Artifact, File)` pair for the artifact by
mapping the :key:`packagedArtifact` task. Note that if you don't need the
`Artifact`, you can get just the File from the package task
(:key:`package`, :key:`packageDoc`, or :key:`packageSrc`). In both cases,
mapping the task to get the file ensures that the artifact is generated
first and so the file is guaranteed to be up-to-date.
For example:
::
val myTask = taskKey[Unit]("My task.")
myTask := {
val (art, file) = packagedArtifact.in(Compile, packageBin).value
println("Artifact definition: " + art)
println("Packaged file: " + file.getAbsolutePath)
}
Defining custom artifacts
=========================
In addition to configuring the built-in artifacts, you can declare other
artifacts to publish. Multiple artifacts are allowed when using Ivy
metadata, but a Maven POM file only supports distinguishing artifacts
based on classifiers and these are not recorded in the POM.
Basic `Artifact` construction look like:
::
Artifact("name", "type", "extension")
Artifact("name", "classifier")
Artifact("name", url: URL)
Artifact("name", Map("extra1" -> "value1", "extra2" -> "value2"))
For example:
::
Artifact("myproject", "zip", "zip")
Artifact("myproject", "image", "jpg")
Artifact("myproject", "jdk15")
See the `Ivy
documentation <http://ant.apache.org/ivy/history/2.3.0/ivyfile/dependency-artifact.html>`_
for more details on artifacts. See the `Artifact
API <../../api/sbt/Artifact$.html>`_ for
combining the parameters above and specifying [Configurations] and extra
attributes.
To declare these artifacts for publishing, map them to the task that
generates the artifact:
::
val myImageTask = taskKey[File](...)
myImageTask := {
val artifact: File = makeArtifact(...)
artifact
}
addArtifact( Artifact("myproject", "image", "jpg"), myImageTask )
`addArtifact` returns a sequence of settings (wrapped in a
`SettingsDefinition <../../api/#sbt.Init$SettingsDefinition>`_).
In a full build configuration, usage looks like:
::
...
lazy val proj = Project(...)
.settings( addArtifact(...).settings : _* )
...
Publishing .war files
=====================
A common use case for web applications is to publish the `.war` file
instead of the `.jar` file.
::
// disable .jar publishing
publishArtifact in (Compile, packageBin) := false
// create an Artifact for publishing the .war file
artifact in (Compile, packageWar) := {
val previous: Artifact = (artifact in (Compile, packageWar)).value
previous.copy(`type` = "war", extension = "war")
}
// add the .war file to what gets published
addArtifact(artifact in (Compile, packageWar), packageWar)
Using dependencies with artifacts
=================================
To specify the artifacts to use from a dependency that has custom or
multiple artifacts, use the `artifacts` method on your dependencies.
For example:
::
libraryDependencies += "org" % "name" % "rev" artifacts(Artifact("name", "type", "ext"))
The `from` and `classifier` methods (described on the :doc:`Library Management <Library-Management>`
page) are actually convenience methods that translate to `artifacts`:
::
def from(url: String) = artifacts( Artifact(name, new URL(url)) )
def classifier(c: String) = artifacts( Artifact(name, c) )
That is, the following two dependency declarations are equivalent:
::
libraryDependencies += "org.testng" % "testng" % "5.7" classifier "jdk15"
libraryDependencies += "org.testng" % "testng" % "5.7" artifacts(Artifact("testng", "jdk15") )

View File

@ -1,166 +0,0 @@
======================
General Best Practices
======================
This page describes best practices for working with sbt.
.. _global-vs-local-plugins:
`project/` vs. `~/.sbt/`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Anything that is necessary for building the project should go in
`project/`. This includes things like the web plugin. `~/.sbt/`
should contain local customizations and commands for working with a
build, but are not necessary. An example is an IDE plugin.
Local settings
~~~~~~~~~~~~~~
There are two options for settings that are specific to a user. An
example of such a setting is inserting the local Maven repository at the
beginning of the resolvers list:
::
resolvers := {
val localMaven = "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository"
localMaven +: resolvers.value
}
1. Put settings specific to a user in a global `.sbt` file, such as
`~/.sbt/local.sbt`. These settings will be applied to all projects.
2. Put settings in a `.sbt` file in a project that isn't checked into
version control, such as `<project>/local.sbt`. sbt combines the
settings from multiple `.sbt` files, so you can still have the
standard `<project>/build.sbt` and check that into version control.
.sbtrc
~~~~~~
Put commands to be executed when sbt starts up in a `.sbtrc` file, one
per line. These commands run before a project is loaded and are useful
for defining aliases, for example. sbt executes commands in
`$HOME/.sbtrc` (if it exists) and then `<project>/.sbtrc` (if it
exists).
Generated files
~~~~~~~~~~~~~~~
Write any generated files to a subdirectory of the output directory,
which is specified by the :key:`target` setting. This makes it easy to
clean up after a build and provides a single location to organize
generated files. Any generated files that are specific to a Scala
version should go in :key:`crossTarget` for efficient cross-building.
For generating sources and resources, see :doc:`/Howto/generatefiles`.
Don't hard code
~~~~~~~~~~~~~~~
Don't hard code constants, like the output directory `target/`. This
is especially important for plugins. A user might change the :key:`target`
setting to point to `build/`, for example, and the plugin needs to
respect that. Instead, use the setting, like:
::
myDirectory := target.value / "sub-directory"
Don't "mutate" files
~~~~~~~~~~~~~~~~~~~~
A build naturally consists of a lot of file manipulation. How can we
reconcile this with the task system, which otherwise helps us avoid
mutable state? One approach, which is the recommended approach and the
approach used by sbt's default tasks, is to only write to any given file
once and only from a single task.
A build product (or by-product) should be written exactly once by only
one task. The task should then, at a minimum, provide the Files created
as its result. Another task that wants to use Files should map the task,
simultaneously obtaining the File reference and ensuring that the task
has run (and thus the file is constructed). Obviously you cannot do much
about the user or other processes modifying the files, but you can make
the I/O that is under the build's control more predictable by treating
file contents as immutable at the level of Tasks.
For example:
::
lazy val makeFile = taskKey[File]("Creates a file with some content.")
// define a task that creates a file,
// writes some content, and returns the File
makeFile := {
val f: File = file("/tmp/data.txt")
IO.write(f, "Some content")
f
}
// The result of makeFile is the constructed File,
// so useFile can map makeFile and simultaneously
// get the File and declare the dependency on makeFile
useFile :=
doSomething( makeFile.value )
This arrangement is not always possible, but it should be the rule and
not the exception.
Use absolute paths
~~~~~~~~~~~~~~~~~~
Construct only absolute Files. Either specify an absolute path
::
file("/home/user/A.scala")
or construct the file from an absolute base:
::
base / "A.scala"
This is related to the no hard coding best practice because the proper
way involves referencing the :key:`baseDirectory` setting. For example, the
following defines the myPath setting to be the `<base>/licenses/`
directory.
::
myPath := baseDirectory.value / "licenses"
In Java (and thus in Scala), a relative File is relative to the current
working directory. The working directory is not always the same as the
build root directory for a number of reasons.
The only exception to this rule is when specifying the base directory
for a Project. Here, sbt will resolve a relative File against the build
root directory for you for convenience.
Parser combinators
~~~~~~~~~~~~~~~~~~
1. Use `token` everywhere to clearly delimit tab completion
boundaries.
2. Don't overlap or nest tokens. The behavior here is unspecified and
will likely generate an error in the future.
3. Use `flatMap` for general recursion. sbt's combinators are strict
to limit the number of classes generated, so use `flatMap` like:
.. code-block:: scala
lazy val parser: Parser[Int] =
token(IntBasic) flatMap { i =>
if(i <= 0)
success(i)
else
token(Space ~> parser)
}
This example defines a parser of a whitespace-delimited list of
integers, ending with a negative number, and returning that final,
negative number.

View File

@ -1,153 +0,0 @@
==================================
Classpaths, sources, and resources
==================================
This page discusses how sbt builds up classpaths for different actions,
like :key:`compile`, :key:`run`, and :key:`test` and how to override or augment
these classpaths.
Basics
======
In sbt 0.10 and later, classpaths now include the Scala library and
(when declared as a dependency) the Scala compiler. Classpath-related
settings and tasks typically provide a value of type `Classpath`. This
is an alias for `Seq[Attributed[File]]`.
`Attributed <../../api/sbt/Attributed.html>`_
is a type that associates a heterogeneous map with each classpath entry.
Currently, this allows sbt to associate the `Analysis` resulting from
compilation with the corresponding classpath entry and for managed
entries, the `ModuleID` and `Artifact` that defined the dependency.
To explicitly extract the raw `Seq[File]`, use the `files` method
implicitly added to `Classpath`:
::
val cp: Classpath = ...
val raw: Seq[File] = cp.files
To create a `Classpath` from a `Seq[File]`, use `classpath` and to
create an `Attributed[File]` from a `File`, use
`Attributed.blank`:
::
val raw: Seq[File] = ...
val cp: Classpath = raw.classpath
val rawFile: File = ..
val af: Attributed[File] = Attributed.blank(rawFile)
Unmanaged v. managed
--------------------
Classpaths, sources, and resources are separated into two main
categories: unmanaged and managed. Unmanaged files are manually created
files that are outside of the control of the build. They are the inputs
to the build. Managed files are under the control of the build. These
include generated sources and resources as well as resolved and
retrieved dependencies and compiled classes.
Tasks that produce managed files should be inserted as follows:
::
sourceGenerators in Compile +=
generate( (sourceManaged in Compile).value / "some_directory")
In this example, `generate` is some function of type
`File => Seq[File]` that actually does the work. So, we are appending a new task
to the list of main source generators (`sourceGenerators in Compile`).
To insert a named task, which is the better approach for plugins:
::
val mySourceGenerator = taskKey[Seq[File]](...)
mySourceGenerator in Compile :=
generate( (sourceManaged in Compile).value / "some_directory")
sourceGenerators in Compile += (mySourceGenerator in Compile).task
The `task` method is used to refer to the actual task instead of the
result of the task.
For resources, there are similar keys :key:`resourceGenerators` and
:key:`resourceManaged`.
Excluding source files by name
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The project base directory is by default a source directory in addition
to `src/main/scala`. You can exclude source files by name
(`butler.scala` in the example below) like:
::
excludeFilter in unmanagedSources := "butler.scala"
Read more on `How to exclude .scala source file in project folder -
Google
Groups <http://groups.google.com/group/simple-build-tool/browse_thread/thread/cd5332a164405568?hl=en>`_
External v. internal
--------------------
Classpaths are also divided into internal and external dependencies. The
internal dependencies are inter-project dependencies. These effectively
put the outputs of one project on the classpath of another project.
External classpaths are the union of the unmanaged and managed
classpaths.
Keys
----
For classpaths, the relevant keys are:
- :key:`unmanagedClasspath`
- :key:`managedClasspath`
- :key:`externalDependencyClasspath`
- :key:`internalDependencyClasspath`
For sources:
- :key:`unmanagedSources` These are by default built up from
:key:`unmanagedSourceDirectories`, which consists of :key:`scalaSource`
and :key:`javaSource`.
- :key:`managedSources` These are generated sources.
- :key:`sources` Combines :key:`managedSources` and :key:`unmanagedSources`.
- :key:`sourceGenerators` These are tasks that generate source files.
Typically, these tasks will put sources in the directory provided by
:key:`sourceManaged`.
For resources
- :key:`unmanagedResources` These are by default built up from
:key:`unmanagedResourceDirectories`, which by default is
:key:`resourceDirectory`, excluding files matched by
:key:`defaultExcludes`.
- :key:`managedResources` By default, this is empty for standard
projects. sbt plugins will have a generated descriptor file here.
- :key:`resourceGenerators` These are tasks that generate resource files.
Typically, these tasks will put resources in the directory provided
by :key:`resourceManaged`.
Use the :doc:`inspect command </Detailed-Topics/Inspecting-Settings>` for more details.
See also a related `StackOverflow
answer <http://stackoverflow.com/a/7862872/850196>`_.
Example
-------
You have a standalone project which uses a library that loads
xxx.properties from classpath at run time. You put xxx.properties inside
directory "config". When you run "sbt run", you want the directory to be
in classpath.
::
unmanagedClasspath in Runtime += baseDirectory.value / "config"

View File

@ -1,16 +0,0 @@
=========
Using sbt
=========
This part of the documentation has pages documenting particular sbt topics in detail.
Before reading anything in here, you will need the information in the
:doc:`Getting Started Guide </Getting-Started/Welcome>` as a foundation.
.. toctree::
:maxdepth: 2
Command-Line-Reference
Console-Project
Cross-Build
Inspecting-Settings
Triggered-Execution

View File

@ -1,232 +0,0 @@
======================
Command Line Reference
======================
This page is a relatively complete list of command line options,
commands, and tasks you can use from the sbt interactive prompt or in
batch mode. See :doc:`Running </Getting-Started/Running>` in the Getting
Started Guide for an intro to the basics, while this page has a lot more
detail.
Notes on the command line
-------------------------
- There is a technical distinction in sbt between *tasks*, which are
"inside" the build definition, and *commands*, which manipulate the
build definition itself. If you're interested in creating a command,
see :doc:`/Extending/Commands`. This specific sbt meaning of "command" means
there's no good general term for "thing you can type at the sbt
prompt", which may be a setting, task, or command.
- Some tasks produce useful values. The `toString` representation of
these values can be shown using `show <task>` to run the task
instead of just `<task>`.
- In a multi-project build, execution dependencies and the
:key:`aggregate` setting control which tasks from which projects are
executed. See :doc:`multi-project builds </Getting-Started/Multi-Project>`.
Project-level tasks
-------------------
- :key:`clean` Deletes all generated files (the :key:`target` directory).
- :key:`publishLocal` Publishes artifacts (such as jars) to the local Ivy
repository as described in :doc:`Publishing`.
- :key:`publish` Publishes artifacts (such as jars) to the repository
defined by the :key:`publishTo` setting, described in :doc:`Publishing`.
- :key:`update` Resolves and retrieves external dependencies as described
in :doc:`library dependencies </Getting-Started/Library-Dependencies>`.
Configuration-level tasks
-------------------------
Configuration-level tasks are tasks associated with a configuration. For
example, :key:`compile`, which is equivalent to `compile:compile`,
compiles the main source code (the `compile` configuration).
`test:compile` compiles the test source code (test `test`
configuration). Most tasks for the `compile` configuration have an
equivalent in the `test` configuration that can be run using a
`test:` prefix.
- :key:`compile` Compiles the main sources (in the `src/main/scala`
directory). `test:compile` compiles test sources (in the
`src/test/scala/` directory).
- :key:`console` Starts the Scala interpreter with a classpath including
the compiled sources, all jars in the `lib` directory, and managed
libraries. To return to sbt, type `:quit`, Ctrl+D (Unix), or Ctrl+Z
(Windows). Similarly, `test:console` starts the interpreter with
the test classes and classpath.
- :key:`consoleQuick` Starts the Scala interpreter with the project's
compile-time dependencies on the classpath. `test:consoleQuick`
uses the test dependencies. This task differs from :key:`console` in
that it does not force compilation of the current project's sources.
- :key:`consoleProject` Enters an interactive session with sbt and the
build definition on the classpath. The build definition and related
values are bound to variables and common packages and values are
imported. See the :doc:`consoleProject documentation <Console-Project>` for more information.
- :key:`doc` Generates API documentation for Scala source files in
`src/main/scala` using scaladoc. `test:doc` generates API
documentation for source files in `src/test/scala`.
- :key:`package` Creates a jar file containing the files in
`src/main/resources` and the classes compiled from
`src/main/scala`. `test:package` creates a jar containing the
files in `src/test/resources` and the class compiled from
`src/test/scala`.
- :key:`packageDoc` Creates a jar file containing API documentation
generated from Scala source files in `src/main/scala`.
`test:packageDoc` creates a jar containing API documentation for
test sources files in `src/test/scala`.
- :key:`packageSrc`: Creates a jar file containing all main source files
and resources. The packaged paths are relative to `src/main/scala`
and `src/main/resources`. Similarly, `test:packageSrc` operates
on test source files and resources.
- `run <argument>*` Runs the main class for the project in the same
virtual machine as `sbt`. The main class is passed the
`argument`\ s provided. Please see :doc:`Running-Project-Code` for
details on the use of `System.exit` and multithreading (including
GUIs) in code run by this action. `test:run` runs a main class in
the test code.
- `runMain <main-class> <argument>*` Runs the specified main class
for the project in the same virtual machine as `sbt`. The main
class is passed the `argument`\ s provided. Please see :doc:`Running-Project-Code`
for details on the use of `System.exit` and
multithreading (including GUIs) in code run by this action.
`test:runMain` runs the specified main class in the test code.
- :key:`test` Runs all tests detected during test compilation. See
:doc:`Testing` for details.
- `testOnly <test>*` Runs the tests provided as arguments. `*`
(will be) interpreted as a wildcard in the test name. See :doc:`Testing`
for details.
- `testQuick <test>*` Runs the tests specified as arguments (or all
tests if no arguments are given) that:
1. have not been run yet OR
2. failed the last time they were run OR
3. had any transitive dependencies recompiled since the last
successful run `*` (will be) interpreted as a wildcard in the
test name. See :doc:`Testing` for details.
General commands
----------------
- `exit` or `quit` End the current interactive session or build.
Additionally, `Ctrl+D` (Unix) or `Ctrl+Z` (Windows) will exit the
interactive prompt.
- `help <command>` Displays detailed help for the specified command.
If the command does not exist, `help` lists detailed help for
commands whose name or description match the argument, which is
interpreted as a regular expression. If no command is provided,
displays brief descriptions of the main commands. Related commands
are `tasks` and `settings`.
- `projects [add|remove <URI>]` List all available projects if no
arguments provided or adds/removes the build at the provided URI.
(See :doc:`/Getting-Started/Full-Def/` for details on multi-project builds.)
- `project <project-id>` Change the current project to the project
with ID `<project-id>`. Further operations will be done in the
context of the given project. (See :doc:`/Getting-Started/Full-Def/` for details
on multiple project builds.)
- `~ <command>` Executes the project specified action or method
whenever source files change. See :doc:`/Detailed-Topics/Triggered-Execution` for
details.
- `< filename` Executes the commands in the given file. Each command
should be on its own line. Empty lines and lines beginning with '#'
are ignored.
- `+ <command>` Executes the project specified action or method for
all versions of Scala defined in the `crossScalaVersions`
setting.
- `++ <version|home-directory> <command>` Temporarily changes the version of Scala
building the project and executes the provided command. `<command>`
is optional. The specified version of Scala is used until the project
is reloaded, settings are modified (such as by the `set` or
`session` commands), or `++` is run again. `<version>` does not
need to be listed in the build definition, but it must be available
in a repository. Alternatively, specify the path to a Scala installation.
- `; A ; B` Execute A and if it succeeds, run B. Note that the
leading semicolon is required.
- `eval <Scala-expression>` Evaluates the given Scala expression and
returns the result and inferred type. This can be used to set system
properties, as a calculator, to fork processes, etc ... For example:
::
> eval System.setProperty("demo", "true")
> eval 1+1
> eval "ls -l" !
Commands for managing the build definition
------------------------------------------
- `reload [plugins|return]` If no argument is specified, reloads the
build, recompiling any build or plugin definitions as necessary.
`reload plugins` changes the current project to the build
definition project (in `project/`). This can be useful to directly
manipulate the build definition. For example, running `clean` on
the build definition project will force snapshots to be updated and
the build definition to be recompiled. `reload return` changes back
to the main project.
- `set <setting-expression>` Evaluates and applies the given setting
definition. The setting applies until sbt is restarted, the build is
reloaded, or the setting is overridden by another `set` command or
removed by the `session` command. See
:doc:`.sbt build definition </Getting-Started/Basic-Def>` and
:doc:`Inspecting-Settings` for details.
- `session <command>` Manages session settings defined by the `set`
command. It can persist settings configured at the prompt. See
:doc:`Inspecting-Settings` for details.
- `inspect <setting-key>` Displays information about settings, such
as the value, description, defining scope, dependencies, delegation
chain, and related settings. See :doc:`Inspecting-Settings` for details.
Command Line Options
--------------------
System properties can be provided either as JVM options, or as SBT
arguments, in both cases as `-Dprop=value`. The following properties
influence SBT execution. Also see :doc:`Launcher`.
+------------------------------+-----------+---------------------+----------------------------------------------------+
| Property | Values | Default | Meaning |
+==============================+===========+=====================+====================================================+
| `sbt.log.noformat` | Boolean | false | If true, disable ANSI color codes. Useful on build |
| | | | servers or terminals that don't support color. |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.global.base` | Directory | ~/.sbt | The directory containing global settings and |
| | | | plugins |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.ivy.home` | Directory | ~/.ivy2 | The directory containing the local Ivy repository |
| | | | and artifact cache |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.boot.directory` | Directory | ~/.sbt/boot | Path to shared boot directory |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.main.class` | String | | |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `xsbt.inc.debug` | Boolean | false | |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.extraClasspath` | Classpath | | A list of classpath entries (jar files or |
| | Entries | | directories) that are added to sbt's classpath. |
| | | | Note that the entries are delimited by comma, |
| | | | e.g.: `entry1, entry2,..`. See also |
| | | | `resources` in the :doc:`Launcher` |
| | | | documentation. |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.version` | Version | 0.11.3 | sbt version to use, usually taken from |
| | | | project/build.properties |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.boot.properties` | File | | The path to find the SBT boot properties file. |
| | | | This can be a relative path, relative to the SBT |
| | | | base directory, the users home directory or the |
| | | | location of the sbt jar file, or it can be an |
| | | | absolute path, or it can be an absolute file URI. |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.override.build.repos` | Boolean | false | If true, repositories configured in a build |
| | | | definition are ignored and the repositories |
| | | | configured for the launcher are used instead. See |
| | | | `sbt.repository.config` and the :doc:`Launcher` |
| | | | documentation. |
+------------------------------+-----------+---------------------+----------------------------------------------------+
| `sbt.repository.config` | File | ~/.sbt/repositories | A file containing the repositories to use for the |
| | | | launcher. The format is the same as a |
| | | | `[repositories]` section for a :doc:`Launcher` |
| | | | configuration file. This setting is typically used |
| | | | in conjunction with setting |
| | | | `sbt.override.build.repos` to true (see previous |
| | | | row and the :doc:`Launcher` documentation). |
+------------------------------+-----------+---------------------+----------------------------------------------------+

View File

@ -1,64 +0,0 @@
=======================
Compiler Plugin Support
=======================
There is some special support for using compiler plugins. You can set
:key:`autoCompilerPlugins` to `true` to enable this functionality.
::
autoCompilerPlugins := true
To use a compiler plugin, you either put it in your unmanaged library
directory (`lib/` by default) or add it as managed dependency in the
`plugin` configuration. `addCompilerPlugin` is a convenience method
for specifying `plugin` as the configuration for a dependency:
::
addCompilerPlugin("org.scala-tools.sxr" %% "sxr" % "0.3.0")
The :key:`compile` and :key:`testCompile` actions will use any compiler
plugins found in the `lib` directory or in the `plugin`
configuration. You are responsible for configuring the plugins as
necessary. For example, Scala X-Ray requires the extra option:
::
// declare the main Scala source directory as the base directory
scalacOptions :=
scalacOptions.value :+ ("-Psxr:base-directory:" + (scalaSource in Compile).value.getAbsolutePath)
You can still specify compiler plugins manually. For example:
::
scalacOptions += "-Xplugin:<path-to-sxr>/sxr-0.3.0.jar"
Continuations Plugin Example
============================
Support for continuations in Scala 2.8 is implemented as a compiler
plugin. You can use the compiler plugin support for this, as shown here.
::
autoCompilerPlugins := true
addCompilerPlugin("org.scala-lang.plugins" % "continuations" % "2.8.1")
scalacOptions += "-P:continuations:enable"
Version-specific Compiler Plugin Example
========================================
Adding a version-specific compiler plugin can be done as follows:
::
autoCompilerPlugins := true
libraryDependencies +=
compilerPlugin("org.scala-lang.plugins" % "continuations" % scalaVersion.value)
scalacOptions += "-P:continuations:enable"

View File

@ -1,29 +0,0 @@
=============
Configuration
=============
This part of the documentation has pages documenting particular sbt topics in detail.
Before reading anything in here, you will need the information in the
:doc:`Getting Started Guide </Getting-Started/Welcome>` as a foundation.
.. toctree::
:maxdepth: 2
Classpaths
Compiler-Plugins
Configuring-Scala
Forking
Global-Settings
Java-Sources
Mapping-Files
Local-Scala
Macro-Projects
Paths
Parallel-Execution
Process
Running-Project-Code
Testing
Tasks
/Extending/Input-Tasks
Parsing-Input
Setup-Notes

View File

@ -1,167 +0,0 @@
=================
Configuring Scala
=================
sbt needs to obtain Scala for a project and it can do this automatically or you can configure it explicitly.
The Scala version that is configured for a project will compile, run, document, and provide a REPL for the project code.
When compiling a project, sbt needs to run the Scala compiler as well as provide the compiler with a classpath, which may include several Scala jars, like the reflection jar.
Automatically managed Scala
===========================
The most common case is when you want to use a version of Scala that is available in a repository.
The only required configuration is the Scala version you want to use.
For example,
::
scalaVersion := "2.10.0"
This will retrieve Scala from the repositories configured via the :key:`resolvers` setting.
It will use this version for building your project: compiling, running, scaladoc, and the REPL.
Configuring the scala-library dependency
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
By default, the standard Scala library is automatically added as a dependency.
If you want to configure it differently than the default or you have a project with only Java sources, set:
::
autoScalaLibrary := false
In order to compile Scala sources, the Scala library needs to be on the classpath.
When :key:`autoScalaLibrary` is true, the Scala library will be on all classpaths: test, runtime, and compile.
Otherwise, you need to add it like any other dependency.
For example, the following dependency definition uses Scala only for tests:
::
autoScalaLibrary := false
libraryDependencies += "org.scala-lang" % "scala-library" % scalaVersion.value % "test"
Configuring additional Scala dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When using a Scala dependency other than the standard library, add it as a normal managed dependency.
For example, to depend on the Scala compiler,
::
libraryDependencies += "org.scala-lang" % "scala-compiler" % scalaVersion.value
Note that this is necessary regardless of the value of the :key:`autoScalaLibrary` setting described in the previous section.
Configuring Scala tool dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In order to compile Scala code, run scaladoc, and provide a Scala REPL, sbt needs the `scala-compiler` jar.
This should not be a normal dependency of the project, so sbt adds a dependency on `scala-compiler` in the special, private `scala-tool` configuration.
It may be desirable to have more control over this in some situations.
Disable this automatic behavior with the :key:`managedScalaInstance` key:
::
managedScalaInstance := false
This will also disable the automatic dependency on `scala-library`.
If you do not need the Scala compiler for anything (compiling, the REPL, scaladoc, etc...), you can stop here.
sbt does not need an instance of Scala for your project in that case.
Otherwise, sbt will still need access to the jars for the Scala compiler for compilation and other tasks.
You can provide them by either declaring a dependency in the `scala-tool` configuration or by explicitly defining :key:`scalaInstance`.
In the first case, add the `scala-tool` configuration and add a dependency on `scala-compiler` in this configuration.
The organization is not important, but sbt needs the module name to be `scala-compiler` and `scala-library` in order to handle those jars appropriately.
For example,
::
managedScalaInstance := false
// Add the configuration for the dependencies on Scala tool jars
// You can also use a manually constructed configuration like:
// config("scala-tool").hide
ivyConfigurations += Configurations.ScalaTool
// Add the usual dependency on the library as well on the compiler in the
// 'scala-tool' configuration
libraryDependencies ++= Seq(
"org.scala-lang" % "scala-library" % scalaVersion.value,
"org.scala-lang" % "scala-compiler" % scalaVersion.value % "scala-tool"
)
In the second case, directly construct a value of type `ScalaInstance <../../api/sbt/ScalaInstance.html>`_, typically using a method in the `companion object <../../api/sbt/ScalaInstance$.html>`_, and assign it to :key:`scalaInstance`.
You will also need to add the `scala-library` jar to the classpath to compile and run Scala sources.
For example,
::
managedScalaInstance := false
scalaInstance := ...
unmanagedJars in Compile += scalaInstance.value.libraryJar
Switching to a local Scala version
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To use a locally built Scala version, configure Scala home as described in the following section.
Scala will still be resolved as before, but the jars will come from the configured Scala home directory.
Using Scala from a local directory
==================================
The result of building Scala from source is a Scala home directory `<base>/build/pack/` that contains a subdirectory `lib/` containing the Scala library, compiler, and other jars.
The same directory layout is obtained by downloading and extracting a Scala distribution.
Such a Scala home directory may be used as the source for jars by setting :key:`scalaHome`.
For example,
::
scalaHome := Some(file("/home/user/scala-2.10/"))
By default, `lib/scala-library.jar` will be added to the unmanaged classpath and `lib/scala-compiler.jar` will be used to compile Scala sources and provide a Scala REPL.
No managed dependency is recorded on `scala-library`.
This means that Scala will only be resolved from a repository if you explicitly define a dependency on Scala or if Scala is depended on indirectly via a dependency.
In these cases, the artifacts for the resolved dependencies will be substituted with jars in the Scala home `lib/` directory.
Mixing with managed dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As an example, consider adding a dependency on `scala-reflect` when :key:`scalaHome` is configured:
::
scalaHome := Some(file("/home/user/scala-2.10/"))
libraryDependencies += "org.scala-lang" % "scala-reflect" % scalaVersion.value
This will be resolved as normal, except that sbt will see if `/home/user/scala-2.10/lib/scala-reflect.jar` exists.
If it does, that file will be used in place of the artifact from the managed dependency.
Using unmanaged dependencies only
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Instead of adding managed dependencies on Scala jars, you can directly add them.
The :key:`scalaInstance` task provides structured access to the Scala distribution.
For example, to add all jars in the Scala home `lib/` directory,
::
scalaHome := Some(file("/home/user/scala-2.10/"))
unmanagedJars in Compile ++= scalaInstance.value.jars
To add only some jars, filter the jars from :key:`scalaInstance` before adding them.
sbt's Scala version
===================
sbt needs Scala jars to run itself since it is written in Scala.
sbt uses that same version of Scala to compile the build definitions that you write for your project because they use sbt APIs.
This version of Scala is fixed for a specific sbt release and cannot be changed.
For sbt |version|, this version is Scala |scalaVersion|.
Because this Scala version is needed before sbt runs, the repositories used to retrieve this version are configured in the sbt :doc:`launcher </Detailed-Topics/Launcher>`.

View File

@ -1,112 +0,0 @@
===============
Console Project
===============
Description
===========
The :key:`consoleProject` task starts the Scala interpreter with access to
your project definition and to `sbt`. Specifically, the interpreter is
started up with these commands already executed:
::
import sbt._
import Process._
import Keys._
import <your-project-definition>._
import currentState._
import extracted._
import cpHelpers._
For example, running external processes with sbt's process library (to
be included in the standard library in Scala 2.9):
.. code-block:: console
> "tar -zcvf project-src.tar.gz src" !
> "find project -name *.jar" !
> "cat build.sbt" #| "grep version" #> new File("sbt-version") !
> "grep -r null src" #|| "echo null-free" !
> uri("http://databinder.net/dispatch/About").toURL #> file("About.html") !
:key:`consoleProject` can be useful for creating and modifying your build
in the same way that the Scala interpreter is normally used to explore
writing code. Note that this gives you raw access to your build. Think
about what you pass to `IO.delete`, for example.
Accessing settings
==================
To get a particular setting, use the form:
.. code-block:: scala
> val value = (<key> in <scope>).eval
Examples
--------
.. code-block:: scala
> IO.delete( (classesDirectory in Compile).eval )
Show current compile options:
.. code-block:: scala
> (scalacOptions in Compile).eval foreach println
Show additionally configured repositories.
.. code-block:: scala
> resolvers.eval foreach println
Evaluating tasks
================
To evaluate a task (and its dependencies), use the same form:
.. code-block:: scala
> val value = (<key> in <scope>).eval
Examples
--------
Show all repositories, including defaults.
.. code-block:: scala
> fullResolvers.eval foreach println
Show the classpaths used for compilation and testing:
.. code-block:: scala
> (fullClasspath in Compile).eval.files foreach println
> (fullClasspath in Test).eval.files foreach println
State
=====
The current :doc:`build State </Extending/Build-State>` is available as `currentState`.
The contents of `currentState` are imported by default and can be used without qualification.
Examples
--------
Show the remaining commands to be executed in the build (more
interesting if you invoke :key:`consoleProject` like
`; consoleProject ; clean ; compile`):
.. code-block:: scala
> remainingCommands
Show the number of currently registered commands:
.. code-block:: scala
> definedCommands.size

View File

@ -1,146 +0,0 @@
==============
Cross-building
==============
Introduction
============
Different versions of Scala can be binary incompatible, despite
maintaining source compatibility. This page describes how to use `sbt`
to build and publish your project against multiple versions of Scala and
how to use libraries that have done the same.
Publishing Conventions
======================
The underlying mechanism used to indicate which version of Scala a
library was compiled against is to append `_<scala-version>` to the
library's name. For Scala 2.10.0 and later, the binary version is used.
For example, `dispatch` becomes `dispatch_2.8.1` for the variant
compiled against Scala 2.8.1 and `dispatch_2.10` when compiled against
2.10.0, 2.10.0-M1 or any 2.10.x version. This fairly simple approach
allows interoperability with users of Maven, Ant and other build tools.
The rest of this page describes how `sbt` handles this for you as part
of cross-building.
Using Cross-Built Libraries
===========================
To use a library built against multiple versions of Scala, double the
first `%` in an inline dependency to be `%%`. This tells `sbt`
that it should append the current version of Scala being used to build
the library to the dependency's name. For example:
::
libraryDependencies += "net.databinder" %% "dispatch" % "0.8.0"
A nearly equivalent, manual alternative for a fixed version of Scala is:
::
libraryDependencies += "net.databinder" % "dispatch_2.10" % "0.8.0"
or for Scala versions before 2.10:
::
libraryDependencies += "net.databinder" % "dispatch_2.8.1" % "0.8.0"
Cross-Building a Project
========================
Define the versions of Scala to build against in the
:key:`crossScalaVersions` setting. Versions of Scala 2.8.0 or later are
allowed. For example, in a `.sbt` build definition:
::
crossScalaVersions := Seq("2.8.2", "2.9.2", "2.10.0")
To build against all versions listed in :key:`crossScalaVersions`, prefix
the action to run with `+`. For example:
::
> + package
A typical way to use this feature is to do development on a single Scala
version (no `+` prefix) and then cross-build (using `+`)
occasionally and when releasing. The ultimate purpose of `+` is to
cross-publish your project. That is, by doing:
.. code-block:: console
> + publish
you make your project available to users for different versions of
Scala. See :doc:`Publishing` for more details on publishing your project.
In order to make this process as quick as possible, different output and
managed dependency directories are used for different versions of Scala.
For example, when building against Scala 2.10.0,
- `./target/` becomes `./target/scala_2.10/`
- `./lib_managed/` becomes `./lib_managed/scala_2.10/`
Packaged jars, wars, and other artifacts have `_<scala-version>`
appended to the normal artifact ID as mentioned in the Publishing
Conventions section above.
This means that the outputs of each build against each version of Scala
are independent of the others. `sbt` will resolve your dependencies
for each version separately. This way, for example, you get the version
of Dispatch compiled against 2.8.1 for your 2.8.1 build, the version
compiled against 2.10 for your 2.10.x builds, and so on. You can have
fine-grained control over the behavior for different Scala versions
by using the `cross` method on `ModuleID`. These are equivalent:
::
"a" % "b" % "1.0"
"a" % "b" % "1.0" cross CrossVersion.Disabled
These are equivalent:
::
"a" %% "b" % "1.0"
"a" % "b" % "1.0" cross CrossVersion.binary
This overrides the defaults to always use the full Scala version instead
of the binary Scala version:
::
"a" % "b" % "1.0" cross CrossVersion.full
This uses a custom function to determine the Scala version to use based
on the binary Scala version:
::
"a" % "b" % "1.0" cross CrossVersion.binaryMapped {
case "2.9.1" => "2.9.0" // remember that pre-2.10, binary=full
case "2.10" => "2.10.0" // useful if a%b was released with the old style
case x => x
}
This uses a custom function to determine the Scala version to use based
on the full Scala version:
::
"a" % "b" % "1.0" cross CrossVersion.fullMapped {
case "2.9.1" => "2.9.0"
case x => x
}
A custom function is mainly used when cross-building and a dependency
isn't available for all Scala versions or it uses a different convention
than the default.
As a final note, you can use `++ <version>` to temporarily switch the
Scala version currently being used to build. `<version>` should be either a version for Scala published to a repository, as in `++ 2.10.0` or the path to a Scala home directory, as in `++ /path/to/scala/home`. See
:doc:`/Detailed-Topics/Command-Line-Reference` for details.

View File

@ -1,121 +0,0 @@
===========================
Dependency Management Flow
===========================
0.12.1 addresses several issues with dependency management. These fixes
were made possible by specific, reproducible examples, such as a
situation where the resolution cache got out of date (gh-532). A brief summary of
the current workflow with dependency management in sbt follows.
Background
==========
:key:`update` resolves dependencies according to the settings in a build
file, such as :key:`libraryDependencies` and :key:`resolvers`. Other tasks use
the output of :key:`update` (an `UpdateReport`) to form various
classpaths. Tasks that in turn use these classpaths, such as :key:`compile`
or :key:`run`, thus indirectly depend on `update`. This means that before
:key:`compile` can run, the :key:`update` task needs to run. However,
resolving dependencies on every :key:`compile` would be unnecessarily slow
and so :key:`update` must be particular about when it actually performs a
resolution.
Caching and Configuration
=========================
1. Normally, if no dependency management configuration has changed since
the last successful resolution and the retrieved files are still
present, sbt does not ask Ivy to perform resolution.
2. Changing the configuration, such as adding or removing dependencies
or changing the version or other attributes of a dependency, will
automatically cause resolution to be performed. Updates to locally
published dependencies should be detected in sbt 0.12.1 and later and
will force an :key:`update`. Dependent tasks like :key:`compile` and
:key:`run` will get updated classpaths.
3. Directly running the :key:`update` task (as opposed to a task that
depends on it) will force resolution to run, whether or not
configuration changed. This should be done in order to refresh remote
SNAPSHOT dependencies.
4. When `offline := true`, remote SNAPSHOTs will not be updated by a
resolution, even an explicitly requested :key:`update`. This should
effectively support working without a connection to remote
repositories. Reproducible examples demonstrating otherwise are
appreciated. Obviously, :key:`update` must have successfully run before
going offline.
5. Overriding all of the above, `skip in update := true` will tell sbt
to never perform resolution. Note that this can cause dependent tasks
to fail. For example, compilation may fail if jars have been deleted
from the cache (and so needed classes are missing) or a dependency
has been added (but will not be resolved because skip is true). Also,
:key:`update` itself will immediately fail if resolution has not been
allowed to run since the last :key:`clean`.
General troubleshooting steps
=============================
A. Run :key:`update` explicitly. This will typically fix problems with out
of date SNAPSHOTs or locally published artifacts.
B. If a file cannot be
found, look at the output of :key:`update` to see where Ivy is looking for
the file. This may help diagnose an incorrectly defined dependency or a
dependency that is actually not present in a repository.
C. `last update` contains more information about the most recent
resolution and download. The amount of debugging output from Ivy is
high, so you may want to use `lastGrep` (run `help lastGrep` for
usage).
D. Run :key:`clean` and then :key:`update`. If this works, it could
indicate a bug in sbt, but the problem would need to be reproduced in
order to diagnose and fix it.
E. Before deleting all of the Ivy cache,
first try deleting files in `~/.ivy2/cache` related to problematic
dependencies. For example, if there are problems with dependency
`"org.example" % "demo" % "1.0"`, delete
`~/.ivy2/cache/org.example/demo/1.0/` and retry :key:`update`. This
avoids needing to redownload all dependencies.
F. Normal sbt usage
should not require deleting files from `~/.ivy2/cache`, especially if
the first four steps have been followed. If deleting the cache fixes a
dependency management issue, please try to reproduce the issue and
submit a test case.
Plugins
=======
These troubleshooting steps can be run for plugins by changing to the
build definition project, running the commands, and then returning to
the main project. For example:
.. code-block:: console
> reload plugins
> update
> reload return
Notes
=====
A. Configure offline behavior for all projects on a machine by putting
`offline := true` in `~/.sbt/global.sbt`. A command that does this
for the user would make a nice pull request. Perhaps the offline
setting should be shown in the output of `about`, reported as a warning
in the output of :key:`update`, or both?
B. The cache improvements in 0.12.1 address issues in the change detection
for :key:`update` so that it will correctly re-resolve automatically in more
situations. A problem with an out of date cache can usually be attributed
to a bug in that change detection if explicitly running `update` fixes
the problem.
C. A common solution to dependency management problems in sbt has been to
remove `~/.ivy2/cache`. Before doing this with 0.12.1, be sure to
follow the steps in the troubleshooting section first. In particular,
verify that a :key:`clean` and an explicit :key:`update` do not solve the
issue.
D. There is no need to mark SNAPSHOT dependencies as `changing()`
because sbt configures Ivy to know this already.

View File

@ -1,18 +0,0 @@
=======================
Dependency Management
=======================
This part of the documentation has pages documenting particular sbt topics in detail.
Before reading anything in here, you will need the information in the
:doc:`Getting Started Guide </Getting-Started/Welcome>` as a foundation.
.. toctree::
:maxdepth: 2
Artifacts
Dependency-Management-Flow
Library-Management
Proxy-Repositories
Publishing
Resolvers
Update-Report

View File

@ -1,181 +0,0 @@
=======
Forking
=======
By default, the :key:`run` task runs in the same JVM as sbt. Forking is
required under :doc:`certain circumstances <Running-Project-Code>`, however.
Or, you might want to fork Java processes when implementing new tasks.
By default, a forked process uses the same Java and Scala versions being
used for the build and the working directory and JVM options of the
current process. This page discusses how to enable and configure forking
for both :key:`run` and :key:`test` tasks. Each kind of task may be configured
separately by scoping the relevant keys as explained below.
Enable forking
==============
The :key:`fork` setting controls whether forking is enabled (true) or not
(false). It can be set in the :key:`run` scope to only fork :key:`run`
commands or in the :key:`test` scope to only fork :key:`test` commands.
To fork all test tasks (:key:`test`, :key:`testOnly`, and :key:`testQuick`) and
run tasks (:key:`run`, :key:`runMain`, `test:run`, and `test:runMain`),
::
fork := true
To enable forking :key:`run` tasks only, set :key:`fork` to `true` in the
:key:`run` scope.
::
fork in run := true
To only fork `test:run` and `test:runMain`:
::
fork in (Test,run) := true
Similarly, set `fork in (Compile,run) := true` to only fork the main
:key:`run` tasks. :key:`run` and :key:`runMain` share the same configuration and
cannot be configured separately.
To enable forking all :key:`test` tasks only, set :key:`fork` to `true` in
the :key:`test` scope:
::
fork in test := true
See :doc:`Testing` for more control over how tests are assigned to JVMs and
what options to pass to each group.
Change working directory
========================
To change the working directory when forked, set
`baseDirectory in run` or `baseDirectory in test`:
::
// sets the working directory for all `run`-like tasks
baseDirectory in run := file("/path/to/working/directory/")
// sets the working directory for `run` and `runMain` only
baseDirectory in (Compile,run) := file("/path/to/working/directory/")
// sets the working directory for `test:run` and `test:runMain` only
baseDirectory in (Test,run) := file("/path/to/working/directory/")
// sets the working directory for `test`, `testQuick`, and `testOnly`
baseDirectory in test := file("/path/to/working/directory/")
Forked JVM options
==================
To specify options to be provided to the forked JVM, set
:key:`javaOptions`:
::
javaOptions in run += "-Xmx8G"
or specify the configuration to affect only the main or test `run`
tasks:
::
javaOptions in (Test,run) += "-Xmx8G"
or only affect the :key:`test` tasks:
::
javaOptions in test += "-Xmx8G"
Java Home
=========
Select the Java installation to use by setting the :key:`javaHome`
directory:
::
javaHome := Some(file("/path/to/jre/"))
Note that if this is set globally, it also sets the Java installation
used to compile Java sources. You can restrict it to running only by
setting it in the :key:`run` scope:
::
javaHome in run := Some(file("/path/to/jre/"))
As with the other settings, you can specify the configuration to affect
only the main or test :key:`run` tasks or just the :key:`test` tasks.
Configuring output
==================
By default, forked output is sent to the Logger, with standard output
logged at the `Info` level and standard error at the `Error` level.
This can be configured with the :key:`outputStrategy` setting, which is of
type
`OutputStrategy <../../api/sbt/OutputStrategy.html>`_.
::
// send output to the build's standard output and error
outputStrategy := Some(StdoutOutput)
// send output to the provided OutputStream `someStream`
outputStrategy := Some(CustomOutput(someStream: OutputStream))
// send output to the provided Logger `log` (unbuffered)
outputStrategy := Some(LoggedOutput(log: Logger))
// send output to the provided Logger `log` after the process terminates
outputStrategy := Some(BufferedOutput(log: Logger))
As with other settings, this can be configured individually for main or
test :key:`run` tasks or for :key:`test` tasks.
Configuring Input
=================
By default, the standard input of the sbt process is not forwarded to
the forked process. To enable this, configure the :key:`connectInput`
setting:
::
connectInput in run := true
Direct Usage
============
To fork a new Java process, use the `Fork
API <../../api/sbt/Fork$.html>`_.
The values of interest are `Fork.java`, `Fork.javac`, `Fork.scala`, and `Fork.scalac`.
These are of type `Fork <../../api/sbt/Fork.html>`_ and provide `apply` and `fork` methods.
For example, to fork a new Java process, ::
val options = ForkOptions(...)
val arguments: Seq[String] = ...
val mainClass: String = ...
val exitCode: Int = Fork.java(options, mainClass +: arguments)
`ForkOptions <../../api/sbt/ForkOptions.html>`_ defines the Java installation to use, the working directory, environment variables, and more.
For example, ::
val cwd: File = ...
val javaDir: File = ...
val options = ForkOptions(
envVars = Map("KEY" -> "value"),
workingDirectory = Some(cwd),
javaHome = Some(javaDir)
)

View File

@ -1,61 +0,0 @@
===============
Global Settings
===============
Basic global configuration file
-------------------------------
Settings that should be applied to all projects can go in :sublit:`|globalSbtFile|`
(or any file in :sublit:`|globalBase|` with a `.sbt` extension).
Plugins that are defined globally in :sublit:`|globalPluginsBase|` are
available to these settings. For example, to change the default
:key:`shellPrompt` for your projects:
:sublit:`|globalSbtFile|`
::
shellPrompt := { state =>
"sbt (%s)> ".format(Project.extract(state).currentProject.id)
}
Global Settings using a Global Plugin
-------------------------------------
The :sublit:`|globalPluginsBase|` directory is a global plugin project. This can be
used to provide global commands, plugins, or other code.
To add a plugin globally, create :sublit:`|globalPluginSbtFile|` containing
the dependency definitions. For example:
::
addSbtPlugin("org.example" % "plugin" % "1.0")
To change the default :key:`shellPrompt` for every project using this
approach, create a local plugin :sublit:`|globalShellPromptScala|`:
::
import sbt._
import Keys._
object ShellPrompt extends Plugin {
override def settings = Seq(
shellPrompt := { state =>
"sbt (%s)> ".format(Project.extract(state).currentProject.id) }
)
}
The :sublit:`|globalPluginsBase|` directory is a full project that is included as
an external dependency of every plugin project. In practice, settings
and code defined here effectively work as if they were defined in a
project's `project/` directory. This means that :sublit:`|globalPluginsBase|` can
be used to try out ideas for plugins such as shown in the :key:`shellPrompt`
example.
.. |globalBase| replace:: ~/.sbt/|version|/
.. |globalPluginsBase| replace:: |globalBase|\ plugins/
.. |globalSbtFile| replace:: |globalBase|\ global.sbt
.. |globalPluginSbtFile| replace:: |globalPluginsBase|\ build.sbt
.. |globalShellPromptScala| replace:: |globalPluginsBase|\ ShellPrompt.scala

View File

@ -1,319 +0,0 @@
=========================================
Interacting with the Configuration System
=========================================
Central to sbt is the new configuration system, which is designed to
enable extensive customization. The goal of this page is to explain the
general model behind the configuration system and how to work with it.
The Getting Started Guide (see :doc:`.sbt files </Getting-Started/Basic-Def>`)
describes how to define settings; this page describes interacting
with them and exploring them at the command line.
Selecting commands, tasks, and settings
=======================================
A fully-qualified reference to a setting or task looks like:
.. code-block:: console
{<build-uri>}<project-id>/config:inkey::key
This "scoped key" reference is used by commands like `last` and
`inspect` and when selecting a task to run. Only `key` is usually
required by the parser; the remaining optional pieces select the scope.
These optional pieces are individually referred to as scope axes. In the
above description, `{<build-uri>}` and `<project-id>/` specify the
project axis, `config:` is the configuration axis, and `inkey` is
the task-specific axis. Unspecified components are taken to be the
current project (project axis) or auto-detected (configuration and task
axes). An asterisk (`*`) is used to explicitly refer to the `Global`
context, as in `*/*:key`.
Selecting the configuration
---------------------------
In the case of an unspecified configuration (that is, when the
`config:` part is omitted), if the key is defined in `Global`, that
is selected. Otherwise, the first configuration defining the key is
selected, where order is determined by the project definition's
`configurations` member. By default, this ordering is
`compile, test, ...`
For example, the following are equivalent when run in a project `root`
in the build in `/home/user/sample/`:
.. code-block:: console
> compile
> compile:compile
> root/compile
> root/compile:compile
> {file:/home/user/sample/}root/compile:compile
As another example, :key:`run` by itself refers to `compile:run` because
there is no global :key:`run` task and the first configuration searched,
:key:`compile`, defines a :key:`run`. Therefore, to reference the :key:`run` task
for the `Test` configuration, the configuration axis must be specified
like `test:run`. Some other examples that require the explicit
`test:` axis:
.. code-block:: console
> test:consoleQuick
> test:console
> test:doc
> test:package
Task-specific Settings
----------------------
Some settings are defined per-task. This is used when there are several
related tasks, such as :key:`package`, :key:`packageSrc`, and
:key:`packageDoc`, in the same configuration (such as :key:`compile` or
:key:`test`). For package tasks, their settings are the files to package,
the options to use, and the output file to produce. Each package task
should be able to have different values for these settings.
This is done with the task axis, which selects the task to apply a
setting to. For example, the following prints the output jar for the
different package tasks.
.. code-block:: console
> package::artifactPath
[info] /home/user/sample/target/scala-2.8.1.final/demo_2.8.1-0.1.jar
> packageSrc::artifactPath
[info] /home/user/sample/target/scala-2.8.1.final/demo_2.8.1-0.1-src.jar
> packageDoc::artifactPath
[info] /home/user/sample/target/scala-2.8.1.final/demo_2.8.1-0.1-doc.jar
> test:package::artifactPath
[info] /home/user/sample/target/scala-2.8.1.final/root_2.8.1-0.1-test.jar
Note that a single colon `:` follows a configuration axis and a double
colon `::` follows a task axis.
Discovering Settings and Tasks
==============================
This section discusses the `inspect` command, which is useful for
exploring relationships between settings. It can be used to determine
which setting should be modified in order to affect another setting, for
example.
Value and Provided By
---------------------
The first piece of information provided by `inspect` is the type of a
task or the value and type of a setting. The following section of output
is labeled "Provided by". This shows the actual scope where the setting
is defined. For example,
.. code-block:: console
> inspect libraryDependencies
[info] Setting: scala.collection.Seq[sbt.ModuleID] = List(org.scalaz:scalaz-core:6.0-SNAPSHOT, org.scala-tools.testing:scalacheck:1.8:test)
[info] Provided by:
[info] {file:/home/user/sample/}root/*:libraryDependencies
...
This shows that :key:`libraryDependencies` has been defined on the current
project (`{file:/home/user/sample/}root`) in the global configuration
(`*:`). For a task like :key:`update`, the output looks like:
.. code-block:: console
> inspect update
[info] Task: sbt.UpdateReport
[info] Provided by:
[info] {file:/home/user/sample/}root/*:update
...
Related Settings
----------------
The "Related" section of `inspect` output lists all of the definitions
of a key. For example,
.. code-block:: console
> inspect compile
...
[info] Related:
[info] test:compile
This shows that in addition to the requested `compile:compile` task,
there is also a `test:compile` task.
Dependencies
------------
Forward dependencies show the other settings (or tasks) used to define a
setting (or task). Reverse dependencies go the other direction, showing
what uses a given setting. `inspect` provides this information based
on either the requested dependencies or the actual dependencies.
Requested dependencies are those that a setting directly specifies.
Actual settings are what those dependencies get resolved to. This
distinction is explained in more detail in the following sections.
Requested Dependencies
~~~~~~~~~~~~~~~~~~~~~~
As an example, we'll look at :key:`console`:
.. code-block:: console
> inspect console
...
[info] Dependencies:
[info] compile:console::fullClasspath
[info] compile:console::scalacOptions
[info] compile:console::initialCommands
[info] compile:console::cleanupCommands
[info] compile:console::compilers
[info] compile:console::taskTemporaryDirectory
[info] compile:console::scalaInstance
[info] compile:console::streams
...
This shows the inputs to the :key:`console` task. We can see that it gets
its classpath and options from :key:`fullClasspath` and
`scalacOptions(for console)`. The information provided by the
`inspect` command can thus assist in finding the right setting to
change. The convention for keys, like :key:`console` and
:key:`fullClasspath`, is that the Scala identifier is camel case, while
the String representation is lowercase and separated by dashes. The
Scala identifier for a configuration is uppercase to distinguish it from
tasks like :key:`compile` and :key:`test`. For example, we can infer from the
previous example how to add code to be run when the Scala interpreter
starts up:
.. code-block:: console
> set initialCommands in Compile in console := "import mypackage._"
> console
...
import mypackage._
...
`inspect` showed that :key:`console` used the setting
`compile:console::initialCommands`. Translating the
:key:`initialCommands` string to the Scala identifier gives us
:key:`initialCommands`. :key:`compile` indicates that this is for the main
sources. `console::` indicates that the setting is specific to
:key:`console`. Because of this, we can set the initial commands on the
:key:`console` task without affecting the :key:`consoleQuick` task, for
example.
Actual Dependencies
~~~~~~~~~~~~~~~~~~~
`inspect actual <scoped-key>` shows the actual dependency used. This
is useful because delegation means that the dependency can come from a
scope other than the requested one. Using `inspect actual`, we see
exactly which scope is providing a value for a setting. Combining
`inspect actual` with plain `inspect`, we can see the range of
scopes that will affect a setting. Returning to the example in Requested
Dependencies,
.. code-block:: console
> inspect actual console
...
[info] Dependencies:
[info] compile:scalacOptions
[info] compile:fullClasspath
[info] *:scalaInstance
[info] */*:initialCommands
[info] */*:cleanupCommands
[info] */*:taskTemporaryDirectory
[info] *:console::compilers
[info] compile:console::streams
...
For `initialCommands`, we see that it comes from the global scope
(`*/*:`). Combining this with the relevant output from
`inspect console`:
.. code-block:: console
compile:console::initialCommands
we know that we can set :key:`initialCommands` as generally as the global
scope, as specific as the current project's :key:`console` task scope, or
anything in between. This means that we can, for example, set
:key:`initialCommands` for the whole project and will affect :key:`console`:
.. code-block:: console
> set initialCommands := "import mypackage._"
...
The reason we might want to set it here is that other console tasks
will use this value now. We can see which ones use our new setting by
looking at the reverse dependencies output of `inspect actual`:
.. code-block:: console
> inspect actual initialCommands
...
[info] Reverse dependencies:
[info] test:console
[info] compile:consoleQuick
[info] compile:console
[info] test:consoleQuick
[info] *:consoleProject
...
We now know that by setting :key:`initialCommands` on the whole project,
we affect all console tasks in all configurations in that project. If we
didn't want the initial commands to apply for :key:`consoleProject`, which
doesn't have our project's classpath available, we could use the more
specific task axis:
.. code-block:: console
> set initialCommands in console := "import mypackage._"
> set initialCommands in consoleQuick := "import mypackage._"
or configuration axis:
.. code-block:: console
> set initialCommands in Compile := "import mypackage._"
> set initialCommands in Test := "import mypackage._"
The next part describes the Delegates section, which shows the chain of
delegation for scopes.
Delegates
---------
A setting has a key and a scope. A request for a key in a scope A may be
delegated to another scope if A doesn't define a value for the key. The
delegation chain is well-defined and is displayed in the Delegates
section of the `inspect` command. The Delegates section shows the
order in which scopes are searched when a value is not defined for the
requested key.
As an example, consider the initial commands for :key:`console` again:
.. code-block:: console
> inspect console::initialCommands
...
[info] Delegates:
[info] *:console::initialCommands
[info] *:initialCommands
[info] {.}/*:console::initialCommands
[info] {.}/*:initialCommands
[info] */*:console::initialCommands
[info] */*:initialCommands
...
This means that if there is no value specifically for
`*:console::initialCommands`, the scopes listed under Delegates will
be searched in order until a defined value is found.

View File

@ -1,77 +0,0 @@
============
Java Sources
============
sbt has support for compiling Java sources with the limitation that
dependency tracking is limited to the dependencies present in compiled
class files.
Usage
=====
- :key:`compile` will compile the sources under `src/main/java` by
default.
- :key:`testCompile` will compile the sources under `src/test/java` by
default.
Pass options to the Java compiler by setting :key:`javacOptions`:
::
javacOptions += "-g:none"
As with options for the Scala compiler, the arguments are not parsed by
sbt. Multi-element options, such as `-source 1.5`, are specified like:
::
javacOptions ++= Seq("-source", "1.5")
You can specify the order in which Scala and Java sources are built with
the :key:`compileOrder` setting. Possible values are from the
`CompileOrder` enumeration: `Mixed`, `JavaThenScala`, and
`ScalaThenJava`. If you have circular dependencies between Scala and
Java sources, you need the default, `Mixed`, which passes both Java
and Scala sources to `scalac` and then compiles the Java sources with
`javac`. If you do not have circular dependencies, you can use one of
the other two options to speed up your build by not passing the Java
sources to `scalac`. For example, if your Scala sources depend on your
Java sources, but your Java sources do not depend on your Scala sources,
you can do:
::
compileOrder := CompileOrder.JavaThenScala
To specify different orders for main and test sources, scope the setting
by configuration:
::
// Java then Scala for main sources
compileOrder in Compile := CompileOrder.JavaThenScala
// allow circular dependencies for test sources
compileOrder in Test := CompileOrder.Mixed
Note that in an incremental compilation setting, it is not practical to
ensure complete isolation between Java sources and Scala sources because
they share the same output directory. So, previously compiled classes
not involved in the current recompilation may be picked up. A clean
compile will always provide full checking, however.
By default, sbt includes `src/main/scala` and `src/main/java` in its
list of unmanaged source directories. For Java-only projects, the
unnecessary Scala directories can be ignored by modifying
:key:`unmanagedSourceDirectories`:
::
// Include only src/main/java in the compile configuration
unmanagedSourceDirectories in Compile := (javaSource in Compile).value :: Nil
// Include only src/test/java in the test configuration
unmanagedSourceDirectories in Test := (javaSource in Test).value :: Nil
However, there should not be any harm in leaving the Scala directories
if they are empty.

View File

@ -1,5 +0,0 @@
============
Sbt Launcher
============
This documentation has been moved to :doc:`The Launcher section </Launcher/index>`.

View File

@ -1,674 +0,0 @@
==================
Library Management
==================
There's now a :doc:`getting started page </Getting-Started/Library-Dependencies>`
about library management, which you may want to read first.
*Documentation Maintenance Note:* it would be nice to remove the overlap between
this page and the getting started page, leaving this page with the more
advanced topics such as checksums and external Ivy files.
Introduction
============
There are two ways for you to manage libraries with sbt: manually or
automatically. These two ways can be mixed as well. This page discusses
the two approaches. All configurations shown here are settings that go
either directly in a :doc:`.sbt file </Getting-Started/Basic-Def>` or are
appended to the `settings` of a Project in a :doc:`.scala file </Getting-Started/Full-Def>`.
Manual Dependency Management
============================
Manually managing dependencies involves copying any jars that you want
to use to the `lib` directory. sbt will put these jars on the
classpath during compilation, testing, running, and when using the
interpreter. You are responsible for adding, removing, updating, and
otherwise managing the jars in this directory. No modifications to your
project definition are required to use this method unless you would like
to change the location of the directory you store the jars in.
To change the directory jars are stored in, change the
:key:`unmanagedBase` setting in your project definition. For example, to
use `custom_lib/`:
::
unmanagedBase := baseDirectory.value / "custom_lib"
If you want more control and flexibility, override the
:key:`unmanagedJars` task, which ultimately provides the manual
dependencies to sbt. The default implementation is roughly:
::
unmanagedJars in Compile := (baseDirectory.value ** "*.jar").classpath
If you want to add jars from multiple directories in addition to the
default directory, you can do:
::
unmanagedJars in Compile ++= {
val base = baseDirectory.value
val baseDirectories = (base / "libA") +++ (base / "b" / "lib") +++ (base / "libC")
val customJars = (baseDirectories ** "*.jar") +++ (base / "d" / "my.jar")
customJars.classpath
}
See :doc:`Paths` for more information on building up paths.
Automatic Dependency Management
===============================
This method of dependency management involves specifying the direct
dependencies of your project and letting sbt handle retrieving and
updating your dependencies. sbt supports three ways of specifying these
dependencies:
- Declarations in your project definition
- Maven POM files (dependency definitions only: no repositories)
- Ivy configuration and settings files
sbt uses `Apache Ivy <http://ant.apache.org/ivy/>`_ to implement
dependency management in all three cases. The default is to use inline
declarations, but external configuration can be explicitly selected. The
following sections describe how to use each method of automatic
dependency management.
Inline Declarations
-------------------
Inline declarations are a basic way of specifying the dependencies to be
automatically retrieved. They are intended as a lightweight alternative
to a full configuration using Ivy.
Dependencies
~~~~~~~~~~~~
Declaring a dependency looks like:
::
libraryDependencies += groupID % artifactID % revision
or
::
libraryDependencies += groupID % artifactID % revision % configuration
See :ref:`configurations <ivy-configurations>` for details on configuration mappings. Also,
several dependencies can be declared together:
::
libraryDependencies ++= Seq(
groupID %% artifactID % revision,
groupID %% otherID % otherRevision
)
If you are using a dependency that was built with sbt, double the first
`%` to be `%%`:
::
libraryDependencies += groupID %% artifactID % revision
This will use the right jar for the dependency built with the version of
Scala that you are currently using. If you get an error while resolving
this kind of dependency, that dependency probably wasn't published for
the version of Scala you are using. See :doc:`Cross-Build` for details.
Ivy can select the latest revision of a module according to constraints
you specify. Instead of a fixed revision like `"1.6.1"`, you specify
`"latest.integration"`, `"2.9.+"`, or `"[1.0,)"`. See the `Ivy
revisions <http://ant.apache.org/ivy/history/2.3.0/ivyfile/dependency.html#revision>`_
documentation for details.
Resolvers
~~~~~~~~~
sbt uses the standard Maven2 repository by default.
Declare additional repositories with the form:
::
resolvers += name at location
For example:
::
libraryDependencies ++= Seq(
"org.apache.derby" % "derby" % "10.4.1.3",
"org.specs" % "specs" % "1.6.1"
)
resolvers += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
sbt can search your local Maven repository if you add it as a
repository:
::
resolvers += "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository"
See :doc:`Resolvers` for details on defining other types of repositories.
Override default resolvers
~~~~~~~~~~~~~~~~~~~~~~~~~~
:key:`resolvers` configures additional, inline user resolvers. By default,
`sbt` combines these resolvers with default repositories (Maven
Central and the local Ivy repository) to form :key:`externalResolvers`. To
have more control over repositories, set :key:`externalResolvers`
directly. To only specify repositories in addition to the usual
defaults, configure :key:`resolvers`.
For example, to use the Sonatype OSS Snapshots repository in addition to
the default repositories,
::
resolvers += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
To use the local repository, but not the Maven Central repository:
::
externalResolvers := Resolver.withDefaultResolvers(resolvers.value, mavenCentral = false)
Override all resolvers for all builds
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The repositories used to retrieve sbt, Scala, plugins, and application
dependencies can be configured globally and declared to override the
resolvers configured in a build or plugin definition. There are two
parts:
1. Define the repositories used by the launcher.
2. Specify that these repositories should override those in build
definitions.
The repositories used by the launcher can be overridden by defining
`~/.sbt/repositories`, which must contain a `[repositories]` section
with the same format as the :doc:`Launcher` configuration file. For
example:
.. code-block:: ini
[repositories]
local
my-maven-repo: http://example.org/repo
my-ivy-repo: http://example.org/ivy-repo/, [organization]/[module]/[revision]/[type]s/[artifact](-[classifier]).[ext]
A different location for the repositories file may be specified by the
`sbt.repository.config` system property in the sbt startup script. The
final step is to set `sbt.override.build.repos` to true to use these
repositories for dependency resolution and retrieval.
Explicit URL
~~~~~~~~~~~~
If your project requires a dependency that is not present in a
repository, a direct URL to its jar can be specified as follows:
::
libraryDependencies += "slinky" % "slinky" % "2.1" from "http://slinky2.googlecode.com/svn/artifacts/2.1/slinky.jar"
The URL is only used as a fallback if the dependency cannot be found
through the configured repositories. Also, the explicit URL is not
included in published metadata (that is, the pom or ivy.xml).
Disable Transitivity
~~~~~~~~~~~~~~~~~~~~
By default, these declarations fetch all project dependencies,
transitively. In some instances, you may find that the dependencies
listed for a project aren't necessary for it to build. Projects using
the Felix OSGI framework, for instance, only explicitly require its main
jar to compile and run. Avoid fetching artifact dependencies with either
`intransitive()` or `notTransitive()`, as in this example:
::
libraryDependencies += "org.apache.felix" % "org.apache.felix.framework" % "1.8.0" intransitive()
Classifiers
~~~~~~~~~~~
You can specify the classifier for a dependency using the `classifier`
method. For example, to get the jdk15 version of TestNG:
::
libraryDependencies += "org.testng" % "testng" % "5.7" classifier "jdk15"
For multiple classifiers, use multiple `classifier` calls:
::
libraryDependencies +=
"org.lwjgl.lwjgl" % "lwjgl-platform" % lwjglVersion classifier "natives-windows" classifier "natives-linux" classifier "natives-osx"
To obtain particular classifiers for all dependencies transitively, run
the :key:`updateClassifiers` task. By default, this resolves all artifacts
with the `sources` or `javadoc` classifier. Select the classifiers
to obtain by configuring the :key:`transitiveClassifiers` setting. For
example, to only retrieve sources:
::
transitiveClassifiers := Seq("sources")
Exclude Transitive Dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To exclude certain transitive dependencies of a dependency, use the
`excludeAll` or `exclude` methods. The `exclude` method should be
used when a pom will be published for the project. It requires the
organization and module name to exclude. For example,
::
libraryDependencies +=
"log4j" % "log4j" % "1.2.15" exclude("javax.jms", "jms")
The `excludeAll` method is more flexible, but because it cannot be
represented in a pom.xml, it should only be used when a pom doesn't need
to be generated. For example,
::
libraryDependencies +=
"log4j" % "log4j" % "1.2.15" excludeAll(
ExclusionRule(organization = "com.sun.jdmk"),
ExclusionRule(organization = "com.sun.jmx"),
ExclusionRule(organization = "javax.jms")
)
See
`ModuleID <../../api/sbt/ModuleID.html>`_
for API details.
Download Sources
~~~~~~~~~~~~~~~~
Downloading source and API documentation jars is usually handled by an
IDE plugin. These plugins use the :key:`updateClassifiers` and
:key:`updateSbtClassifiers` tasks, which produce an :doc:`Update-Report`
referencing these jars.
To have sbt download the dependency's sources without using an IDE
plugin, add `withSources()` to the dependency definition. For API
jars, add `withJavadoc()`. For example:
::
libraryDependencies +=
"org.apache.felix" % "org.apache.felix.framework" % "1.8.0" withSources() withJavadoc()
Note that this is not transitive. Use the `update-*classifiers` tasks
for that.
Extra Attributes
~~~~~~~~~~~~~~~~
`Extra
attributes <http://ant.apache.org/ivy/history/2.3.0/concept.html#extra>`_
can be specified by passing key/value pairs to the `extra` method.
To select dependencies by extra attributes:
::
libraryDependencies += "org" % "name" % "rev" extra("color" -> "blue")
To define extra attributes on the current project:
::
projectID := {
val previous = projectID.value
previous.extra("color" -> "blue", "component" -> "compiler-interface")
}
Inline Ivy XML
~~~~~~~~~~~~~~
sbt additionally supports directly specifying the configurations or
dependencies sections of an Ivy configuration file inline. You can mix
this with inline Scala dependency and repository declarations.
For example:
::
ivyXML :=
<dependencies>
<dependency org="javax.mail" name="mail" rev="1.4.2">
<exclude module="activation"/>
</dependency>
</dependencies>
Ivy Home Directory
~~~~~~~~~~~~~~~~~~
By default, sbt uses the standard Ivy home directory location
`${user.home}/.ivy2/`. This can be configured machine-wide, for use by
both the sbt launcher and by projects, by setting the system property
`sbt.ivy.home` in the sbt startup script (described in
:doc:`Setup </Getting-Started/Setup>`).
For example:
.. code-block:: text
java -Dsbt.ivy.home=/tmp/.ivy2/ ...
Checksums
~~~~~~~~~
sbt (`through
Ivy <http://ant.apache.org/ivy/history/latest-milestone/concept.html#checksum>`_)
verifies the checksums of downloaded files by default. It also publishes
checksums of artifacts by default. The checksums to use are specified by
the *checksums* setting.
To disable checksum checking during update:
::
checksums in update := Nil
To disable checksum creation during artifact publishing:
::
checksums in publishLocal := Nil
checksums in publish := Nil
The default value is:
::
checksums := Seq("sha1", "md5")
.. _conflict-management:
Conflict Management
~~~~~~~~~~~~~~~~~~~
The conflict manager decides what to do when dependency resolution brings in different versions of the same library.
By default, the latest revision is selected.
This can be changed by setting :key:`conflictManager`, which has type `ConflictManager <../../api/sbt/ConflictManager.html>`_.
See the `Ivy documentation <http://ant.apache.org/ivy/history/latest-milestone/settings/conflict-managers.html>`_ for details on the different conflict managers.
For example, to specify that no conflicts are allowed,
::
conflictManager := ConflictManager.strict
With this set, any conflicts will generate an error.
To resolve a conflict,
* configure a dependency override if the conflict is for a transitive dependency
* force the revision if it is a direct dependency
Both are explained in the following sections.
Forcing a revision
~~~~~~~~~~~~~~~~~~
The following direct dependencies will introduce a conflict on the log4j version because spark requires log4j 1.2.16.
::
libraryDependencies ++= Seq(
"org.spark-project" %% "spark-core" % "0.5.1",
"log4j" % "log4j" % "1.2.14"
)
The default conflict manager will select the newer version of log4j, 1.2.16.
This can be confirmed in the output of `show update`, which shows the newer version as being selected and the older version as not selected:
::
> show update
[info] compile:
[info] log4j:log4j:1.2.16: ...
...
[info] (EVICTED) log4j:log4j:1.2.14
...
To say that we prefer the version we've specified over the version from indirect dependencies, use `force()`:
::
libraryDependencies ++= Seq(
"org.spark-project" %% "spark-core" % "0.5.1",
"log4j" % "log4j" % "1.2.14" force()
)
The output of `show update` is now reversed:
::
> show update
[info] compile:
[info] log4j:log4j:1.2.14: ...
...
[info] (EVICTED) log4j:log4j:1.2.16
...
**Note:** this is an Ivy-only feature and cannot be included in a published pom.xml.
Forcing a revision without introducing a dependency
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Use of the `force()` method described in the previous section requires having a direct dependency.
However, it may be desirable to force a revision without introducing that direct dependency.
Ivy provides overrides for this purpose; in sbt, they are configured with the `dependencyOverrides` setting, which is a set of `ModuleID`\ s.
For example, the following dependency definitions conflict because spark uses log4j 1.2.16 and scalaxb uses log4j 1.2.17:
::
libraryDependencies ++= Seq(
"org.spark-project" %% "spark-core" % "0.5.1",
"org.scalaxb" %% "scalaxb" % "1.0.0"
)
The default conflict manager chooses the latest revision of log4j, 1.2.17:
::
> show update
[info] compile:
[info] log4j:log4j:1.2.17: ...
...
[info] (EVICTED) log4j:log4j:1.2.16
...
To change the version selected, add an override:
::
dependencyOverrides += "log4j" % "log4j" % "1.2.16"
This will not add a direct dependency on log4j, but will force the revision to be 1.2.16.
This is confirmed by the output of `show update`:
::
> show update
[info] compile:
[info] log4j:log4j:1.2.16
...
**Note:** this is an Ivy-only feature and will not be included in a published pom.xml.
Publishing
~~~~~~~~~~
See :doc:`Publishing` for how to publish your project.
.. _ivy-configurations:
Configurations
~~~~~~~~~~~~~~
Ivy configurations are a useful feature for your build when you need
custom groups of dependencies, such as for a plugin. Ivy configurations
are essentially named sets of dependencies. You can read the
`Ivy documentation <http://ant.apache.org/ivy/history/2.3.0/tutorial/conf.html>`_
for details.
The built-in use of configurations in sbt is similar to scopes in Maven.
sbt adds dependencies to different classpaths by the configuration that
they are defined in. See the description of `Maven
Scopes <http://maven.apache.org/guides/introduction/introduction-to-dependency-mechanism.html#Dependency_Scope>`_
for details.
You put a dependency in a configuration by selecting one or more of its
configurations to map to one or more of your project's configurations.
The most common case is to have one of your configurations `A` use a
dependency's configuration `B`. The mapping for this looks like
`"A->B"`. To apply this mapping to a dependency, add it to the end of
your dependency definition:
::
libraryDependencies += "org.scalatest" %% "scalatest" % "2.1.3" % "test->compile"
This says that your project's `"test"` configuration uses
`ScalaTest`'s `"compile"` configuration. See the `Ivy
documentation <http://ant.apache.org/ivy/history/2.3.0/tutorial/conf.html>`_
for more advanced mappings. Most projects published to Maven
repositories will use the `"compile"` configuration.
A useful application of configurations is to group dependencies that are
not used on normal classpaths. For example, your project might use a
`"js"` configuration to automatically download jQuery and then include
it in your jar by modifying :key:`resources`. For example:
::
ivyConfigurations += config("js") hide
libraryDependencies += "jquery" % "jquery" % "1.3.2" % "js->default" from "http://jqueryjs.googlecode.com/files/jquery-1.3.2.min.js"
resources ++= update.value.select(configurationFilter("js"))
The `config` method defines a new configuration with name `"js"` and
makes it private to the project so that it is not used for publishing.
See :doc:`/Detailed-Topics/Update-Report` for more information on selecting managed
artifacts.
A configuration without a mapping (no `"->"`) is mapped to `"default"`
or `"compile"`. The `->` is only needed when mapping to a different
configuration than those. The ScalaTest dependency above can then be
shortened to:
::
libraryDependencies += "org.scalatest" %% "scalatest" % "2.1.3" % "test"
.. _external-maven-ivy:
Maven/Ivy
---------
For this method, create the configuration files as you would for Maven
(`pom.xml`) or Ivy (`ivy.xml` and optionally `ivysettings.xml`).
External configuration is selected by using one of the following
expressions.
Ivy settings (resolver configuration)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
externalIvySettings()
or
::
externalIvySettings(baseDirectory.value / "custom-settings-name.xml")
or
::
externalIvySettingsURL(url("your_url_here"))
Ivy file (dependency configuration)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
externalIvyFile()
or
::
externalIvyFile(Def.setting(baseDirectory.value / "custom-name.xml"))
Because Ivy files specify their own configurations, sbt needs to know
which configurations to use for the compile, runtime, and test
classpaths. For example, to specify that the Compile classpath should
use the 'default' configuration:
::
classpathConfiguration in Compile := config("default")
Maven pom (dependencies only)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
externalPom()
or
::
externalPom(Def.setting(baseDirectory.value / "custom-name.xml"))
Full Ivy Example
~~~~~~~~~~~~~~~~
For example, a `build.sbt` using external Ivy files might look like:
::
externalIvySettings()
externalIvyFile(Def.setting(baseDirectory.value / "ivyA.xml"))
classpathConfiguration in Compile := Compile
classpathConfiguration in Test := Test
classpathConfiguration in Runtime := Runtime
Known limitations
~~~~~~~~~~~~~~~~~
Maven support is dependent on Ivy's support for Maven POMs. Known issues
with this support:
- Specifying `relativePath` in the `parent` section of a POM will
produce an error.
- Ivy ignores repositories specified in the POM. A workaround is to
specify repositories inline or in an Ivy `ivysettings.xml` file.

View File

@ -1,27 +0,0 @@
===========
Local Scala
===========
To use a locally built Scala version, define the :key:`scalaHome` setting,
which is of type `Option[File]`. This Scala version will only be used
for the build and not for sbt, which will still use the version it was
compiled against.
Example:
::
scalaHome := Some(file("/path/to/scala"))
Using a local Scala version will override the :key:`scalaVersion` setting
and will not work with :doc:`cross building <Cross-Build>`.
sbt reuses the class loader for the local Scala version. If you
recompile your local Scala version and you are using sbt interactively,
run
::
> reload
to use the new compilation results.

View File

@ -1,154 +0,0 @@
==============
Macro Projects
==============
Introduction
============
Some common problems arise when working with macros.
1. The current macro implementation in the compiler requires that macro implementations be compiled before they are used. The solution is typically to put the macros in a subproject or in their own configuration.
2. Sometimes the macro implementation should be distributed with the main code that uses them and sometimes the implementation should not be distributed at all.
The rest of the page shows example solutions to these problems.
Defining the Project Relationships
==================================
The macro implementation will go in a subproject in the `macro/` directory.
The main project in the project's base directory will depend on this subproject and use the macro.
This configuration is shown in the following build definition:
`project/Build.scala`
::
import sbt._
import Keys._
object MacroBuild extends Build {
lazy val main = Project("main", file(".")) dependsOn(macroSub)
lazy val macroSub = Project("macro", file("macro")) settings(
libraryDependencies += "org.scala-lang" % "scala-reflect" % scalaVersion.value
)
}
This specifies that the macro implementation goes in `macro/src/main/scala/` and tests go in `macro/src/test/scala/`.
It also shows that we need a dependency on the reflection API (`scala-reflect`) for the macro implementation.
As an example macro, we'll use `desugar` from `macrocosm <https://github.com/retronym/macrocosm>`_.
`macro/src/main/scala/demo/Demo.scala`
::
package demo
import language.experimental.macros
import scala.reflect.macros.Context
object Demo {
// Returns the tree of `a` after the typer, printed as source code.
def desugar(a: Any): String = macro desugarImpl
def desugarImpl(c: Context)(a: c.Expr[Any]) = {
import c.universe._
val s = show(a.tree)
c.Expr(
Literal(Constant(s))
)
}
}
`macro/src/test/scala/demo/Usage.scala`
::
package demo
object Usage {
def main(args: Array[String]) {
val s = Demo.desugar(List(1, 2, 3).reverse)
println(s)
}
}
This can then be run at the console:
.. raw:: text
$ sbt
> macro/test:run
immutable.this.List.apply[Int](1, 2, 3).reverse
Actual tests can be defined and run as usual with `macro/test`.
The main project can use the macro in the same way that the tests do.
For example,
`src/main/scala/MainUsage.scala`
::
package demo
object Usage {
def main(args: Array[String]) {
val s = Demo.desugar(List(6, 4, 5).sorted)
println(s)
}
}
.. raw:: text
$ sbt
> run
immutable.this.List.apply[Int](6, 4, 5).sorted[Int](math.this.Ordering.Int)
Common Interface
================
Sometimes, the macro implementation and the macro usage should share some common code.
In this case, declare another subproject for the common code and have the main project and the macro subproject depend on the new subproject.
For example, the project definitions from above would look like:
::
lazy val main = Project("main", file(".")) dependsOn(macroSub, commonSub)
lazy val macroSub = Project("macro", file("macro")) dependsOn(commonSub) settings(
libraryDependencies += "org.scala-lang" % "scala-reflect" % scalaVersion.value
)
lazy val commonSub = Project("common", file("common"))
Code in `common/src/main/scala/` is available for both the `macro` and `main` projects to use.
Distribution
============
To include the macro code with the main code, add the binary and source mappings from the macro subproject to the main project.
For example, the `main` Project definition above would now look like:
::
lazy val main = Project("main", file(".")) dependsOn(macroSub) settings(
// include the macro classes and resources in the main jar
mappings in (Compile, packageBin) ++= mappings.in(macroSub, Compile, packageBin).value,
// include the macro sources in the main source jar
mappings in (Compile, packageSrc) ++= mappings.in(macroSub, Compile, packageSrc).value
)
You may wish to disable publishing the macro implementation.
This is done by overriding :key:`publish` and :key:`publishLocal` to do nothing:
::
lazy val macroSub = Project("macro", file("macro")) settings(
publish := {},
publishLocal := {}
)
The techniques described here may also be used for the common interface described in the previous section.

View File

@ -1,125 +0,0 @@
=============
Mapping Files
=============
Tasks like :key:`package`, :key:`packageSrc`, and :key:`packageDoc` accept
mappings of type `Seq[(File, String)]` from an input file to the path
to use in the resulting artifact (jar). Similarly, tasks that copy files
accept mappings of type `Seq[(File, File)]` from an input file to the
destination file. There are some methods on
`PathFinder <../../api/sbt/PathFinder.html>`_
and `Path <../../api/sbt/Path$.html>`_
that can be useful for constructing the `Seq[(File, String)]` or
`Seq[(File, File)]` sequences.
A common way of making this sequence is to start with a `PathFinder`
or `Seq[File]` (which is implicitly convertible to `PathFinder`) and
then call the `pair` method. See the
`PathFinder <../../api/sbt/PathFinder.html>`_
API for details, but essentially this method accepts a function
`File => Option[String]` or `File => Option[File]` that is used to
generate mappings.
Relative to a directory
-----------------------
The `Path.relativeTo` method is used to map a `File` to its path
`String` relative to a base directory or directories. The
`relativeTo` method accepts a base directory or sequence of base
directories to relativize an input file against. The first directory
that is an ancestor of the file is used in the case of a sequence of
base directories.
For example:
::
import Path.relativeTo
val files: Seq[File] = file("/a/b/C.scala") :: Nil
val baseDirectories: Seq[File] = file("/a") :: Nil
val mappings: Seq[(File,String)] = files pair relativeTo(baseDirectories)
val expected = (file("/a/b/C.scala") -> "b/C.scala") :: Nil
assert( mappings == expected )
Rebase
------
The `Path.rebase` method relativizes an input file against one or more
base directories (the first argument) and then prepends a base String or
File (the second argument) to the result. As with `relativeTo`, the
first base directory that is an ancestor of the input file is used in
the case of multiple base directories.
For example, the following demonstrates building a
`Seq[(File, String)]` using `rebase`:
::
import Path.rebase
val files: Seq[File] = file("/a/b/C.scala") :: Nil
val baseDirectories: Seq[File] = file("/a") :: Nil
val mappings: Seq[(File,String)] = files pair rebase(baseDirectories, "pre/")
val expected = (file("/a/b/C.scala") -> "pre/b/C.scala" ) :: Nil
assert( mappings == expected )
Or, to build a `Seq[(File, File)]`:
::
import Path.rebase
val files: Seq[File] = file("/a/b/C.scala") :: Nil
val baseDirectories: Seq[File] = file("/a") :: Nil
val newBase: File = file("/new/base")
val mappings: Seq[(File,File)] = files pair rebase(baseDirectories, newBase)
val expected = (file("/a/b/C.scala") -> file("/new/base/b/C.scala") ) :: Nil
assert( mappings == expected )
Flatten
-------
The `Path.flat` method provides a function that maps a file to the
last component of the path (its name). For a File to File mapping, the
input file is mapped to a file with the same name in a given target
directory. For example:
::
import Path.flat
val files: Seq[File] = file("/a/b/C.scala") :: Nil
val mappings: Seq[(File,String)] = files pair flat
val expected = (file("/a/b/C.scala") -> "C.scala" ) :: Nil
assert( mappings == expected )
To build a `Seq[(File, File)]` using `flat`:
::
import Path.flat
val files: Seq[File] = file("/a/b/C.scala") :: Nil
val newBase: File = file("/new/base")
val mappings: Seq[(File,File)] = files pair flat(newBase)
val expected = (file("/a/b/C.scala") -> file("/new/base/C.scala") ) :: Nil
assert( mappings == expected )
Alternatives
------------
To try to apply several alternative mappings for a file, use `|`,
which is implicitly added to a function of type `A => Option[B]`. For
example, to try to relativize a file against some base directories but
fall back to flattening:
::
import Path.relativeTo
val files: Seq[File] = file("/a/b/C.scala") :: file("/zzz/D.scala") :: Nil
val baseDirectories: Seq[File] = file("/a") :: Nil
val mappings: Seq[(File,String)] = files pair ( relativeTo(baseDirectories) | flat )
   val expected = (file("/a/b/C.scala") -> "b/C.scala") :: (file("/zzz/D.scala") -> "D.scala") :: Nil
assert( mappings == expected )

View File

@ -1,128 +0,0 @@
===========================
Migrating from 0.7 to 0.10+
===========================
The assumption here is that you are familiar with sbt 0.7 but new to sbt |version|.
sbt |version|'s many new capabilities can be a bit overwhelming, but this
page should help you migrate to |version| with a minimum of fuss.
Why move to |version|?
----------------------
1. Faster builds (because it is smarter at re-compiling only what it
must)
2. Easier configuration. For simple projects a single `build.sbt` file
in your root directory is easier to create than
`project/build/MyProject.scala` was.
3. No more `lib_managed` directory, reducing disk usage and avoiding
backup and version control hassles.
4. :key:`update` is now much faster and it's invoked automatically by sbt.
5. Terser output. (Yet you can ask for more details if something goes
wrong.)
Step 1: Read the Getting Started Guide for sbt |version|
========================================================
Reading the :doc:`Getting Started Guide </Getting-Started/Welcome>` will
probably save you a lot of confusion.
Step 2: Install sbt |release|
=============================
Download sbt |version| as described on :doc:`the setup page </Getting-Started/Setup>`.
You can run |version| the same way that you run 0.7.x, either simply:
.. code-block:: console
java -jar sbt-launch.jar
Or (as most users do) with a shell script, as described on
:doc:`the setup page </Getting-Started/Setup>`.
For more details see :doc:`the setup page </Getting-Started/Setup>`.
Step 3: A technique for switching an existing project
=====================================================
Here is a technique for switching an existing project to |version| while
retaining the ability to switch back again at will. Some builds, such as
those with subprojects, are not suited for this technique, but if you
learn how to transition a simple project it will help you do a more
complex one next.
Preserve `project/` for 0.7.x project
---------------------------------------
Rename your `project/` directory to something like `project-old`.
This will hide it from sbt |version| but keep it in case you want to switch
back to 0.7.x.
Create `build.sbt` for |version|
----------------------------------
Create a `build.sbt` file in the root directory of your project. See
:doc:`.sbt build definition </Getting-Started/Basic-Def>` in the Getting
Started Guide, and for simple examples :doc:`/Examples/Quick-Configuration-Examples`.
If you have a simple project then converting your existing project file
to this format is largely a matter of re-writing your dependencies and
maven archive declarations in a modified yet familiar syntax.
This `build.sbt` file combines aspects of the old
`project/build/ProjectName.scala` and `build.properties` files. It
looks like a property file, yet contains Scala code in a special format.
A `build.properties` file like:
.. code-block:: text
#Project properties
#Fri Jan 07 15:34:00 GMT 2011
project.organization=org.myproject
project.name=My Project
sbt.version=0.7.7
project.version=1.0
def.scala.version=2.7.7
build.scala.versions=2.8.1
project.initialize=false
Now becomes part of your `build.sbt` file with lines like:
::
name := "My Project"
version := "1.0"
organization := "org.myproject"
scalaVersion := "2.9.2"
Currently, a `project/build.properties` is still needed to explicitly
select the sbt version. For example:
.. parsed-literal::
sbt.version=\ |release|
Run sbt |version|
-----------------
Now launch sbt. If you're lucky it works and you're done. For help
debugging, see below.
Switching back to sbt 0.7.x
---------------------------
If you get stuck and want to switch back, you can leave your
`build.sbt` file alone. sbt 0.7.x will not understand or notice it.
Just rename your |version| `project` directory to something like
`project10` and rename the backup of your old project from
`project-old` to `project` again.
FAQs
====
There's a section in the :doc:`FAQ </faq>` about migration from 0.7 that covers
several other important points.

View File

@ -1,340 +0,0 @@
==================
Parallel Execution
==================
Task ordering
=============
Task ordering is specified by declaring a task's inputs. Correctness of
execution requires correct input declarations. For example, the
following two tasks do not have an ordering specified:
::
write := IO.write(file("/tmp/sample.txt"), "Some content.")
read := IO.read(file("/tmp/sample.txt"))
sbt is free to execute `write` first and then `read`, `read` first
and then `write`, or `read` and `write` simultaneously. Execution
of these tasks is non-deterministic because they share a file. A correct
declaration of the tasks would be:
::
write := {
val f = file("/tmp/sample.txt")
IO.write(f, "Some content.")
f
}
read := IO.read(write.value)
This establishes an ordering: `read` must run after `write`. We've
also guaranteed that `read` will read from the same file that
`write` created.
Practical constraints
=====================
Note: The feature described in this section is experimental. The default
configuration of the feature is subject to change in particular.
Background
----------
Declaring inputs and dependencies of a task ensures the task is properly
ordered and that code executes correctly. In practice, tasks share
finite hardware and software resources and can require control over
utilization of these resources. By default, sbt executes tasks in
parallel (subject to the ordering constraints already described) in an
effort to utilize all available processors. Also by default, each test
class is mapped to its own task to enable executing tests in parallel.
Prior to sbt 0.12, user control over this process was restricted to:
1. Enabling or disabling all parallel execution
(`parallelExecution := false`, for example).
2. Enabling or disabling mapping tests to their own tasks
(`parallelExecution in Test := false`, for example).
(Although never exposed as a setting, the maximum number of tasks
running at a given time was internally configurable as well.)
The second configuration mechanism described above only selected between
running all of a project's tests in the same task or in separate tasks.
Each project still had a separate task for running its tests and so test
tasks in separate projects could still run in parallel if overall
execution was parallel. There was no way to restrict execution such
that only a single test out of all projects executed.
Configuration
-------------
sbt 0.12.0 introduces a general infrastructure for restricting task
concurrency beyond the usual ordering declarations. There are two parts
to these restrictions.
1. A task is tagged in order to classify its purpose and resource
utilization. For example, the :key:`compile` task may be tagged as
`Tags.Compile` and `Tags.CPU`.
2. A list of rules restrict the tasks that may execute concurrently. For
example, `Tags.limit(Tags.CPU, 4)` would allow up to four
computation-heavy tasks to run at a time.
The system is thus dependent on proper tagging of tasks and then on a
good set of rules.
Tagging Tasks
~~~~~~~~~~~~~
In general, a tag is associated with a weight that represents the task's
relative utilization of the resource represented by the tag. Currently,
this weight is an integer, but it may be a floating point in the future.
`Initialize[Task[T]]` defines two methods for tagging the constructed
Task: `tag` and `tagw`. The first method, `tag`, fixes the weight
to be 1 for the tags provided to it as arguments. The second method,
`tagw`, accepts pairs of tags and weights. For example, the following
associates the `CPU` and `Compile` tags with the :key:`compile` task
(with a weight of 1).
::
def myCompileTask = Def.task { ... } tag(Tags.CPU, Tags.Compile)
compile := myCompileTask.value
Different weights may be specified by passing tag/weight pairs to
`tagw`:
::
def downloadImpl = Def.task { ... } tagw(Tags.Network -> 3)
download := downloadImpl.value
Defining Restrictions
~~~~~~~~~~~~~~~~~~~~~
Once tasks are tagged, the :key:`concurrentRestrictions` setting sets
restrictions on the tasks that may be concurrently executed based on the
weighted tags of those tasks. This is necessarily a global set of rules,
so it must be scoped `in Global`. For example,
::
concurrentRestrictions in Global := Seq(
Tags.limit(Tags.CPU, 2),
Tags.limit(Tags.Network, 10),
Tags.limit(Tags.Test, 1),
Tags.limitAll( 15 )
)
The example limits:
- the number of CPU-using tasks to be no more than 2
- the number of tasks using the network to be no more than 10
- test execution to only one test at a time across all projects
- the total number of tasks to be less than or equal to 15
Note that these restrictions rely on proper tagging of tasks. Also, the
value provided as the limit must be at least 1 to ensure every task is
able to be executed. sbt will generate an error if this condition is not
met.
Most tasks won't be tagged because they are very short-lived. These
tasks are automatically assigned the label `Untagged`. You may want to
include these tasks in the CPU rule by using the `limitSum` method.
For example:
::
...
Tags.limitSum(2, Tags.CPU, Tags.Untagged)
...
Note that the limit is the first argument so that tags can be provided
as varargs.
Another useful convenience function is `Tags.exclusive`. This
specifies that a task with the given tag should execute in isolation. It
starts executing only when no other tasks are running (even if they have
the exclusive tag) and no other tasks may start execution until it
completes. For example, a task could be tagged with a custom tag
`Benchmark` and a rule configured to ensure such a task is executed by
itself:
::
...
Tags.exclusive(Benchmark)
...
Finally, for the most flexibility, you can specify a custom function of
type `Map[Tag,Int] => Boolean`. The `Map[Tag,Int]` represents the
weighted tags of a set of tasks. If the function returns `true`, it
indicates that the set of tasks is allowed to execute concurrently. If
the return value is `false`, the set of tasks will not be allowed to
execute concurrently. For example, `Tags.exclusive(Benchmark)` is
equivalent to the following:
::
...
Tags.customLimit { (tags: Map[Tag,Int]) =>
val exclusive = tags.getOrElse(Benchmark, 0)
// the total number of tasks in the group
val all = tags.getOrElse(Tags.All, 0)
// if there are no exclusive tasks in this group, this rule adds no restrictions
exclusive == 0 ||
// If there is only one task, allow it to execute.
all == 1
}
...
There are some basic rules that custom functions must follow, but the
main one to be aware of in practice is that if there is only one task,
it must be allowed to execute. sbt will generate a warning if the user
defines restrictions that prevent a task from executing at all and will
then execute the task anyway.
Built-in Tags and Rules
~~~~~~~~~~~~~~~~~~~~~~~
Built-in tags are defined in the `Tags` object. All tags listed below
must be qualified by this object. For example, `CPU` refers to the
`Tags.CPU` value.
The built-in semantic tags are:
- `Compile` - describes a task that compiles sources.
- `Test` - describes a task that performs a test.
- `Publish`
- `Update`
- `Untagged` - automatically added when a task doesn't explicitly
define any tags.
- `All` - automatically added to every task.
The built-in resource tags are:
- `Network` - describes a task's network utilization.
- `Disk` - describes a task's filesystem utilization.
- `CPU` - describes a task's computational utilization.
The tasks that are currently tagged by default are:
- :key:`compile` : `Compile`, `CPU`
- :key:`test` : `Test`
- :key:`update` : `Update`, `Network`
- :key:`publish`, :key:`publishLocal` : `Publish`, `Network`
Of additional note is that the default :key:`test` task will propagate its
tags to each child task created for each test class.
The default rules provide the same behavior as previous versions of sbt:
::
concurrentRestrictions in Global := {
val max = Runtime.getRuntime.availableProcessors
Tags.limitAll(if(parallelExecution.value) max else 1) :: Nil
}
As before, `parallelExecution in Test` controls whether tests are
mapped to separate tasks. To restrict the number of concurrently
executing tests in all projects, use:
::
concurrentRestrictions in Global += Tags.limit(Tags.Test, 1)
Custom Tags
-----------
To define a new tag, pass a String to the `Tags.Tag` method. For
example:
::
val Custom = Tags.Tag("custom")
Then, use this tag as any other tag. For example:
::
def aImpl = Def.task { ... } tag(Custom)
aCustomTask := aImpl.value
concurrentRestrictions in Global +=
Tags.limit(Custom, 1)
Future work
-----------
This is an experimental feature and there are several aspects that may
change or require further work.
Tagging Tasks
~~~~~~~~~~~~~
Currently, a tag applies only to the immediate computation it is defined
on. For example, in the following, the second compile definition has no
tags applied to it. Only the first computation is labeled.
::
def myCompileTask = Def.task { ... } tag(Tags.CPU, Tags.Compile)
compile := myCompileTask.value
compile := {
val result = compile.value
... do some post processing ...
}
Is this desirable? expected? If not, what is a better, alternative
behavior?
Fractional weighting
~~~~~~~~~~~~~~~~~~~~
Weights are currently `int`\ s, but could be changed to be
`double`\ s if fractional weights would be useful. It is important to
preserve a consistent notion of what a weight of 1 means so that
built-in and custom tasks share this definition and useful rules can be
written.
Default Behavior
~~~~~~~~~~~~~~~~
User feedback on what custom rules work for what workloads will help
determine a good set of default tags and rules.
Adjustments to Defaults
~~~~~~~~~~~~~~~~~~~~~~~
Rules should be easier to remove or redefine, perhaps by giving them
names. As it is, rules must be appended or all rules must be completely
redefined. Also, tags can only be defined for tasks at the original
definition site when using the `:=` syntax.
For removing tags, an implementation of `removeTag` should follow from
the implementation of `tag` in a straightforward manner.
Other characteristics
~~~~~~~~~~~~~~~~~~~~~
The system of a tag with a weight was selected as being reasonably
powerful and flexible without being too complicated. This selection is
not fundamental and could be enhanced, simplified, or replaced if
necessary. The fundamental interface that describes the constraints the
system must work within is `sbt.ConcurrentRestrictions`. This
interface is used to provide an intermediate scheduling queue between
task execution (`sbt.Execute`) and the underlying thread-based
parallel execution service (`java.util.concurrent.CompletionService`).
This intermediate queue restricts new tasks from being forwarded to the
`j.u.c.CompletionService` according to the
`sbt.ConcurrentRestrictions` implementation. See the
`sbt.ConcurrentRestrictions <https://github.com/sbt/sbt/blob/v0.12.0/tasks/ConcurrentRestrictions.scala>`_
API documentation for details.

View File

@ -1,172 +0,0 @@
==========================
Parsing and tab completion
==========================
This page describes the parser combinators in sbt. These parser
combinators are typically used to parse user input and provide tab
completion for :doc:`/Extending/Input-Tasks` and :doc:`/Extending/Commands`. If you are already
familiar with Scala's parser combinators, the methods are mostly the
same except that their arguments are strict. There are two additional
methods for controlling tab completion that are discussed at the end of
the section.
Parser combinators build up a parser from smaller parsers. A
`Parser[T]` in its most basic usage is a function
`String => Option[T]`. It accepts a `String` to parse and produces a
value wrapped in `Some` if parsing succeeds or `None` if it fails.
Error handling and tab completion make this picture more complicated,
but we'll stick with `Option` for this discussion.
The following examples assume the imports: ::
import sbt._
import complete.DefaultParsers._
Basic parsers
-------------
The simplest parser combinators match exact inputs:
::
// A parser that succeeds if the input is 'x', returning the Char 'x'
// and failing otherwise
val singleChar: Parser[Char] = 'x'
// A parser that succeeds if the input is "blue", returning the String "blue"
// and failing otherwise
val litString: Parser[String] = "blue"
In these examples, implicit conversions produce a literal `Parser`
from a `Char` or `String`. Other basic parser constructors are the
`charClass`, `success` and `failure` methods:
::
// A parser that succeeds if the character is a digit, returning the matched Char
// The second argument, "digit", describes the parser and is used in error messages
val digit: Parser[Char] = charClass( (c: Char) => c.isDigit, "digit")
// A parser that produces the value 3 for an empty input string, fails otherwise
val alwaysSucceed: Parser[Int] = success( 3 )
// Represents failure (always returns None for an input String).
// The argument is the error message.
val alwaysFail: Parser[Nothing] = failure("Invalid input.")
Built-in parsers
----------------
sbt comes with several built-in parsers defined in `sbt.complete.DefaultParsers <../../api/sbt/complete/DefaultParsers$.html>`_.
Some commonly used built-in parsers are:
* `Space`, `NotSpace`, `OptSpace`, and `OptNotSpace` for parsing spaces or non-spaces, required or not.
* `StringBasic` for parsing text that may be quoted.
* `IntBasic` for parsing a signed Int value.
* `Digit` and `HexDigit` for parsing a single decimal or hexadecimal digit.
* `Bool` for parsing a `Boolean` value
See the `DefaultParsers API <../../api/sbt/complete/DefaultParsers$.html>`_ for details.
Combining parsers
-----------------
We build on these basic parsers to construct more interesting parsers.
We can combine parsers in a sequence, choose between parsers, or repeat
a parser.
::
// A parser that succeeds if the input is "blue" or "green",
// returning the matched input
val color: Parser[String] = "blue" | "green"
// A parser that matches either "fg" or "bg"
val select: Parser[String] = "fg" | "bg"
// A parser that matches "fg" or "bg", a space, and then the color, returning the matched values.
// ~ is an alias for Tuple2.
val setColor: Parser[String ~ Char ~ String] =
select ~ ' ' ~ color
// Often, we don't care about the value matched by a parser, such as the space above
// For this, we can use ~> or <~, which keep the result of
// the parser on the right or left, respectively
val setColor2: Parser[String ~ String] = select ~ (' ' ~> color)
// Match one or more digits, returning a list of the matched characters
val digits: Parser[Seq[Char]] = charClass(_.isDigit, "digit").+
// Match zero or more digits, returning a list of the matched characters
val digits0: Parser[Seq[Char]] = charClass(_.isDigit, "digit").*
// Optionally match a digit
val optDigit: Parser[Option[Char]] = charClass(_.isDigit, "digit").?
Transforming results
--------------------
A key aspect of parser combinators is transforming results along the way
into more useful data structures. The fundamental methods for this are
`map` and `flatMap`. Here are examples of `map` and some
convenience methods implemented on top of `map`.
::
// Apply the `digits` parser and apply the provided function to the matched
// character sequence
val num: Parser[Int] = digits map { (chars: Seq[Char]) => chars.mkString.toInt }
// Match a digit character, returning the matched character or return '0' if the input is not a digit
val digitWithDefault: Parser[Char] = charClass(_.isDigit, "digit") ?? '0'
// The previous example is equivalent to:
val digitDefault: Parser[Char] =
charClass(_.isDigit, "digit").? map { (d: Option[Char]) => d getOrElse '0' }
// Succeed if the input is "blue" and return the value 4
val blue = "blue" ^^^ 4
// The above is equivalent to:
val blueM = "blue" map { (s: String) => 4 }
Controlling tab completion
--------------------------
Most parsers have reasonable default tab completion behavior. For
example, the string and character literal parsers will suggest the
underlying literal for an empty input string. However, it is impractical
to determine the valid completions for `charClass`, since it accepts
an arbitrary predicate. The `examples` method defines explicit
completions for such a parser:
::
val digit = charClass(_.isDigit, "digit").examples("0", "1", "2")
Tab completion will use the examples as suggestions. The other method
controlling tab completion is `token`. The main purpose of `token`
is to determine the boundaries for suggestions. For example, if your
parser is:
::
("fg" | "bg") ~ ' ' ~ ("green" | "blue")
then the potential completions on empty input are:
`console fg green fg blue bg green bg blue`
Typically, you want to suggest smaller segments or the number of
suggestions becomes unmanageable. A better parser is:
::
token( ("fg" | "bg") ~ ' ') ~ token("green" | "blue")
Now, the initial suggestions would be (with \_ representing a space):
`console fg_ bg_`
Be careful not to overlap or nest tokens, as in
`token("green" ~ token("blue"))`. The behavior is unspecified (and
should generate an error in the future), but typically the outermost
token definition will be used.

View File

@ -1,260 +0,0 @@
=====
Paths
=====
This page describes files, sequences of files, and file filters. The
base type used is
`java.io.File <http://download.oracle.com/javase/6/docs/api/java/io/File.html>`_,
but several methods are augmented through implicits:
- `RichFile <../../api/sbt/RichFile.html>`_
adds methods to `File`
- `PathFinder <../../api/sbt/PathFinder.html>`_
adds methods to `File` and `Seq[File]`
- `Path <../../api/sbt/Path$.html>`_ and
`IO <../../api/sbt/IO$.html>`_ provide
general methods related to files and I/O.
Constructing a File
-------------------
sbt 0.10+ uses
`java.io.File <http://download.oracle.com/javase/6/docs/api/java/io/File.html>`_
to represent a file instead of the custom `sbt.Path` class that was in
sbt 0.7 and earlier. sbt defines the alias `File` for `java.io.File`
so that an extra import is not necessary. The `file` method is an
alias for the single-argument `File` constructor to simplify
constructing a new file from a String:
::
val source: File = file("/home/user/code/A.scala")
Additionally, sbt augments File with a `/` method, which is an alias
for the two-argument `File` constructor for building up a path:
::
def readme(base: File): File = base / "README"
Relative files should only be used when defining the base directory of a
`Project`, where they will be resolved properly.
::
val root = Project("root", file("."))
Elsewhere, files should be absolute or be built up from an absolute base
`File`. The :key:`baseDirectory` setting defines the base directory of
the build or project depending on the scope.
For example, the following setting sets the unmanaged library directory
to be the "custom\_lib" directory in a project's base directory:
::
unmanagedBase := baseDirectory.value /"custom_lib"
Or, more concisely:
::
unmanagedBase := baseDirectory.value /"custom_lib"
This setting sets the location of the shell history to be in the base
directory of the build, irrespective of the project the setting is
defined in:
::
historyPath := Some( (baseDirectory in ThisBuild).value / ".history")
Path Finders
------------
A `PathFinder` computes a `Seq[File]` on demand. It is a way to
build a sequence of files. There are several methods that augment
`File` and `Seq[File]` to construct a `PathFinder`. Ultimately,
call `get` on the resulting `PathFinder` to evaluate it and get back
a `Seq[File]`.
Selecting descendants
~~~~~~~~~~~~~~~~~~~~~
The `**` method accepts a `java.io.FileFilter` and selects all files
matching that filter.
::
def scalaSources(base: File): PathFinder = (base / "src") ** "*.scala"
get
~~~
This selects all files that end in `.scala` that are in `src` or a
descendant directory. The list of files is not actually evaluated until
`get` is called:
::
def scalaSources(base: File): Seq[File] = {
val finder: PathFinder = (base / "src") ** "*.scala"
finder.get
}
If the filesystem changes, a second call to `get` on the same
`PathFinder` object will reflect the changes. That is, the `get`
method reconstructs the list of files each time. Also, `get` only
returns `File`\ s that existed at the time it was called.
Selecting children
~~~~~~~~~~~~~~~~~~
Selecting files that are immediate children of a subdirectory is done
with a single `*`:
::
def scalaSources(base: File): PathFinder = (base / "src") * "*.scala"
This selects all files that end in `.scala` that are in the `src`
directory.
Existing files only
~~~~~~~~~~~~~~~~~~~
If a selector, such as `/`, `**`, or `*`, is used on a path that
does not represent a directory, the path list will be empty:
::
def emptyFinder(base: File) = (base / "lib" / "ivy.jar") * "not_possible"
Name Filter
~~~~~~~~~~~
The argument to the child and descendant selectors `*` and `**` is
actually a `NameFilter`. An implicit is used to convert a `String`
to a `NameFilter` that interprets `*` to represent zero or more
characters of any value. See the Name Filters section below for more
information.
Combining PathFinders
~~~~~~~~~~~~~~~~~~~~~
Another operation is concatenation of `PathFinder`\ s:
::
def multiPath(base: File): PathFinder =
(base / "src" / "main") +++
(base / "lib") +++
(base / "target" / "classes")
When evaluated using `get`, this will return `src/main/`, `lib/`,
and `target/classes/`. The concatenated finder supports all standard
methods. For example,
::
def jars(base: File): PathFinder =
(base / "lib" +++ base / "target") * "*.jar"
selects all jars directly in the "lib" and "target" directories.
A common problem is excluding version control directories. This can be
accomplished as follows:
::
def sources(base: File) =
( (base / "src") ** "*.scala") --- ( (base / "src") ** ".svn" ** "*.scala")
The first selector selects all Scala sources and the second selects all
sources that are a descendant of a `.svn` directory. The `---`
method removes all files returned by the second selector from the
sequence of files returned by the first selector.
Filtering
~~~~~~~~~
There is a `filter` method that accepts a predicate of type
`File => Boolean` and is non-strict:
::
// selects all directories under "src"
def srcDirs(base: File) = ( (base / "src") ** "*") filter { _.isDirectory }
// selects archives (.zip or .jar) that are selected by 'somePathFinder'
def archivesOnly(base: PathFinder) = base filter ClasspathUtilities.isArchive
Empty PathFinder
~~~~~~~~~~~~~~~~
`PathFinder.empty` is a `PathFinder` that returns the empty sequence
when `get` is called:
::
assert( PathFinder.empty.get == Seq[File]() )
PathFinder to String conversions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Convert a `PathFinder` to a String using one of the following methods:
- `toString` is for debugging. It puts the absolute path of each
component on its own line.
- `absString` gets the absolute paths of each component and separates
them by the platform's path separator.
- `getPaths` produces a `Seq[String]` containing the absolute paths
of each component
Mappings
~~~~~~~~
The packaging and file copying methods in sbt expect values of type
`Seq[(File,String)]` and `Seq[(File,File)]`, respectively. These are
mappings from the input file to its (String) path in the jar or its
(File) destination. This approach replaces the relative path approach
(using the `##` method) from earlier versions of sbt.
Mappings are discussed in detail on the :doc:`Mapping-Files` page.
.. _file-filter:
File Filters
------------
The argument to `*` and `**` is of type
`java.io.FileFilter <http://download.oracle.com/javase/6/docs/api/java/io/FileFilter.html>`_.
sbt provides combinators for constructing `FileFilter`\ s.
First, a String may be implicitly converted to a `FileFilter`. The
resulting filter selects files with a name matching the string, with a
`*` in the string interpreted as a wildcard. For example, the
following selects all Scala sources with the word "Test" in them:
::
def testSrcs(base: File): PathFinder = (base / "src") * "*Test*.scala"
There are some useful combinators added to `FileFilter`. The `||`
method declares alternative `FileFilter`\ s. The following example
selects all Java or Scala source files under "src":
::
def sources(base: File): PathFinder = (base / "src") ** ("*.scala" || "*.java")
The `--` method excludes a files matching a second filter from the
files matched by the first:
::
def imageResources(base: File): PathFinder =
(base/"src"/"main"/"resources") * ("*.png" -- "logo.png")
This will get `right.png` and `left.png`, but not `logo.png`, for
example.

View File

@ -1,14 +0,0 @@
==========================
Plugins and Best Practices
==========================
This part of the documentation has pages documenting particular sbt topics in detail.
Before reading anything in here, you will need the information in the
:doc:`Getting Started Guide </Getting-Started/Welcome>` as a foundation.
.. toctree::
:maxdepth: 2
Best-Practices
/Extending/Plugins-Best-Practices
/Extending/Plugins

View File

@ -1,131 +0,0 @@
==================
External Processes
==================
Usage
=====
`sbt` includes a process library to simplify working with external
processes. The library is available without import in build definitions
and at the interpreter started by the :doc:`consoleProject <Console-Project>` task.
To run an external command, follow it with an exclamation mark `!`:
::
"find project -name *.jar" !
An implicit converts the `String` to `sbt.ProcessBuilder`, which
defines the `!` method. This method runs the constructed command,
waits until the command completes, and returns the exit code.
Alternatively, the `run` method defined on `ProcessBuilder` runs the
command and returns an instance of `sbt.Process`, which can be used to
`destroy` the process before it completes. With no arguments, the
`!` method sends output to standard output and standard error. You can
pass a `Logger` to the `!` method to send output to the `Logger`:
::
"find project -name *.jar" ! log
Two alternative implicit conversions are from `scala.xml.Elem` or
`List[String]` to `sbt.ProcessBuilder`. These are useful for
constructing commands. An example of the first variant from the android
plugin:
::
<x> {dxPath.absolutePath} --dex --output={classesDexPath.absolutePath} {classesMinJarPath.absolutePath}</x> !
If you need to set the working directory or modify the environment, call
`sbt.Process` explicitly, passing the command sequence (command and
argument list) or command string first and the working directory second.
Any environment variables can be passed as a vararg list of key/value
String pairs.
::
Process("ls" :: "-l" :: Nil, Path.userHome, "key1" -> value1, "key2" -> value2) ! log
Operators are defined to combine commands. These operators start with
`#` in order to keep the precedence the same and to separate them from
the operators defined elsewhere in `sbt` for filters. In the following
operator definitions, `a` and `b` are subcommands.
- `a #&& b` Execute `a`. If the exit code is nonzero, return that
exit code and do not execute `b`. If the exit code is zero, execute
`b` and return its exit code.
- `a #|| b` Execute `a`. If the exit code is zero, return zero for
the exit code and do not execute `b`. If the exit code is nonzero,
execute `b` and return its exit code.
- `a #| b` Execute `a` and `b`, piping the output of `a` to the
input of `b`.
There are also operators defined for redirecting output to `File`\ s
and input from `File`\ s and `URL`\ s. In the following definitions,
`url` is an instance of `URL` and `file` is an instance of
`File`.
- `a #< url` or `url #> a` Use `url` as the input to `a`. `a`
may be a `File` or a command.
- `a #< file` or `file #> a` Use `file` as the input to `a`.
`a` may be a `File` or a command.
- `a #> file` or `file #< a` Write the output of `a` to `file`.
`a` may be a `File`, `URL`, or a command.
- `a #>> file` or `file #<< a` Append the output of `a` to
`file`. `a` may be a `File`, `URL`, or a command.
There are some additional methods to get the output from a forked
process into a `String` or the output lines as a `Stream[String]`.
Here are some examples, but see the `ProcessBuilder
API <../../api/sbt/ProcessBuilder.html>`_
for details.
::
val listed: String = "ls" !!
val lines2: Stream[String] = "ls" lines_!
Finally, there is a `cat` method to send the contents of `File`\ s
and `URL`\ s to standard output.
Examples
--------
Download a `URL` to a `File`:
::
url("http://databinder.net/dispatch/About") #> file("About.html") !
or
file("About.html") #< url("http://databinder.net/dispatch/About") !
Copy a `File`:
::
file("About.html") #> file("About_copy.html") !
or
file("About_copy.html") #< file("About.html") !
Append the contents of a `URL` to a `File` after filtering through
`grep`:
::
url("http://databinder.net/dispatch/About") #> "grep JSON" #>> file("About_JSON") !
or
file("About_JSON") #<< ( "grep JSON" #< url("http://databinder.net/dispatch/About") ) !
Search for uses of `null` in the source directory:
::
"find src -name *.scala -exec grep null {} ;" #| "xargs test -z" #&& "echo null-free" #|| "echo null detected" !
Use `cat`::
val spde = url("http://technically.us/spde/About")
val dispatch = url("http://databinder.net/dispatch/About")
val build = file("project/build.properties")
cat(spde, dispatch, build) #| "grep -i scala" !

View File

@ -1,133 +0,0 @@
==================
Proxy Repositories
==================
It's often the case that users wish to set up a maven/ivy proxy
repository inside their corporate firewall, and have developer
sbt instances resolve artifacts through such a proxy. Let's detail
what exact changes must be made for this to work.
Overview
========
The situation arises when many developers inside an organization
are attempting to resolve artifacts. Each developer's machine
will hit the internet and download an artifact, regardless of
whether or not another on the team has already done so. Proxy
repositories provide a single point of remote download for an
organization. In addition to control and security concerns,
Proxy repositories are primarily important for increased speed
across a team.
.. image:: proxy-cloud-setup.png
There are many good proxy repository solutions out there, with
the big three being (in alphabetical order):
* Archiva_
* Artifactory_
* Nexus_
Once you have a proxy repository installed and configured,
then it's time to configure sbt for your needs. Read the
note at the bottom about proxy issues with ivy repositories.
sbt Configuration
=================
sbt requires configuration in two places to make use of a
proxy repository. The first is the `~/.sbt/repositories`
file, and the second is the launcher script.
`~/.sbt/repositories`
-----------------------
The repositories file is an external configuration for the Launcher.
The exact syntax for the configuration file is detailed in the
:doc:`/Detailed-Topics/Launcher`.
Here's an example config:
.. code-block:: ini
[repositories]
local
my-ivy-proxy-releases: http://repo.company.com/ivy-releases/, [organization]/[module]/(scala_[scalaVersion]/)(sbt_[sbtVersion]/)[revision]/[type]s/[artifact](-[classifier]).[ext]
my-maven-proxy-releases: http://repo.company.com/maven-releases/
This example configuration has three repositories configured for sbt.
The first resolver is `local`, and is used so that artifacts pushed
using `publish-local` will be seen in other sbt projects.
The second resolver is `my-ivy-proxy-releases`. This repository
is used to resolve sbt *itself* from the company proxy repository,
as well as any sbt plugins that may be required. Note that the
ivy resolver pattern is important, make sure that yours matches the
one shown or you may not be able to resolve sbt plugins.
The final resolver is `my-maven-proxy-releases`. This repository
is a proxy for all standard maven repositories, including
maven central.
Launcher Script
---------------------
The sbt launcher supports two configuration options that
allow the usage of proxy repositories. The first is the
`sbt.override.build.repos` setting and the second is the
`sbt.repository.config` setting.
`sbt.override.build.repos`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This setting is used to specify that all sbt project added resolvers
should be ignored in favor of those configured in the `repositories`
configuration. Using this with a properly configured
`~/.sbt/repositories` file leads to only your proxy repository
used for builds.
It is specified like so:
.. code-block:: console
-Dsbt.override.build.repos=true
`sbt.repository.config`
~~~~~~~~~~~~~~~~~~~~~~~~~
If you are unable to create a `~/.sbt/repositories` file, due
to user permission errors or for convenience of developers, you
can modify the sbt start script directly with the following:
.. code-block:: console
-Dsbt.repository.config=<path-to-your-repo-file>
This is only necessary if users do not already have their own default
repository file.
Proxying Ivy Repositories
=========================
The most common mistake made when setting up a proxy repository for
sbt is attempting to *merge* both *maven* and *ivy* repositories
into the *same* proxy repository. While some repository managers will
allow this, it's not recommended to do so.
Even if your company does not use ivy, sbt uses a custom layout to
handle binary compatibility constraints of its own plugins. To ensure
that these are resolved correctly, simply set up two virtual/proxy
repositories, one for maven and one for ivy.
Here's an example setup:
.. image:: proxy-ivy-mvn-setup.png
.. _Archiva: http://archiva.apache.org/
.. _Artifactory: http://www.jfrog.com/home/v_artifactory_opensource_overview
.. _Nexus: http://www.sonatype.org/nexus/

View File

@ -1,167 +0,0 @@
==========
Publishing
==========
This page describes how to publish your project. Publishing consists of
uploading a descriptor, such as an Ivy file or Maven POM, and artifacts,
such as a jar or war, to a repository so that other projects can specify
your project as a dependency.
The :key:`publish` action is used to publish your project to a remote
repository. To use publishing, you need to specify the repository to
publish to and the credentials to use. Once these are set up, you can
run :key:`publish`.
The :key:`publishLocal` action is used to publish your project to a local
Ivy repository. You can then use this project from other projects on the
same machine.
Define the repository
---------------------
To specify the repository, assign a repository to :key:`publishTo` and
optionally set the publishing style. For example, to upload to Nexus:
::
publishTo := Some("Sonatype Snapshots Nexus" at "https://oss.sonatype.org/content/repositories/snapshots")
To publish to a local repository:
::
publishTo := Some(Resolver.file("file", new File( "path/to/my/maven-repo/releases" )) )
Publishing to the users local maven repository:
::
publishTo := Some(Resolver.file("file", new File(Path.userHome.absolutePath+"/.m2/repository")))
If you're using Maven repositories you will also have to select the
right repository depending on your artifacts: SNAPSHOT versions go to
the /snapshot repository while other versions go to the /releases
repository. Doing this selection can be done by using the value of the
:key:`version` SettingKey:
::
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (version.value.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
}
Credentials
-----------
There are two ways to specify credentials for such a repository. The
first is to specify them inline:
::
credentials += Credentials("Sonatype Nexus Repository Manager", "nexus.scala-tools.org", "admin", "admin123")
The second and better way is to load them from a file, for example:
::
credentials += Credentials(Path.userHome / ".ivy2" / ".credentials")
The credentials file is a properties file with keys `realm`, `host`,
`user`, and `password`. For example:
.. code-block:: text
realm=Sonatype Nexus Repository Manager
host=nexus.scala-tools.org
user=admin
password=admin123
Cross-publishing
----------------
To support multiple incompatible Scala versions, enable cross building
and do `+ publish` (see :doc:`Cross-Build`). See :doc:`Resolvers` for other
supported repository types.
Published artifacts
-------------------
By default, the main binary jar, a sources jar, and an API documentation
jar are published. You can declare other types of artifacts to publish
and disable or modify the default artifacts. See the :doc:`Artifacts` page
for details.
Modifying the generated POM
---------------------------
When :key:`publishMavenStyle` is `true`, a POM is generated by the
:key:`makePom` action and published to the repository instead of an Ivy
file. This POM file may be altered by changing a few settings. Set
:key:`pomExtra` to provide XML (`scala.xml.NodeSeq`) to insert directly
into the generated pom. For example:
::
pomExtra :=
<licenses>
<license>
<name>Apache 2</name>
<url>http://www.apache.org/licenses/LICENSE-2.0.txt</url>
<distribution>repo</distribution>
</license>
</licenses>
:key:`makePom` adds to the POM any Maven-style repositories you have
declared. You can filter these by modifying :key:`pomRepositoryFilter`,
which by default excludes local repositories. To instead only include
local repositories:
::
pomIncludeRepository := { (repo: MavenRepository) =>
repo.root.startsWith("file:")
}
There is also a :key:`pomPostProcess` setting that can be used to
manipulate the final XML before it is written. Its type is
`Node => Node`.
::
pomPostProcess := { (node: Node) =>
...
}
Publishing Locally
------------------
The :key:`publishLocal` command will publish to the local Ivy repository.
By default, this is in `${user.home}/.ivy2/local`. Other projects on
the same machine can then list the project as a dependency. For example,
if the SBT project you are publishing has configuration parameters like:
::
name := "My Project"
organization := "org.me"
version := "0.1-SNAPSHOT"
Then another project can depend on it:
::
libraryDependencies += "org.me" %% "my-project" % "0.1-SNAPSHOT"
The version number you select must end with `SNAPSHOT`, or you must
change the version number each time you publish. Ivy maintains a cache,
and it stores even local projects in that cache. If Ivy already has a
version cached, it will not check the local repository for updates,
unless the version number matches a `changing
pattern <http://ant.apache.org/ivy/history/2.3.0/concept.html#change>`_,
and `SNAPSHOT` is one such pattern.

View File

@ -1,204 +0,0 @@
=========
Resolvers
=========
Maven
-----
Resolvers for Maven2 repositories are added as follows:
.. code-block:: scala
resolvers +=
"Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
This is the most common kind of user-defined resolvers. The rest of this
page describes how to define other types of repositories.
Predefined
----------
A few predefined repositories are available and are listed below
- `DefaultMavenRepository` This is the main Maven repository at
http://repo1.maven.org/maven2/ and is included by default
- `JavaNet1Repository` This is the Maven 1 repository at
http://download.java.net/maven/1/
For example, to use the `java.net` repository, use the following
setting in your build definition:
::
resolvers += JavaNet1Repository
Predefined repositories will go under Resolver going forward so they are
in one place:
::
Resolver.sonatypeRepo("releases") // Or "snapshots"
Custom
------
sbt provides an interface to the repository types available in Ivy:
file, URL, SSH, and SFTP. A key feature of repositories in Ivy is using
`patterns <http://ant.apache.org/ivy/history/latest-milestone/concept.html#patterns>`_
to configure repositories.
Construct a repository definition using the factory in `sbt.Resolver`
for the desired type. This factory creates a `Repository` object that
can be further configured. The following table contains links to the Ivy
documentation for the repository type and the API documentation for the
factory and repository class. The SSH and SFTP repositories are
configured identically except for the name of the factory. Use
`Resolver.ssh` for SSH and `Resolver.sftp` for SFTP.
.. _Ivy filesystem: http://ant.apache.org/ivy/history/latest-milestone/resolver/filesystem.html
.. _filesystem factory: ../../api/sbt/Resolver$$file$.html
.. _Ivy sftp: http://ant.apache.org/ivy/history/latest-milestone/resolver/sftp.html
.. _FileRepository API: ../../api/sbt/FileRepository.html
.. _sftp factory: ../../api/sbt/Resolver$$Define.html
.. _SftpRepository API: ../../api/sbt/SftpRepository.html
.. _Ivy ssh: http://ant.apache.org/ivy/history/latest-milestone/resolver/ssh.html
.. _ssh factory: ../../api/sbt/Resolver$$Define.html
.. _SshRepository API: ../../api/sbt/SshRepository.html
.. _Ivy url: http://ant.apache.org/ivy/history/latest-milestone/resolver/url.html
.. _url factory: ../../api/sbt/Resolver$$url$.html
.. _URLRepository API: ../../api/sbt/URLRepository.html
========== ================= ================= ===================== =====================
Type Factory Ivy Docs Factory API Repository Class API
========== ================= ================= ===================== =====================
Filesystem `Resolver.file` `Ivy filesystem`_ `filesystem factory`_ `FileRepository API`_
SFTP `Resolver.sftp` `Ivy sftp`_ `sftp factory`_ `SftpRepository API`_
SSH `Resolver.ssh` `Ivy ssh`_ `ssh factory`_ `SshRepository API`_
URL `Resolver.url` `Ivy url`_ `url factory`_ `URLRepository API`_
========== ================= ================= ===================== =====================
Basic Examples
~~~~~~~~~~~~~~
These are basic examples that use the default Maven-style repository
layout.
Filesystem
^^^^^^^^^^
Define a filesystem repository in the `test` directory of the current
working directory and declare that publishing to this repository must be
atomic.
::
resolvers += Resolver.file("my-test-repo", file("test")) transactional()
URL
^^^
Define a URL repository at `"http://example.org/repo-releases/"`.
::
resolvers += Resolver.url("my-test-repo", url("http://example.org/repo-releases/"))
To specify an Ivy repository, use:
::
resolvers += Resolver.url("my-test-repo", url)(Resolver.ivyStylePatterns)
or customize the layout pattern described in the Custom Layout section
below.
SFTP and SSH Repositories
^^^^^^^^^^^^^^^^^^^^^^^^^
The following defines a repository that is served by SFTP from host
`"example.org"`:
::
resolvers += Resolver.sftp("my-sftp-repo", "example.org")
To explicitly specify the port:
::
resolvers += Resolver.sftp("my-sftp-repo", "example.org", 22)
To specify a base path:
::
resolvers += Resolver.sftp("my-sftp-repo", "example.org", "maven2/repo-releases/")
Authentication for the repositories returned by `sftp` and `ssh` can
be configured by the `as` methods.
To use password authentication:
::
resolvers += Resolver.ssh("my-ssh-repo", "example.org") as("user", "password")
or to be prompted for the password:
::
resolvers += Resolver.ssh("my-ssh-repo", "example.org") as("user")
To use key authentication:
::
resolvers += {
val keyFile: File = ...
Resolver.ssh("my-ssh-repo", "example.org") as("user", keyFile, "keyFilePassword")
}
or if no keyfile password is required or if you want to be prompted for
it:
::
resolvers += Resolver.ssh("my-ssh-repo", "example.org") as("user", keyFile)
To specify the permissions used when publishing to the server:
::
resolvers += Resolver.ssh("my-ssh-repo", "example.org") withPermissions("0644")
This is a chmod-like mode specification.
Custom Layout
~~~~~~~~~~~~~
These examples specify custom repository layouts using patterns. The
factory methods accept a `Patterns` instance that defines the
patterns to use. The patterns are first resolved against the base file
or URL. The default patterns give the default Maven-style layout.
Provide a different Patterns object to use a different layout. For
example:
::
resolvers += Resolver.url("my-test-repo", url)( Patterns("[organisation]/[module]/[revision]/[artifact].[ext]") )
You can specify multiple patterns or patterns for the metadata and
artifacts separately. You can also specify whether the repository should
be Maven compatible (as defined by Ivy). See the `patterns
API <../../api/sbt/Patterns$.html>`_ for
the methods to use.
For filesystem and URL repositories, you can specify absolute patterns
by omitting the base URL, passing an empty `Patterns` instance, and
using `ivys` and `artifacts`:
::
resolvers += Resolver.url("my-test-repo") artifacts
"http://example.org/[organisation]/[module]/[revision]/[artifact].[ext]"

View File

@ -1,101 +0,0 @@
====================
Running Project Code
====================
The :key:`run` and :key:`console` actions provide a means for running user
code in the same virtual machine as sbt. This page describes the
problems with doing so, how sbt handles these problems, what types of
code can use this feature, and what types of code must use a :doc:`forked jvm <Forking>`.
Skip to User Code if you just want to see when you should use a :doc:`forked jvm <Forking>`.
Problems
========
System.exit
-----------
User code can call `System.exit`, which normally shuts down the JVM.
Because the :key:`run` and :key:`console` actions run inside the same JVM as
sbt, this also ends the build and requires restarting sbt.
Threads
-------
User code can also start other threads. Threads can be left running
after the main method returns. In particular, creating a GUI creates
several threads, some of which may not terminate until the JVM
terminates. The program is not completed until either `System.exit` is
called or all non-daemon threads terminate.
Deserialization and class loading
---------------------------------
During deserialization, the wrong class loader might be used for various
complex reasons. This can happen in many scenarios, and running under
SBT is just one of them. This is discussed for instance in issues :issue:`163` and
:issue:`136`. The reason is
explained
`here <http://jira.codehaus.org/browse/GROOVY-1627?focusedCommentId=85900#comment-85900>`_.
sbt's Solutions
===============
System.exit
-----------
User code is run with a custom `SecurityManager` that throws a custom
`SecurityException` when `System.exit` is called. This exception is
caught by sbt. sbt then disposes of all top-level windows, interrupts
(not stops) all user-created threads, and handles the exit code. If the
exit code is nonzero, :key:`run` and :key:`console` complete unsuccessfully.
If the exit code is zero, they complete normally.
Threads
-------
sbt makes a list of all threads running before executing user code.
After the user code returns, sbt can then determine the threads created
by the user code. For each user-created thread, sbt replaces the
uncaught exception handler with a custom one that handles the custom
`SecurityException` thrown by calls to `System.exit` and delegates
to the original handler for everything else. sbt then waits for each
created thread to exit or for `System.exit` to be called. sbt handles
a call to `System.exit` as described above.
A user-created thread is one that is not in the `system` thread group
and is not an `AWT` implementation thread (e.g. `AWT-XAWT`,
`AWT-Windows`). User-created threads include the `AWT-EventQueue-*`
thread(s).
User Code
=========
Given the above, when can user code be run with the :key:`run` and
:key:`console` actions?
The user code cannot rely on shutdown hooks and at least one of the
following situations must apply for user code to run in the same JVM:
1. User code creates no threads.
2. User code creates a GUI and no other threads.
3. The program ends when user-created threads terminate on their own.
4. `System.exit` is used to end the program and user-created threads
terminate when interrupted.
5. No deserialization is done, or the deserialization code
ensures that the right class loader is used, as in
https://github.com/NetLogo/NetLogo/blob/master/src/main/org/nlogo/util/ClassLoaderObjectInputStream.scala
or
https://github.com/scala/scala/blob/master/src/actors/scala/actors/remote/JavaSerializer.scala#L20.
The requirements on threading and shutdown hooks are required because
the JVM does not actually shut down. So, shutdown hooks cannot be run
and threads are not terminated unless they stop when interrupted. If
these requirements are not met, code must run in a :doc:`forked jvm <Forking>`.
The feature of allowing `System.exit` and multiple threads to be used
cannot completely emulate the situation of running in a separate JVM and
is intended for development. Program execution should be checked in a
:doc:`forked jvm <Forking>` when using multiple threads or `System.exit`.
As of sbt 0.13.1, multiple `run` instances can be managed. There can
only be one application that uses AWT at a time, however.

View File

@ -1,152 +0,0 @@
===============================
Scripts, REPL, and Dependencies
===============================
sbt has two alternative entry points that may be used to:
- Compile and execute a Scala script containing dependency declarations
or other sbt settings
- Start up the Scala REPL, defining the dependencies that should be on
the classpath
These entry points should be considered experimental. A notable
disadvantage of these approaches is the startup time involved.
Setup
=====
To set up these entry points, you can either use
`conscript <https://github.com/n8han/conscript>`_ or manually construct
the startup scripts. In addition, there is a `setup
script <https://github.com/paulp/xsbtscript>`_ for the script mode that
only requires a JRE installed.
Setup with Conscript
--------------------
Install `conscript <https://github.com/n8han/conscript>`_.
.. code-block:: console
cs sbt/sbt --branch 0.12.0
This will create two scripts: `screpl` and `scalas`.
Manual Setup
------------
Duplicate your standard `sbt` script, which was set up according to
:doc:`Setup </Getting-Started/Setup>`, as `scalas` and `screpl` (or
whatever names you like).
`scalas` is the script runner and should use `sbt.ScriptMain` as
the main class, by adding the `-Dsbt.main.class=sbt.ScriptMain`
parameter to the `java` command. Its command line should look like:
.. code-block:: console
java -Dsbt.main.class=sbt.ScriptMain -Dsbt.boot.directory=/home/user/.sbt/boot -jar sbt-launch.jar "$@"
For the REPL runner `screpl`, use `sbt.ConsoleMain` as the main
class:
.. code-block:: console
java -Dsbt.main.class=sbt.ConsoleMain -Dsbt.boot.directory=/home/user/.sbt/boot -jar sbt-launch.jar "$@"
In each case, `/home/user/.sbt/boot` should be replaced with wherever
you want sbt's boot directory to be; you might also need to give more
memory to the JVM via `-Xms512M -Xmx1536M` or similar options, just
like shown in :doc:`Setup </Getting-Started/Setup>`.
Usage
=====
sbt Script runner
-----------------
The script runner can run a standard Scala script, but with the
additional ability to configure sbt. sbt settings may be embedded in the
script in a comment block that opens with `/***`.
Example
~~~~~~~
Copy the following script and make it executable. You may need to adjust
the first line depending on your script name and operating system. When
run, the example should retrieve Scala, the required dependencies,
compile the script, and run it directly. For example, if you name it
`dispatch_example.scala`, you would do on Unix:
.. code-block:: console
chmod u+x dispatch_example.scala
./dispatch_example.scala
::
#!/usr/bin/env scalas
!#
/***
scalaVersion := "2.9.0-1"
libraryDependencies ++= Seq(
"net.databinder" %% "dispatch-twitter" % "0.8.3",
"net.databinder" %% "dispatch-http" % "0.8.3"
)
*/
import dispatch.{ json, Http, Request }
import dispatch.twitter.Search
import json.{ Js, JsObject }
def process(param: JsObject) = {
val Search.text(txt) = param
val Search.from_user(usr) = param
val Search.created_at(time) = param
"(" + time + ")" + usr + ": " + txt
}
Http.x((Search("#scala") lang "en") ~> (_ map process foreach println))
sbt REPL with dependencies
--------------------------
The arguments to the REPL mode configure the dependencies to use when
starting up the REPL. An argument may be either a jar to include on the
classpath, a dependency definition to retrieve and put on the classpath,
or a resolver to use when retrieving dependencies.
A dependency definition looks like:
::
organization%module%revision
Or, for a cross-built dependency:
::
organization%%module%revision
A repository argument looks like:
::
"id at url"
Example:
~~~~~~~~
To add the Sonatype snapshots repository and add Scalaz 7.0-SNAPSHOT to
REPL classpath:
.. code-block:: console
screpl "sonatype-snapshots at https://oss.sonatype.org/content/repositories/snapshots/" "org.scalaz%%scalaz-core%7.0-SNAPSHOT"
This syntax was a quick hack. Feel free to improve it. The relevant
class is
`IvyConsole <../../sxr/sbt/IvyConsole.scala.html>`_.

View File

@ -1,78 +0,0 @@
===========
Setup Notes
===========
Some notes on how to set up your `sbt` script.
Do not put `sbt-launch.jar` on your classpath.
------------------------------------------------
Do *not* put `sbt-launch.jar` in your `$SCALA_HOME/lib` directory,
your project's `lib` directory, or anywhere it will be put on a
classpath. It isn't a library.
Terminal encoding
-----------------
The character encoding used by your terminal may differ from Java's
default encoding for your platform. In this case, you will need to add
the option `-Dfile.encoding=<encoding>` in your `sbt` script to set
the encoding, which might look like:
.. code-block:: console
java -Dfile.encoding=UTF8
JVM heap, permgen, and stack sizes
----------------------------------
If you find yourself running out of permgen space or your workstation is
low on memory, adjust the JVM configuration as you would for any
application. For example a common set of memory-related options is:
.. code-block:: console
java -Xmx1536M -Xss1M -XX:+CMSClassUnloadingEnabled -XX:MaxPermSize=256m
Boot directory
--------------
`sbt-launch.jar` is just a bootstrap; the actual meat of sbt, and the
Scala compiler and standard library, are downloaded to the shared
directory `$HOME/.sbt/boot/`.
To change the location of this directory, set the `sbt.boot.directory`
system property in your `sbt` script. A relative path will be resolved
against the current working directory, which can be useful if you want
to avoid sharing the boot directory between projects. For example, the
following uses the pre-0.11 style of putting the boot directory in
`project/boot/`:
.. code-block:: console
java -Dsbt.boot.directory=project/boot/
HTTP/HTTPS/FTP Proxy
--------------------
On Unix, sbt will pick up any HTTP, HTTPS, or FTP proxy settings from the standard
`http_proxy`, `https_proxy`, and `ftp_proxy` environment variables. If you are behind
a proxy requiring authentication, your `sbt` script must also pass flags to set the
`http.proxyUser` and `http.proxyPassword` properties for HTTP,
`ftp.proxyUser` and `ftp.proxyPassword` properties for FTP,
or `https.proxyUser` and `https.proxyPassword` properties for HTTPS.
For example,
.. code-block:: console
java -Dhttp.proxyUser=username -Dhttp.proxyPassword=mypassword
On Windows, your script should set properties for proxy host, port, and
if applicable, username and password. For example, for HTTP:
.. code-block:: console
java -Dhttp.proxyHost=myproxy -Dhttp.proxyPort=8080 -Dhttp.proxyUser=username -Dhttp.proxyPassword=mypassword
Replace `http` with `https` or `ftp` in the above command line to configure HTTPS or FTP.

View File

@ -1,129 +0,0 @@
==========================
Tasks/Settings: Motivation
==========================
This page motivates the task and settings system.
You should already know how to use tasks and settings, which are
described in the :doc:`getting started guide </Getting-Started/More-About-Settings>`
and on the :doc:`Tasks` page.
An important aspect of the task system is to combine two common, related steps in a build:
1. Ensure some other task is performed.
2. Use some result from that task.
Earlier versions of sbt configured these steps separately using
1. Dependency declarations
2. Some form of shared state
To see why it is advantageous to combine them, compare the situation to
that of deferring initialization of a variable in Scala. This Scala code
is a bad way to expose a value whose initialization is deferred:
::
// Define a variable that will be initialized at some point
// We don't want to do it right away, because it might be expensive
var foo: Foo = _
// Define a function to initialize the variable
def makeFoo(): Unit = ... initialize foo ...
Typical usage would be:
::
makeFoo()
doSomething( foo )
This example is rather exaggerated in its badness, but I claim it is
nearly the same situation as our two step task definitions. Particular
reasons this is bad include:
1. A client needs to know to call `makeFoo()` first.
2. `foo` could be changed by other code. There could be a
`def makeFoo2()`, for example.
3. Access to foo is not thread safe.
The first point is like declaring a task dependency, the second is like
two tasks modifying the same state (either project variables or files),
and the third is a consequence of unsynchronized, shared state.
In Scala, we have the built-in functionality to easily fix this: `lazy val`.
::
lazy val foo: Foo = ... initialize foo ...
with the example usage:
::
doSomething( foo )
Here, `lazy val` gives us thread safety, guaranteed initialization
before access, and immutability all in one, DRY construct. The task
system in sbt does the same thing for tasks (and more, but we won't go
into that here) that `lazy val` did for our bad example.
A task definition must declare its inputs and the type of its output.
sbt will ensure that the input tasks have run and will then provide
their results to the function that implements the task, which will
generate its own result. Other tasks can use this result and be assured
that the task has run (once) and be thread-safe and typesafe in the
process.
The general form of a task definition looks like:
::
myTask := {
val a: A = aTask.value
val b: B = bTask.value
... do something with a, b and generate a result ...
}
(This is only intended to be a discussion of the ideas behind tasks, so
see the :doc:`sbt Tasks </Detailed-Topics/Tasks>` page
for details on usage.) Here, `aTask` is assumed to produce a
result of type `A` and `bTask` is assumed to produce a result of
type `B`.
Application
-----------
As an example, consider generating a zip file containing the binary jar,
source jar, and documentation jar for your project. First, determine
what tasks produce the jars. In this case, the input tasks are
:key:`packageBin`, :key:`packageSrc`, and :key:`packageDoc` in the main
`Compile` scope. The result of each of these tasks is the File for the
jar that they generated. Our zip file task is defined by mapping these
package tasks and including their outputs in a zip file. As good
practice, we then return the File for this zip so that other tasks can
map on the zip task.
::
zip := {
val bin: File = (packageBin in Compile).value
val src: File = (packageSrc in Compile).value
val doc: File = (packageDoc in Compile).value
val out: File = zipPath.value
val inputs: Seq[(File,String)] = Seq(bin, src, doc) x Path.flat
IO.zip(inputs, out)
out
}
The `val inputs` line defines how the input files are mapped to paths
in the zip. See :doc:`/Detailed-Topics/Mapping-Files` for details.
The explicit types are not required, but are included for clarity.
The `zipPath` input would be a custom task to define the location of
the zip file. For example:
::
zipPath :=
target.value / "out.zip"

View File

@ -1,16 +0,0 @@
============================
Tasks, Commands, and Plugins
============================
This part of the documentation has pages documenting particular sbt topics in detail.
Before reading anything in here, you will need the information in the
:doc:`Getting Started Guide </Getting-Started/Welcome>` as a foundation.
.. toctree::
:maxdepth: 2
Tasks
/Extending/Input-Tasks
/Extending/Commands
Parsing-Input
/Extending/Build-State

View File

@ -1,659 +0,0 @@
=====
Tasks
=====
Tasks and settings are introduced in the :doc:`getting started guide </Getting-Started/Basic-Def>`,
which you may wish to read first.
This page has additional details and background and is intended more as a reference.
Introduction
============
Both settings and tasks produce values, but there are two major
differences between them:
1. Settings are evaluated at project load time. Tasks are executed on
demand, often in response to a command from the user.
2. At the beginning of project loading, settings and their dependencies
are fixed. Tasks can introduce new tasks during execution, however.
Features
========
There are several features of the task system:
1. By integrating with the settings system, tasks can be added, removed,
and modified as easily and flexibly as settings.
2. :doc:`Input Tasks </Extending/Input-Tasks>` use :doc:`parser combinators <Parsing-Input>` to define the syntax for their arguments.
This allows flexible syntax and tab-completions in the same way as :doc:`/Extending/Commands`.
3. Tasks produce values. Other tasks can access a task's value by calling `value` on it within a task definition.
4. Dynamically changing the structure of the task graph is possible.
Tasks can be injected into the execution graph based on the result of another task.
5. There are ways to handle task failure, similar to `try/catch/finally`.
6. Each task has access to its own Logger that by default persists the
logging for that task at a more verbose level than is initially
printed to the screen.
These features are discussed in detail in the following sections.
Defining a Task
===============
Hello World example (sbt)
-------------------------
build.sbt
::
lazy val hello = taskKey[Unit]("Prints 'Hello World'")
hello := println("hello world!")
Run "sbt hello" from command line to invoke the task. Run "sbt tasks" to
see this task listed.
Define the key
--------------
To declare a new task, define a lazy val of type `TaskKey`:
::
lazy val sampleTask = taskKey[Int]("A sample task.")
The name of the `val` is used when referring to the task in Scala code and at the command line.
The string passed to the `taskKey` method is a description of the task.
The type parameter passed to `taskKey` (here, `Int`) is the type of value produced by the task.
We'll define a couple of other keys for the examples:
::
lazy val intTask = taskKey[Int]("An int task")
lazy val stringTask = taskKey[String]("A string task")
The examples themselves are valid entries in a `build.sbt` or can be
provided as part of a sequence to `Project.settings` (see
:doc:`Full Configuration </Getting-Started/Full-Def>`).
Implement the task
------------------
There are three main parts to implementing a task once its key is
defined:
1. Determine the settings and other tasks needed by the task. They are
the task's inputs.
2. Define the code that implements the task in terms of these inputs.
3. Determine the scope the task will go in.
These parts are then combined just like the parts of a setting are combined.
Defining a basic task
~~~~~~~~~~~~~~~~~~~~~
A task is defined using `:=`
::
intTask := 1 + 2
stringTask := System.getProperty("user.name")
sampleTask := {
val sum = 1 + 2
println("sum: " + sum)
sum
}
As mentioned in the introduction, a task is evaluated on demand.
Each time `sampleTask` is invoked, for example, it will print the sum.
If the username changes between runs, `stringTask` will take different values in those separate runs.
(Within a run, each task is evaluated at most once.)
In contrast, settings are evaluated once on project load and are fixed until the next reload.
Tasks with inputs
~~~~~~~~~~~~~~~~~
Tasks with other tasks or settings as inputs are also defined using `:=`.
The values of the inputs are referenced by the `value` method. This method
is special syntax and can only be called when defining a task, such as in the
argument to `:=`. The following defines a task that adds one to the value
produced by `intTask` and returns the result.
::
sampleTask := intTask.value + 1
Multiple settings are handled similarly:
::
stringTask := "Sample: " + sampleTask.value + ", int: " + intTask.value
Task Scope
~~~~~~~~~~
As with settings, tasks can be defined in a specific scope. For example,
there are separate :key:`compile` tasks for the `compile` and `test`
scopes. The scope of a task is defined the same as for a setting. In the
following example, `test:sampleTask` uses the result of
`compile:intTask`.
::
sampleTask in Test := (intTask in Compile).value * 3
On precedence
~~~~~~~~~~~~~
As a reminder, infix method precedence is by the name of the method and postfix methods have lower precedence than infix methods.
1. Assignment methods have the lowest precedence. These are methods with
names ending in `=`, except for `!=`, `<=`, `>=`, and names
that start with `=`.
2. Methods starting with a letter have the next highest precedence.
3. Methods with names that start with a symbol and aren't included in 1.
have the highest precedence. (This category is divided further
according to the specific character it starts with. See the Scala
specification for details.)
Therefore, the previous example is equivalent to the following:
::
(sampleTask in Test).:=( (intTask in Compile).value * 3 )
Additionally, the braces in the following are necessary:
::
helloTask := { "echo Hello" ! }
Without them, Scala interprets the line as `( helloTask.:=("echo Hello") ).!`
instead of the desired `helloTask.:=( "echo Hello".! )`.
Separating implementations
--------------------------
The implementation of a task can be separated from the binding.
For example, a basic separate definition looks like:
::
    // Define a new, standalone task implementation
lazy val intTaskImpl: Initialize[Task[Int]] =
Def.task { sampleTask.value - 3 }
// Bind the implementation to a specific key
intTask := intTaskImpl.value
Note that whenever `.value` is used, it must be within a task definition, such as
within `Def.task` above or as an argument to `:=`.
Modifying an Existing Task
--------------------------
In the general case, modify a task by declaring the previous task as an
input.
::
// initial definition
intTask := 3
// overriding definition that references the previous definition
intTask := intTask.value + 1
Completely override a task by not declaring the previous task as an
input. Each of the definitions in the following example completely
overrides the previous one. That is, when `intTask` is run, it will
only print `#3`.
::
intTask := {
println("#1")
3
}
intTask := {
println("#2")
5
}
intTask := {
println("#3")
sampleTask.value - 3
}
.. _multiple-scopes:
Getting values from multiple scopes
===================================
Introduction
------------
The general form of an expression that gets values from multiple scopes is:
::
<setting-or-task>.all(<scope-filter>).value
The `all` method is implicitly added to tasks and settings.
It accepts a `ScopeFilter` that will select the `Scopes`.
The result has type `Seq[T]`, where `T` is the key's underlying type.
Example
-------
A common scenario is getting the sources for all subprojects for processing all at once, such as passing them to scaladoc.
The task that we want to obtain values for is :key:`sources` and we want to get the values in all non-root projects and in the `Compile` configuration.
This looks like:
::
lazy val core = project
lazy val util = project
lazy val root = project.settings(
sources := {
val filter = ScopeFilter( inProjects(core, util), inConfigurations(Compile) )
// each sources definition is of type Seq[File],
// giving us a Seq[Seq[File]] that we then flatten to Seq[File]
val allSources: Seq[Seq[File]] = sources.all(filter).value
allSources.flatten
}
)
The next section describes various ways to construct a ScopeFilter.
ScopeFilter
-----------
A basic `ScopeFilter` is constructed by the `ScopeFilter.apply` method.
This method makes a `ScopeFilter` from filters on the parts of a `Scope`: a `ProjectFilter`, `ConfigurationFilter`, and `TaskFilter`.
The simplest case is explicitly specifying the values for the parts:
::
val filter: ScopeFilter =
ScopeFilter(
inProjects( core, util ),
inConfigurations( Compile, Test )
)
Unspecified filters
~~~~~~~~~~~~~~~~~~~
If the task filter is not specified, as in the example above, the default is to select scopes without a specific task (global).
Similarly, an unspecified configuration filter will select scopes in the global configuration.
The project filter should usually be explicit, but if left unspecified, the current project context will be used.
More on filter construction
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The example showed the basic methods `inProjects` and `inConfigurations`.
This section describes all methods for constructing a `ProjectFilter`, `ConfigurationFilter`, or `TaskFilter`.
These methods can be organized into four groups:
* Explicit member list (`inProjects`, `inConfigurations`, `inTasks`)
* Global value (`inGlobalProject`, `inGlobalConfiguration`, `inGlobalTask`)
* Default filter (`inAnyProject`, `inAnyConfiguration`, `inAnyTask`)
* Project relationships (`inAggregates`, `inDependencies`)
See the `API documentation <../../api/sbt/ScopeFilter$$Make.html>`_ for details.
Combining ScopeFilters
~~~~~~~~~~~~~~~~~~~~~~
`ScopeFilters` may be combined with the `&&`, `||`, `--`, and `-` methods:
a && b
Selects scopes that match both `a` and `b`
a || b
Selects scopes that match either `a` or `b`
a -- b
Selects scopes that match `a` but not `b`
\-b
Selects scopes that do not match `b`
For example, the following selects the scope for the `Compile` and `Test` configurations of the `core` project
and the global configuration of the `util` project:
::
val filter: ScopeFilter =
ScopeFilter( inProjects(core), inConfigurations(Compile, Test)) ||
ScopeFilter( inProjects(util), inGlobalConfiguration )
More operations
---------------
The `all` method applies to both settings (values of type `Initialize[T]`)
and tasks (values of type `Initialize[Task[T]]`).
It returns a setting or task that provides a `Seq[T]`, as shown in this table:
==================== =========================
Target Result
==================== =========================
Initialize[T] Initialize[Seq[T]]
Initialize[Task[T]] Initialize[Task[Seq[T]]]
==================== =========================
This means that the `all` method can be combined with methods that construct tasks and settings.
Missing values
~~~~~~~~~~~~~~
Some scopes might not define a setting or task.
The `?` and `??` methods can help in this case.
They are both defined on settings and tasks and indicate what to do when a key is undefined.
`?`
On a setting or task with underlying type `T`, this accepts no arguments and returns a setting or task (respectively) of type `Option[T]`.
The result is `None` if the setting/task is undefined and `Some[T]` with the value if it is.
`??`
On a setting or task with underlying type `T`, this accepts an argument of type `T` and uses this argument if the setting/task is undefined.
The following contrived example sets the maximum errors to be the maximum of all aggregates of the current project.
::
maxErrors := {
// select the transitive aggregates for this project, but not the project itself
val filter: ScopeFilter =
ScopeFilter( inAggregates(ThisProject, includeRoot=false) )
// get the configured maximum errors in each selected scope,
// using 0 if not defined in a scope
      val allMaxErrors: Seq[Int] =
         (maxErrors ?? 0).all(filter).value
      allMaxErrors.max
}
Multiple values from multiple scopes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The target of `all` is any task or setting, including anonymous ones.
This means it is possible to get multiple values at once without defining a new task or setting in each scope.
A common use case is to pair each value obtained with the project, configuration, or full scope it came from.
:key:`resolvedScoped`
Provides the full enclosing `ScopedKey` (which is a `Scope` + `AttributeKey[_]`)
:key:`thisProject`
Provides the `Project` associated with this scope (undefined at the global and build levels)
:key:`thisProjectRef`
Provides the `ProjectRef` for the context (undefined at the global and build levels)
:key:`configuration`
Provides the `Configuration` for the context (undefined for the global configuration)
For example, the following defines a task that prints non-Compile configurations that define
sbt plugins. This might be used to identify an incorrectly configured build (or not, since this is
a fairly contrived example):
::
// Select all configurations in the current project except for Compile
lazy val filter: ScopeFilter = ScopeFilter(
inProjects(ThisProject),
inAnyConfiguration -- inConfigurations(Compile)
)
// Define a task that provides the name of the current configuration
// and the set of sbt plugins defined in the configuration
lazy val pluginsWithConfig: Initialize[Task[ (String, Set[String]) ]] =
Def.task {
( configuration.value.name, definedSbtPlugins.value )
}
checkPluginsTask := {
val oddPlugins: Seq[(String, Set[String])] =
pluginsWithConfig.all(filter).value
// Print each configuration that defines sbt plugins
for( (config, plugins) <- oddPlugins if plugins.nonEmpty )
println(s"$config defines sbt plugins: ${plugins.mkString(", ")}")
}
Advanced Task Operations
========================
The examples in this section use the task keys defined in the previous section.
Streams: Per-task logging
-------------------------
Per-task loggers are part of a more general system for task-specific data called Streams.
This allows controlling the verbosity of stack traces and logging individually for tasks as well
as recalling the last logging for a task.
Tasks also have access to their own persisted binary or text data.
To use Streams, get the value of the :key:`streams` task. This is a
special task that provides an instance of
`TaskStreams <../../api/sbt/std/TaskStreams.html>`_
for the defining task. This type provides access to named binary and
text streams, named loggers, and a default logger. The default
`Logger <../../api/sbt/Logger.html>`_,
which is the most commonly used aspect, is obtained by the `log`
method:
::
myTask := {
val s: TaskStreams = streams.value
s.log.debug("Saying hi...")
s.log.info("Hello!")
}
You can scope logging settings by the specific task's scope:
::
logLevel in myTask := Level.Debug
traceLevel in myTask := 5
To obtain the last logging output from a task, use the `last` command:
.. code-block:: console
$ last myTask
[debug] Saying hi...
[info] Hello!
The verbosity with which logging is persisted is controlled using the
:key:`persistLogLevel` and :key:`persistTraceLevel` settings. The `last`
command displays what was logged according to these levels. The levels
do not affect already logged information.
Dynamic Computations with `Def.taskDyn`
---------------------------------------
It can be useful to use the result of a task to determine the
next tasks to evaluate. This is done using `Def.taskDyn`. The
result of `taskDyn` is called a dynamic task because it introduces
dependencies at runtime. The `taskDyn` method supports the same
syntax as `Def.task` and `:=` except that you return a task instead
of a plain value.
For example, ::
val dynamic = Def.taskDyn {
// decide what to evaluate based on the value of `stringTask`
if(stringTask.value == "dev")
// create the dev-mode task: this is only evaluated if the
// value of stringTask is "dev"
Def.task {
3
}
else
// create the production task: only evaluated if the value
// of the stringTask is not "dev"
Def.task {
intTask.value + 5
}
}
myTask := {
val num = dynamic.value
println(s"Number selected was $num")
}
The only static dependency of `myTask` is `stringTask`.
The dependency on `intTask` is only introduced in non-dev mode.
.. note::
A dynamic task cannot refer to itself or a circular dependency will result.
In the example above, there would be a circular dependency if the code passed to `taskDyn` referenced `myTask`.
Handling Failure
----------------
This section discusses the `failure`, `result`, and `andFinally`
methods, which are used to handle failure of other tasks.
`failure`
~~~~~~~~~~~
The `failure` method creates a new task that returns the `Incomplete` value
when the original task fails to complete normally. If the original task succeeds,
the new task fails.
`Incomplete <../../api/sbt/Incomplete.html>`_
is an exception with information about any tasks that caused the failure
and any underlying exceptions thrown during task execution.
For example:
::
intTask := error("Failed.")
intTask := {
println("Ignoring failure: " + intTask.failure.value)
3
}
This overrides the `intTask` so that the original exception is printed and the constant `3` is returned.
`failure` does not prevent other tasks that depend on the target
from failing. Consider the following example:
::
intTask := if(shouldSucceed) 5 else error("Failed.")
// Return 3 if intTask fails. If intTask succeeds, this task will fail.
aTask := intTask.failure.value - 2
// A new task that increments the result of intTask.
bTask := intTask.value + 1
cTask := aTask.value + bTask.value
The following table lists the results of each task depending on the initially invoked task:
============== =============== ============= ============== ============== ==============
invoked task intTask result aTask result bTask result cTask result overall result
============== =============== ============= ============== ============== ==============
intTask failure not run not run not run failure
aTask failure success not run not run success
bTask failure not run failure not run failure
cTask failure success failure failure failure
intTask success not run not run not run success
aTask success failure not run not run failure
bTask success not run success not run success
cTask success failure success failure failure
============== =============== ============= ============== ============== ==============
The overall result is always the same as the root task (the directly
invoked task). A `failure` turns a success into a failure, and a failure into an `Incomplete`.
A normal task definition fails when any of its inputs fail and computes its value otherwise.
`result`
~~~~~~~~~~
The `result` method creates a new task that returns the full `Result[T]` value for the original task.
`Result <../../api/sbt/Result.html>`_
has the same structure as `Either[Incomplete, T]` for a task result of
type `T`. That is, it has two subtypes:
- `Inc`, which wraps `Incomplete` in case of failure
- `Value`, which wraps a task's result in case of success.
Thus, the task created by `result` executes whether or not the original task succeeds or fails.
For example:
::
intTask := error("Failed.")
intTask := intTask.result.value match {
case Inc(inc: Incomplete) =>
println("Ignoring failure: " + inc)
3
case Value(v) =>
println("Using successful result: " + v)
v
}
This overrides the original `intTask` definition so that if the original task fails, the exception is printed and the constant `3` is returned. If it succeeds, the value is printed and returned.
andFinally
~~~~~~~~~~
The `andFinally` method defines a new task that runs the original task
and evaluates a side effect regardless of whether the original task
succeeded. The result of the task is the result of the original task.
For example:
::
intTask := error("I didn't succeed.")
lazy val intTaskImpl = intTask andFinally { println("andFinally") }
intTask := intTaskImpl.value
This modifies the original `intTask` to always print "andFinally" even
if the task fails.
Note that `andFinally` constructs a new task. This means that the new
task has to be invoked in order for the extra block to run. This is
important when calling andFinally on another task instead of overriding
a task like in the previous example. For example, consider this code:
::
intTask := error("I didn't succeed.")
lazy val intTaskImpl = intTask andFinally { println("andFinally") }
otherIntTask := intTaskImpl.value
If `intTask` is run directly, `otherIntTask` is never involved in
execution. This case is similar to the following plain Scala code:
::
def intTask(): Int =
error("I didn't succeed.")
def otherIntTask(): Int =
try { intTask() }
finally { println("finally") }
intTask()
It is obvious here that calling intTask() will never result in "finally"
being printed.

View File

@ -1,503 +0,0 @@
=======
Testing
=======
Basics
======
The standard source locations for testing are:
- Scala sources in `src/test/scala/`
- Java sources in `src/test/java/`
- Resources for the test classpath in `src/test/resources/`
The resources may be accessed from tests by using the `getResource`
methods of `java.lang.Class` or `java.lang.ClassLoader`.
The main Scala testing frameworks
(`specs2 <http://specs2.org/>`_,
`ScalaCheck <http://scalacheck.org/>`_, and
`ScalaTest <http://scalatest.org/>`_) provide an
implementation of the common test interface and only need to be added to
the classpath to work with sbt. For example, ScalaCheck may be used by
declaring it as a :doc:`managed dependency <Library-Management>`:
::
libraryDependencies += "org.scalacheck" %% "scalacheck" % "1.10.1" % "test"
The fourth component `"test"` is the :ref:`configuration <gsg-ivy-configurations>`
and means that ScalaCheck will only be on the test classpath and it
isn't needed by the main sources. This is generally good practice for
libraries because your users don't typically need your test dependencies
to use your library.
With the library dependency defined, you can then add test sources in
the locations listed above and compile and run tests. The tasks for
running tests are :key:`test` and :key:`testOnly`. The :key:`test` task
accepts no command line arguments and runs all tests:
.. code-block:: console
> test
testOnly
---------
The :key:`testOnly` task accepts a whitespace separated list of test names
to run. For example:
.. code-block:: console
> testOnly org.example.MyTest1 org.example.MyTest2
It supports wildcards as well:
.. code-block:: console
> testOnly org.example.*Slow org.example.MyTest1
testQuick
----------
The :key:`testQuick` task, like :key:`testOnly`, allows the tests to be
filtered to specific tests or wildcards, using the same syntax to indicate
the filters.  In addition to the explicit filter, only the tests that
satisfy one of the following conditions are run:
- The tests that failed in the previous run
- The tests that were not run before
- The tests that have one or more transitive dependencies, maybe in a
different project, recompiled.
Tab completion
~~~~~~~~~~~~~~
Tab completion is provided for test names based on the results of the
last `test:compile`.  This means that new sources aren't available
for tab completion until they are compiled and deleted sources won't be
removed from tab completion until a recompile. A new test source can
still be manually written out and run using :key:`testOnly`.
Other tasks
-----------
Tasks that are available for main sources are generally available for
test sources, but are prefixed with `test:` on the command line and
are referenced in Scala code with `in Test`. These tasks include:
- `test:compile`
- `test:console`
- `test:consoleQuick`
- `test:run`
- `test:runMain`
See :doc:`Running </Getting-Started/Running>` for details on these tasks.
Output
======
By default, logging is buffered for each test source file until all
tests for that file complete. This can be disabled by setting :key:`logBuffered`:
::
logBuffered in Test := false
Test Reports
------------
By default, sbt will generate JUnit XML test reports for all tests in the build, located
in the ``target/test-reports`` directory for a project. This can be disabled by
disabling the ``JUnitXmlReportPlugin``
::
val myProject = project in file(".") disablePlugins (plugins.JUnitXmlReportPlugin)
Options
=======
Test Framework Arguments
------------------------
Arguments to the test framework may be provided on the command line to
the :key:`testOnly` tasks following a `--` separator. For example:
.. code-block:: console
> testOnly org.example.MyTest -- -d -S
To specify test framework arguments as part of the build, add options
constructed by `Tests.Argument`:
::
testOptions in Test += Tests.Argument("-d", "-g")
To specify them for a specific test framework only:
::
testOptions in Test += Tests.Argument(TestFrameworks.ScalaCheck, "-d", "-g")
Setup and Cleanup
-----------------
Specify setup and cleanup actions using `Tests.Setup` and
`Tests.Cleanup`. These accept either a function of type `() => Unit`
or a function of type `ClassLoader => Unit`. The variant that accepts
a ClassLoader is passed the class loader that is (or was) used for
running the tests. It provides access to the test classes as well as the
test framework classes.
.. note::
When forking, the ClassLoader containing the test classes cannot be provided because it is in another JVM. Only use the `() => Unit` variants in this case.
Examples:
::
testOptions in Test += Tests.Setup( () => println("Setup") )
testOptions in Test += Tests.Cleanup( () => println("Cleanup") )
testOptions in Test += Tests.Setup( loader => ... )
testOptions in Test += Tests.Cleanup( loader => ... )
Disable Parallel Execution of Tests
-----------------------------------
By default, sbt runs all tasks in parallel. Because each test is mapped
to a task, tests are also run in parallel by default. To make tests
within a given project execute serially: ::
parallelExecution in Test := false
`Test` can be replaced with `IntegrationTest` to only execute integration tests serially.
Note that tests from different projects may still execute concurrently.
Filter classes
--------------
If you want to only run test classes whose name ends with "Test", use
`Tests.Filter`:
::
testOptions in Test := Seq(Tests.Filter(s => s.endsWith("Test")))
Forking tests
-------------
The setting:
::
fork in Test := true
specifies that all tests will be executed in a single external JVM. See
:doc:`Forking` for configuring standard options for forking. More control
over how tests are assigned to JVMs and what options to pass to those is
available with :key:`testGrouping` key. For example in build.sbt:
::
import Tests._
{
def groupByFirst(tests: Seq[TestDefinition]) =
tests groupBy (_.name(0)) map {
          case (letter, tests) => new Group(letter.toString, tests, SubProcess(Seq("-Dfirst.letter=" + letter)))
} toSeq
      testGrouping in Test := groupByFirst( (definedTests in Test).value )
}
The tests in a single group are run sequentially. Control the number
of forked JVMs allowed to run at the same time by setting the
limit on `Tags.ForkedTestGroup` tag, which is 1 by default.
`Setup` and `Cleanup` actions cannot be provided with the actual
test class loader when a group is forked.
Additional test configurations
==============================
You can add an additional test configuration to have a separate set of
test sources and associated compilation, packaging, and testing tasks
and settings. The steps are:
- Define the configuration
- Add the tasks and settings
- Declare library dependencies
- Create sources
- Run tasks
The following two examples demonstrate this. The first example shows how
to enable integration tests. The second shows how to define a customized
test configuration. This allows you to define multiple types of tests
per project.
Integration Tests
-----------------
The following full build configuration demonstrates integration tests.
::
import sbt._
import Keys._
object B extends Build
{
lazy val root =
Project("root", file("."))
.configs( IntegrationTest )
.settings( Defaults.itSettings : _*)
.settings( libraryDependencies += specs )
lazy val specs = "org.specs2" %% "specs2" % "2.0" % "it,test"
}
- `configs(IntegrationTest)` adds the predefined integration test
configuration. This configuration is referred to by the name `it`.
- `settings( Defaults.itSettings : _* )` adds compilation, packaging,
and testing actions and settings in the `IntegrationTest`
configuration.
- `settings( libraryDependencies += specs )` adds specs to both the
standard `test` configuration and the integration test
configuration `it`. To define a dependency only for integration
tests, use `"it"` as the configuration instead of `"it,test"`.
The standard source hierarchy is used:
- `src/it/scala` for Scala sources
- `src/it/java` for Java sources
- `src/it/resources` for resources that should go on the integration
test classpath
The standard testing tasks are available, but must be prefixed with
`it:`. For example,
.. code-block:: console
> it:testOnly org.example.AnIntegrationTest
Similarly the standard settings may be configured for the
`IntegrationTest` configuration. If not specified directly, most
`IntegrationTest` settings delegate to `Test` settings by default.
For example, if test options are specified as:
::
testOptions in Test += ...
then these will be picked up by the `Test` configuration and in turn
by the `IntegrationTest` configuration. Options can be added
specifically for integration tests by putting them in the
`IntegrationTest` configuration:
::
testOptions in IntegrationTest += ...
Or, use `:=` to overwrite any existing options, declaring these to be
the definitive integration test options:
::
testOptions in IntegrationTest := Seq(...)
Custom test configuration
-------------------------
The previous example may be generalized to a custom test configuration.
::
import sbt._
import Keys._
object B extends Build
{
lazy val root =
Project("root", file("."))
.configs( FunTest )
.settings( inConfig(FunTest)(Defaults.testSettings) : _*)
.settings( libraryDependencies += specs )
lazy val FunTest = config("fun") extend(Test)
lazy val specs = "org.specs2" %% "specs2" % "2.0" % "fun"
}
Instead of using the built-in configuration, we defined a new one:
::
lazy val FunTest = config("fun") extend(Test)
The `extend(Test)` part means to delegate to `Test` for undefined
`FunTest` settings.  The line that adds the tasks and settings for
the new test configuration is:
::
settings( inConfig(FunTest)(Defaults.testSettings) : _*)
This says to add test and settings tasks in the `FunTest`
configuration. We could have done it this way for integration tests as
well. In fact, `Defaults.itSettings` is a convenience definition:
`val itSettings = inConfig(IntegrationTest)(Defaults.testSettings)`.
The comments in the integration test section hold, except with
`IntegrationTest` replaced with `FunTest` and `"it"` replaced with
`"fun"`. For example, test options can be configured specifically for
`FunTest`:
::
testOptions in FunTest += ...
Test tasks are run by prefixing them with `fun:`
.. code-block:: console
> fun:test
Additional test configurations with shared sources
--------------------------------------------------
An alternative to adding separate sets of test sources (and
compilations) is to share sources. In this approach, the sources are
compiled together using the same classpath and are packaged together.
However, different tests are run depending on the configuration.
::
import sbt._
import Keys._
object B extends Build {
lazy val root =
Project("root", file("."))
.configs( FunTest )
.settings( inConfig(FunTest)(Defaults.testTasks) : _*)
.settings(
libraryDependencies += specs,
testOptions in Test := Seq(Tests.Filter(unitFilter)),
testOptions in FunTest := Seq(Tests.Filter(itFilter))
)
def itFilter(name: String): Boolean = name endsWith "ITest"
def unitFilter(name: String): Boolean = (name endsWith "Test") && !itFilter(name)
lazy val FunTest = config("fun") extend(Test)
lazy val specs = "org.specs2" %% "specs2" % "2.0" % "test"
}
The key differences are:
- We are now only adding the test tasks
(`inConfig(FunTest)(Defaults.testTasks)`) and not compilation and
packaging tasks and settings.
- We filter the tests to be run for each configuration.
To run standard unit tests, run `test` (or equivalently,
`test:test`):
.. code-block:: console
> test
To run tests for the added configuration (here, `"fun"`), prefix it
with the configuration name as before:
.. code-block:: console
> fun:test
> fun:testOnly org.example.AFunTest
Application to parallel execution
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
One use for this shared-source approach is to separate tests that can
run in parallel from those that must execute serially. Apply the
procedure described in this section for an additional configuration.
Let's call the configuration `serial`:
::
lazy val Serial = config("serial") extend(Test)
Then, we can disable parallel execution in just that configuration
using:
::
parallelExecution in Serial := false
The tests to run in parallel would be run with :key:`test` and the ones to
run in serial would be run with `serial:test`.
JUnit
=====
Support for JUnit is provided by
`junit-interface <https://github.com/szeiger/junit-interface>`_. To add
JUnit support into your project, add the junit-interface dependency in
your project's main build.sbt file.
::
libraryDependencies += "com.novocode" % "junit-interface" % "0.9" % "test"
Extensions
==========
This page describes adding support for additional testing libraries and
defining additional test reporters. You do this by implementing `sbt`
interfaces (described below). If you are the author of the testing
framework, you can depend on the test interface as a provided
dependency. Alternatively, anyone can provide support for a test
framework by implementing the interfaces in a separate project and
packaging the project as an sbt :doc:`Plugin </Extending/Plugins>`.
Custom Test Framework
---------------------
The main Scala testing libraries have built-in support for sbt.
To add support for a different framework, implement the
`uniform test interface <http://github.com/sbt/test-interface>`_.
Custom Test Reporters
---------------------
Test frameworks report status and results to test reporters. You can
create a new test reporter by implementing either
`TestReportListener <../../api/sbt/TestReportListener.html>`_
or
`TestsListener <../../api/sbt/TestsListener.html>`_.
Using Extensions
----------------
To use your extensions in a project definition:
Modify the :key:`testFrameworks` setting to reference your test framework:
::
testFrameworks += new TestFramework("custom.framework.ClassName")
Specify the test reporters you want to use by overriding the
:key:`testListeners` setting in your project definition.
::
testListeners += customTestListener
where `customTestListener` is of type `sbt.TestReportListener`.

View File

@ -1,59 +0,0 @@
===================
Triggered Execution
===================
You can make a command run when certain files change by prefixing the
command with `~`. Monitoring is terminated when `enter` is pressed.
This triggered execution is configured by the :key:`watch` setting, but
typically the basic settings :key:`watchSources` and :key:`pollInterval` are
modified.
- :key:`watchSources` defines the files for a single project that are
monitored for changes. By default, a project watches resources and
Scala and Java sources.
- :key:`watchTransitiveSources` then combines the :key:`watchSources` for
the current project and all execution and classpath dependencies (see
:doc:`Full Configuration </Getting-Started/Full-Def>` for details on interProject dependencies).
- :key:`pollInterval` selects the interval between polling for changes in
milliseconds. The default value is `500 ms`.
Some example usages are described below.
Compile
=======
The original use-case was continuous compilation:
.. code-block:: console
> ~ test:compile
> ~ compile
Testing
=======
You can use the triggered execution feature to run any command or task.
One use is for test driven development, as suggested by Erick on the
mailing list.
The following will poll for changes to your source code (main or test)
and run :key:`testOnly` for the specified test.
.. code-block:: console
> ~ testOnly example.TestA
Running Multiple Commands
=========================
Occasionally, you may need to trigger the execution of multiple
commands. You can use semicolons to separate the commands to be
triggered.
The following will poll for source changes and run :key:`clean` and
:key:`test`.
.. code-block:: console
> ~ ;clean ;test

View File

@ -1,405 +0,0 @@
=======================================
Understanding Incremental Recompilation
=======================================
Compiling Scala code is slow, and sbt makes it often faster. By
understanding how, you can even understand how to make compilation even
faster. Modifying source files with many dependencies might require
recompiling only those source files—which might take, say, 5
seconds—instead of all the dependencies—which might take, say, 2
minutes. Often you can control which will be your case and make
development much faster by some simple coding practices.
In fact, improving Scala compilation times is one major goal of sbt, and
conversely the speedups it gives are one of the major motivations to use
it. A significant portion of sbt sources and development efforts deals
with strategies for speeding up compilation.
To reduce compile times, sbt uses the following strategies:
1. reduce the overhead for restarting Scalac;
2. implement smart and transparent strategies for incremental
recompilation, so that only modified files and the needed
dependencies are recompiled.
3. sbt runs Scalac always in the same virtual machine. If one compiles
source code using sbt, keeps sbt alive, modifies source code and
triggers a new compilation, this compilation will be faster because
(part of) Scalac will have already been JIT-compiled. In the future,
sbt will reintroduce support for reusing the same compiler instance,
similarly to fsc.
4. When a source file `A.scala` is modified, sbt goes to great effort
to recompile other source files depending on `A.scala` only if
required - that is, only if the interface of `A.scala` was
modified. With other build management tools (especially for Java,
like ant), when a developer changes a source file in a
non-binary-compatible way, he needs to manually ensure that
dependencies are also recompiled - often by manually running the
`clean` command to remove existing compilation output; otherwise
compilation might succeed even when dependent class files might need
to be recompiled. What is worse, the change to one source might make
dependencies incorrect, but this is not discovered automatically: One
might get a compilation success with incorrect source code. Since
Scala compile times are so high, running `clean` is particularly
undesirable.
By organizing your source code appropriately, you can minimize the
amount of code affected by a change. sbt cannot determine precisely
which dependencies have to be recompiled; the goal is to compute a
conservative approximation, so that whenever a file must be recompiled,
it will, even though we might recompile extra files.
sbt heuristics
--------------
sbt tracks source dependencies at the granularity of source files. For
each source file, sbt tracks files which depend on it directly; if the
**interface** of classes, objects or traits in a file changes, all files
dependent on that source must be recompiled. At the moment sbt uses the
following algorithm to calculate source files dependent on a given source
file:
- dependencies introduced through inheritance are included *transitively*;
a dependency is introduced through inheritance if a class/trait in one
file inherits from a trait/class in another file
- all other direct dependencies are included; other dependencies are also
called "member reference" dependencies because they are introduced by
referring to a member (class, method, type, etc.) defined in some other
source file
Here's an example illustrating the definition above::
//A.scala
class A {
def foo: Int = 123
}
//B.scala
class B extends A
//C.scala
class C extends B
//D.scala
class D(a: A)
//E.scala
class E(d: D)
There are the following dependencies through inheritance:
.. code-block:: none
B.scala -> A.scala
C.scala -> B.scala
There are also the following member reference dependencies:
.. code-block:: none
D.scala -> A.scala
E.scala -> D.scala
Now if the interface of `A.scala` is changed the following files
will get invalidated: `B.scala`, `C.scala`, `D.scala`. Both
`B.scala` and `C.scala` were included through the transitive closure
of inheritance dependencies. The `E.scala` was not included because
`E.scala` doesn't depend directly on `A.scala`.
The distinction between dependencies by inheritance or member reference
is a new feature in sbt 0.13 and is responsible for improved recompilation
times in many cases where deep inheritance chains are not used extensively.
sbt does not instead track dependencies to source code at the
granularity of individual output `.class` files, as one might hope.
Doing so would be incorrect, because of some problems with sealed
classes (see below for discussion).
Dependencies on binary files are different - they are tracked both on
the `.class` level and on the source file level. Adding a new
implementation of a sealed trait to source file `A` affects all
clients of that sealed trait, and such dependencies are tracked at the
source file level.
Different sources are moreover recompiled together; hence a compile
error in one source implies that no bytecode is generated for any of
those. When a lot of files need to be recompiled and the compile fix is
not clear, it might be best to comment out the offending location (if
possible) to allow other sources to be compiled, and then try to figure
out how to fix the offending location—this way, trying out a possible
solution to the compile error will take less time, say 5 seconds instead
of 2 minutes.
What is included in the interface of a Scala class
--------------------------------------------------
It is surprisingly tricky to understand which changes to a class require
recompiling its clients. The rules valid for Java are much simpler (even
if they include some subtle points as well); trying to apply them to
Scala will prove frustrating. Here is a list of a few surprising points,
just to illustrate the ideas; this list is not intended to be complete.
1. Since Scala supports named arguments in method invocations, the name
of method arguments are part of its interface.
2. Adding a method to a trait requires recompiling all implementing
classes. The same is true for most changes to a method signature in a
trait.
3. Calls to `super.methodName` in traits are resolved to calls to an
abstract method called `fullyQualifiedTraitName$$super$methodName`;
such methods only exist if they are used. Hence, adding the first
call to `super.methodName` for a specific `methodName` changes
the interface. At present, this is not yet handled—see gh-466.
4. `sealed` hierarchies of case classes allow checking exhaustiveness
of pattern matching. Hence pattern matches using case classes must
depend on the complete hierarchy - this is one reason why
dependencies cannot be easily tracked at the class level (see Scala
issue `SI-2559 <https://issues.scala-lang.org/browse/SI-2559>`_ for
an example.)
Debugging an interface representation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you see spurious incremental recompilations or you want to understand
what changes to an extracted interface cause incremental recompilation
then sbt 0.13 has the right tools for that.
In order to debug the interface representation and its changes as you
modify and recompile source code you need to do two things:
1. Enable incremental compiler's `apiDebug` option.
2. Add `diff-utils library <https://code.google.com/p/java-diff-utils/>`_
to sbt's classpath. Check documentation of `sbt.extraClasspath`
system property in the :doc:`Command-Line-Reference`.
.. warning:: Enabling the `apiDebug` option increases significantly
memory consumption and degrades performance of the
incremental compiler. The underlying reason is that in
order to produce meaningful debugging information about
interface differences incremental compiler has to retain
the full representation of the interface instead of just
hash sum as it does by default.
Keep this option enabled only while you are debugging an
incremental compiler problem.
Below is a complete transcript which shows how to enable interface debugging
in your project. First, we download the `diffutils` jar and pass it
to sbt:
.. code-block:: none
curl -O https://java-diff-utils.googlecode.com/files/diffutils-1.2.1.jar
sbt -Dsbt.extraClasspath=diffutils-1.2.1.jar
[info] Loading project definition from /Users/grek/tmp/sbt-013/project
[info] Set current project to sbt-013 (in build file:/Users/grek/tmp/sbt-013/)
> set incOptions := incOptions.value.copy(apiDebug = true)
[info] Defining *:incOptions
[info] The new value will be used by compile:incCompileSetup, test:incCompileSetup
[info] Reapplying settings...
[info] Set current project to sbt-013 (in build file:/Users/grek/tmp/sbt-013/)
Let's suppose you have the following source code in `Test.scala`::
class A {
def b: Int = 123
}
compile it and then change the `Test.scala` file so it looks like::
class A {
def b: String = "abc"
}
and run :key:`compile` task again. Now if you run `last compile` you should see
the following lines in the debugging log
.. code-block:: none
> last compile
[...]
[debug] Detected a change in a public API:
[debug] --- /Users/grek/tmp/sbt-013/Test.scala
[debug] +++ /Users/grek/tmp/sbt-013/Test.scala
[debug] @@ -23,7 +23,7 @@
[debug] ^inherited^ final def ##(): scala.this#Int
[debug] ^inherited^ final def synchronized[ java.lang.Object.T0 >: scala.this#Nothing <: scala.this#Any](x$1: <java.lang.Object.T0>): <java.lang.Object.T0>
[debug] ^inherited^ final def $isInstanceOf[ java.lang.Object.T0 >: scala.this#Nothing <: scala.this#Any](): scala.this#Boolean
[debug] ^inherited^ final def $asInstanceOf[ java.lang.Object.T0 >: scala.this#Nothing <: scala.this#Any](): <java.lang.Object.T0>
[debug] def <init>(): this#A
[debug] -def b: scala.this#Int
[debug] +def b: java.lang.this#String
[debug] }
You can see a unified diff of the two textual interface representations. As you can see,
the incremental compiler detected a change to the return type of `b` method.
How to take advantage of sbt heuristics
---------------------------------------
The heuristics used by sbt imply the following user-visible
consequences, which determine whether a change to a class affects other
classes.
XXX Please note that this part of the documentation is a first draft;
part of the strategy might be unsound, part of it might be not yet
implemented.
1. Adding, removing, modifying `private` methods does not require
recompilation of client classes. Therefore, suppose you add a method
to a class with a lot of dependencies, and that this method is only
used in the declaring class; marking it `private` will prevent
recompilation of clients. However, this only applies to methods which
are not accessible to other classes, hence methods marked with
`private` or `private[this]`; methods which are private to a
package, marked with `private[name]`, are part of the API.
2. Modifying the interface of a non-private method requires recompiling
all clients, even if the method is not used.
3. Modifying one class does require recompiling dependencies of other
classes defined in the same file (unlike said in a previous version
of this guide). Hence separating different classes in different
source files might reduce recompilations.
4. Adding a method which did not exist requires recompiling all clients,
counterintuitively, due to complex scenarios with implicit
conversions. Hence in some cases you might want to start implementing
a new method in a separate, new class, complete the implementation,
and then cut-n-paste the complete implementation back into the
original source.
5. Changing the implementation of a method should *not* affect its
clients, unless the return type is inferred, and the new
implementation leads to a slightly different type being inferred.
Hence, annotating the return type of a non-private method explicitly,
if it is more general than the type actually returned, can reduce the
code to be recompiled when the implementation of such a method
changes. (Explicitly annotating return types of a public API is a
good practice in general.)
All the above discussion about methods also applies to fields and
members in general; similarly, references to classes also extend to
objects and traits.
Why changing the implementation of a method might affect clients, and why type annotations help
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This section explains why relying on type inference for return types of
public methods is not always appropriate. However this is an important
design issue, so we cannot give fixed rules. Moreover, this change is
often invasive, and reducing compilation times is not often a good
enough motivation. That is why we discuss also some of the implications
from the point of view of binary compatibility and software engineering.
Consider the following source file `A.scala`:
.. code-block:: scala
import java.io._
object A {
def openFiles(list: List[File]) =
list.map(name => new FileWriter(name))
}
Let us now consider the public interface of trait `A`. Note that the
return type of method `openFiles` is not specified explicitly, but
computed by type inference to be `List[FileWriter]`. Suppose that
after writing this source code, we introduce client code and then modify
`A.scala` as follows:
.. code-block:: scala
import java.io._
object A {
def openFiles(list: List[File]) =
Vector(list.map(name => new BufferedWriter(new FileWriter(name))): _*)
}
Type inference will now compute `Vector[BufferedWriter]` as the result
type; in other words, changing the implementation
led to a change of the public interface, with three undesirable
consequences:
1. Concerning our topic, client code needs to be recompiled, since
changing the return type of a method, in the JVM, is a
binary-incompatible interface change.
2. If our component is a released library, using our new version
requires recompiling all client code, changing the version number,
and so on. Often not good, if you distribute a library where binary
compatibility becomes an issue.
3. More generally, client code might now even be invalid. The following
code will for instance become invalid after the change:
::
val res: List[FileWriter] = A.openFiles(List(new File("foo.input")))
Also the following code will break:
.. code-block:: scala
val a: Seq[Writer] = new BufferedWriter(new FileWriter("bar.input")) +:
A.openFiles(List(new File("foo.input")))
How can we avoid these problems?
Of course, we cannot solve them in general: if we want to alter the
interface of a module, breakage might result. However, often we can
remove *implementation details* from the interface of a module. In the
example above, for instance, it might well be that the intended return
type is more general - namely `Seq[Writer]`. It might also not be the
case - this is a design choice to be decided on a case-by-case basis. In
this example I will assume however that the designer chooses
`Seq[Writer]`, since it is a reasonable choice both in the above
simplified example and in a real-world extension of the above code.
The client snippets above will now become
::
val res: Seq[Writer] =
A.openFiles(List(new File("foo.input")))
val a: Seq[Writer] =
new BufferedWriter(new FileWriter("bar.input")) +:
A.openFiles(List(new File("foo.input")))
XXX the rest of the section must be reintegrated or dropped: In general,
changing the return type of a method might be source-compatible, for
instance if the new type is more specific, or if it is less specific,
but still more specific than the type required by clients (note however
that making the type more specific might still invalidate clients in
non-trivial scenarios involving for instance type inference or implicit
conversions—for a more specific type, too many implicit conversions
might be available, leading to ambiguity); however, the bytecode for a
method call includes the return type of the invoked method, hence the
client code needs to be recompiled.
Hence, adding explicit return types on classes with many dependencies
might reduce the occasions where client code needs to be recompiled.
Moreover, this is in general a good development practice when interfaces
between different modules become important—specifying such interfaces
documents the intended behavior and helps ensure binary compatibility,
which is especially important when the exposed interface is used by
other software components.
Why adding a member requires recompiling existing clients
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In Java adding a member does not require recompiling existing valid
source code. The same should seemingly hold also in Scala, but this is
not the case: implicit conversions might enrich class `Foo` with
method `bar` without modifying class `Foo` itself (see discussion in
issue gh-288 - XXX integrate more). However, if another method `bar`
is introduced in class `Foo`, this method should be used in preference
to the one added through implicit conversions. Therefore any class
depending on `Foo` should be recompiled. One can imagine more
fine-grained tracking of dependencies, but this is currently not
implemented.
Further references
------------------
The incremental compilation logic is implemented in
https://github.com/sbt/sbt/blob/0.13/compile/inc/src/main/scala/inc/Incremental.scala.
Some related documentation for sbt 0.7 is available at:
https://code.google.com/p/simple-build-tool/wiki/ChangeDetectionAndTesting.
Some discussion on the incremental recompilation policies is available
in issue gh-322 and gh-288.

View File

@ -1,195 +0,0 @@
=============
Update Report
=============
:key:`update` and related tasks produce a value of type
`sbt.UpdateReport <../../api/sbt/UpdateReport.html>`_
This data structure provides information about the resolved
configurations, modules, and artifacts. At the top level,
`UpdateReport` provides reports of type `ConfigurationReport` for
each resolved configuration. A `ConfigurationReport` supplies reports
(of type `ModuleReport`) for each module resolved for a given
configuration. Finally, a `ModuleReport` lists each successfully
retrieved `Artifact` and the `File` it was retrieved to as well as
the `Artifact`\ s that couldn't be downloaded. This missing
`Artifact` list is always empty for :key:`update`, which will fail if it is
non-empty. However, it may be non-empty for :key:`updateClassifiers` and
:key:`updateSbtClassifiers`.
Filtering a Report and Getting Artifacts
========================================
A typical use of `UpdateReport` is to retrieve a list of files
matching a filter. A conversion of type
`UpdateReport => RichUpdateReport` implicitly provides these methods
for `UpdateReport`. The filters are defined by the
`DependencyFilter <../../api/sbt/DependencyFilter.html>`_,
`ConfigurationFilter <../../api/sbt/ConfigurationFilter.html>`_,
`ModuleFilter <../../api/sbt/ModuleFilter.html>`_,
and
`ArtifactFilter <../../api/sbt/ArtifactFilter.html>`_
types. Using these filter types, you can filter by the configuration
name, the module organization, name, or revision, and the artifact name,
type, extension, or classifier.
The relevant methods (implicitly on `UpdateReport`) are:
::
def matching(f: DependencyFilter): Seq[File]
def select(configuration: ConfigurationFilter = ..., module: ModuleFilter = ..., artifact: ArtifactFilter = ...): Seq[File]
Any argument to `select` may be omitted, in which case all values are
allowed for the corresponding component. For example, if the
`ConfigurationFilter` is not specified, all configurations are
accepted. The individual filter types are discussed below.
Filter Basics
-------------
Configuration, module, and artifact filters are typically built by
applying a `NameFilter` to each component of a `Configuration`,
`ModuleID`, or `Artifact`. A basic `NameFilter` is implicitly
constructed from a String, with `*` interpreted as a wildcard.
::
import sbt._
// each argument is of type NameFilter
val mf: ModuleFilter = moduleFilter(organization = "*sbt*", name = "main" | "actions", revision = "1.*" - "1.0")
// unspecified arguments match everything by default
val mf: ModuleFilter = moduleFilter(organization = "net.databinder")
// specifying "*" is the same as omitting the argument
val af: ArtifactFilter = artifactFilter(name = "*", `type` = "source", extension = "jar", classifier = "sources")
val cf: ConfigurationFilter = configurationFilter(name = "compile" | "test")
Alternatively, these filters, including a `NameFilter`, may be
directly defined by an appropriate predicate (a single-argument function
returning a Boolean).
::
import sbt._
// here the function value of type String => Boolean is implicitly converted to a NameFilter
val nf: NameFilter = (s: String) => s.startsWith("dispatch-")
// a Set[String] is a function String => Boolean
val acceptConfigs: Set[String] = Set("compile", "test")
// implicitly converted to a ConfigurationFilter
val cf: ConfigurationFilter = acceptConfigs
val mf: ModuleFilter = (m: ModuleID) => m.organization contains "sbt"
val af: ArtifactFilter = (a: Artifact) => a.classifier.isEmpty
ConfigurationFilter
-------------------
A configuration filter essentially wraps a `NameFilter` and is
explicitly constructed by the `configurationFilter` method:
::
def configurationFilter(name: NameFilter = ...): ConfigurationFilter
If the argument is omitted, the filter matches all configurations.
Functions of type `String => Boolean` are implicitly convertible to a
`ConfigurationFilter`. As with `ModuleFilter`, `ArtifactFilter`,
and `NameFilter`, the `&`, `|`, and `-` methods may be used to
combine `ConfigurationFilter`\ s.
::
import sbt._
val a: ConfigurationFilter = Set("compile", "test")
val b: ConfigurationFilter = (c: String) => c.startsWith("r")
val c: ConfigurationFilter = a | b
(The explicit types are optional here.)
ModuleFilter
------------
A module filter is defined by three `NameFilter`\ s: one for the
organization, one for the module name, and one for the revision. Each
component filter must match for the whole module filter to match. A
module filter is explicitly constructed by the `moduleFilter` method:
::
def moduleFilter(organization: NameFilter = ..., name: NameFilter = ..., revision: NameFilter = ...): ModuleFilter
An omitted argument does not contribute to the match. If all arguments
are omitted, the filter matches all `ModuleID`\ s. Functions of type
`ModuleID => Boolean` are implicitly convertible to a
`ModuleFilter`. As with `ConfigurationFilter`, `ArtifactFilter`,
and `NameFilter`, the `&`, `|`, and `-` methods may be used to
combine `ModuleFilter`\ s:
::
import sbt._
val a: ModuleFilter = moduleFilter(name = "dispatch-twitter", revision = "0.7.8")
val b: ModuleFilter = moduleFilter(name = "dispatch-*")
val c: ModuleFilter = b - a
(The explicit types are optional here.)
ArtifactFilter
--------------
An artifact filter is defined by four `NameFilter`\ s: one for the
name, one for the type, one for the extension, and one for the
classifier. Each component filter must match for the whole artifact
filter to match. An artifact filter is explicitly constructed by the
`artifactFilter` method:
::
def artifactFilter(name: NameFilter = ..., `type`: NameFilter = ..., extension: NameFilter = ..., classifier: NameFilter = ...): ArtifactFilter
Functions of type `Artifact => Boolean` are implicitly convertible to
an `ArtifactFilter`. As with `ConfigurationFilter`,
`ModuleFilter`, and `NameFilter`, the `&`, `|`, and `-`
methods may be used to combine `ArtifactFilter`\ s:
::
import sbt._
val a: ArtifactFilter = artifactFilter(classifier = "javadoc")
val b: ArtifactFilter = artifactFilter(`type` = "jar")
val c: ArtifactFilter = b - a
(The explicit types are optional here.)
DependencyFilter
----------------
A `DependencyFilter` is typically constructed by combining other
`DependencyFilter`\ s together using `&&`, `||`, and `--`.
Configuration, module, and artifact filters are `DependencyFilter`\ s
themselves and can be used directly as a `DependencyFilter` or they
can build up a `DependencyFilter`. Note that the symbols for the
`DependencyFilter` combining methods are doubled up to distinguish
them from the combinators of the more specific filters for
configurations, modules, and artifacts. These double-character methods
will always return a `DependencyFilter`, whereas the single character
methods preserve the more specific filter type. For example:
::
import sbt._
val df: DependencyFilter =
configurationFilter(name = "compile" | "test") && artifactFilter(`type` = "jar") || moduleFilter(name = "dispatch-*")
Here, we used `&&` and `||` to combine individual component filters
into a dependency filter, which can then be provided to the
`UpdateReport.matching` method. Alternatively, the
`UpdateReport.select` method may be used, which is equivalent to
calling `matching` with its arguments combined with `&&`.

View File

@ -1,23 +0,0 @@
===============
Detailed Topics
===============
This part of the documentation has pages documenting particular sbt topics in detail.
Before reading anything in here, you will need the information in the
:doc:`Getting Started Guide </Getting-Started/Welcome>` as a foundation.
Other resources include the :doc:`Examples </Examples/index>` and
:doc:`extending sbt </Extending/index>` areas on the wiki, and the
`API Documentation <../../api/index.html>`_
.. toctree::
:maxdepth: 2
Command-Details-Index
Configuration-Index
Dependency-Management-Index
Tasks-and-Commands
Plugins-and-Best-Practices
Advanced-Index
/Architecture/index
/Launcher/index

Binary file not shown.

Before

Width:  |  Height:  |  Size: 39 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 35 KiB

View File

@ -1,25 +0,0 @@
===============
Dormant Pages
===============
If you check out the documentation as a git repository, there's a `Dormant`
directory (this one) which contains:
- "redirect" pages (empty pages that point to some new page). If you
want to rename a page and think it has lots of incoming links from
outside the wiki, you could leave the old page name in here. The
directory name is not part of the link so it's safe to move the old
page into the `Dormant` directory.
- "clipboard" pages that contain some amount of useful text, that needs
to be extracted and organized, maybe moved to existing pages or the
:doc:`/faq` or maybe there's a new page that should exist. Basically content
that may be good but needs massaging into the big picture.
Ideally, pages in here have a note at the top pointing to alternative
content and explaining the status of the page.
.. toctree::
:maxdepth: 2
:glob:
*

View File

@ -1,336 +0,0 @@
*Wiki Maintenance Note:* This page has been *mostly* replaced by
:doc:`/Getting-Started/Full-Def` and other pages. It has some obsolete
terminology:
- we now avoid referring to build definition as "configuration" to
avoid confusion with compile configurations
- we now avoid referring to basic/light/quick vs. full configuration,
in favor of ".sbt build definition files" and ".scala build
definition files"
However, it may still be worth combing this page for examples or points
that are not made in new pages. Some stuff that may not be elsewhere:
- discussion of cycles
- discussion of build-level settings
- discussion of omitting or augmenting defaults
Also, the discussion of configuration delegation which is teased here,
needs to exist somewhere.
After extracting useful content, this page could simply be a redirect
(delete the content, link to the new pages about build definition).
There is a related page [[Introduction to Full Configurations]] which
could benefit from cleanup at the same time.
Full Configuration (Draft)
==========================
A full configuration consists of one or more Scala source files that
define concrete Builds. A Build defines project relationships and
configurations.
By Example
----------
Create a file with extension `.scala` in your `project/` directory
(such as `<your-project>/project/Build.scala`).
A sample `project/Build.scala`:
::
import sbt._
object MyBuild extends Build {
// Declare a project in the root directory of the build with ID "root".
// Declare an execution dependency on sub1.
lazy val root = Project("root", file(".")) aggregate(sub1)
// Declare a project with ID 'sub1' in directory 'a'.
// Declare a classpath dependency on sub2 in the 'test' configuration.
lazy val sub1: Project = Project("sub1", file("a")) dependsOn(sub2 % "test")
// Declare a project with ID 'sub2' in directory 'b'.
// Declare a configuration dependency on the root project.
lazy val sub2 = Project("sub2", file("b"), delegates = root :: Nil)
}
Cycles
~~~~~~
(It is probably best to skip this section and come back after reading
about project relationships. It is near the example for easier
reference.)
The configuration dependency `sub2 -> root` is specified as an
argument to the `delegates` parameter of `Project`, which is by-name
and of type `Seq[ProjectReference]` because by-name repeated
parameters are not allowed in Scala. There are also corresponding
by-name parameters `aggregate` and `dependencies` for execution and
classpath dependencies. By-name parameters, being non-strict, are useful
when there are cycles between the projects, as is the case for `root`
and `sub2`. In the example, there is a *configuration* dependency
`sub2 -> root`, a *classpath* dependency `sub1 -> sub2`, and an
*execution* dependency `root -> sub1`. This causes cycles at the
Scala level, but not within any single dependency type; cycles within
a single dependency type are not allowed.
Defining Projects
-----------------
An internal project is defined by constructing an instance of
`Project`. The minimum information for a new project is its ID string
and base directory. For example:
::
import sbt._
object MyBuild extends Build {
lazy val projectA = Project("a", file("subA"))
}
This constructs a project definition for a project with ID 'a' and located in the `subA/` directory. Here, `file(...)` is equivalent to `new File(...)` and is resolved relative to the build's base directory.
There are additional optional parameters to the Project constructor.
These parameters configure the project and declare project
relationships, as discussed in the next sections.
Project Settings
----------------
A full build definition can configure settings for a project, just like
a light configuration. Unlike a light configuration, the default
settings can be replaced or manipulated and sequences of settings can be
manipulated. In addition, a light configuration has default imports
defined. A full definition needs to import these explicitly. In
particular, all keys (like `name` and `version`) need to be imported
from `sbt.Keys`.
No defaults
~~~~~~~~~~~
For example, to define a build from scratch (with no default settings or
tasks):
::
import sbt._
import Keys._
object MyBuild extends Build {
lazy val projectA = Project("a", file("subA"), settings = Seq(name := "From Scratch"))
}
Augment Defaults
~~~~~~~~~~~~~~~~
To augment the default settings, the following Project definitions are
equivalent:
::
lazy val a1 = Project("a", file("subA")) settings(name := "Additional", version := "1.0")
lazy val a2 = Project("a", file("subA"),
settings = Defaults.defaultSettings ++ Seq(name := "Additional", version := "1.0")
)
Select Defaults
~~~~~~~~~~~~~~~
Web support is now split out into a plugin. With the plugin declared,
its settings can be selected like:
::
import sbt._
import Keys._
object MyBuild extends Build {
lazy val projectA = Project("a", file("subA"), settings = Web.webSettings)
}
Settings defined in `.sbt` files are appended to the settings for each
`Project` definition.
Build-level Settings
~~~~~~~~~~~~~~~~~~~~
Lastly, settings can be defined for the entire build. In general, these
are used when a setting is not defined for a project. These settings are
declared either by augmenting `Build.settings` or defining settings in
the scope of the current build. For example, to set the shell prompt to
be the id for the current project, the following setting can be added to
a `.sbt` file:
::
shellPrompt in ThisBuild := { s => Project.extract(s).currentProject.id + "> " }
(The value is a function `State => String`. `State` contains
everything about the build and will be discussed elsewhere.)
Alternatively, the setting can be defined in `Build.settings`:
::
import sbt._
import Keys._
object MyBuild extends Build {
override lazy val settings = super.settings :+
(shellPrompt := { s => Project.extract(s).currentProject.id + "> " })
...
}
Project Relationships
---------------------
There are three kinds of project relationships in sbt. These are
described by execution, classpath, and configuration dependencies.
Project References
~~~~~~~~~~~~~~~~~~
When defining a dependency on another project, you provide a
`ProjectReference`. In the simplest case, this is a `Project`
object. (Technically, there is an implicit conversion
`Project => ProjectReference`) This indicates a dependency on a
project within the same build. It is possible to declare a dependency on
a project in a directory separate from the current build, in a git
repository, or in a project packaged into a jar and accessible via
http/https. These are referred to as external builds and projects. You
can reference the root project in an external build with
`RootProject`:
.. code-block:: text
RootProject( file("/home/user/a-project") )
RootProject( uri("git://github.com/dragos/dupcheck.git") )
or a specific project within the external build can be referenced using
a `ProjectRef`:
::
ProjectRef( uri("git://github.com/dragos/dupcheck.git"), "project-id")
The fragment part of the git URI can be used to specify a specific
branch or tag. For example:
::
RootProject( uri("git://github.com/typesafehub/sbteclipse.git#v1.2") )
Ultimately, a `RootProject` is resolved to a `ProjectRef` once the
external project is loaded. Additionally, there are implicit conversions
`URI => RootProject` and `File => RootProject` so that URIs and
Files can be used directly. External, remote builds are retrieved or
checked out to a staging directory in the user's `.sbt` directory so
that they can be manipulated like local builds. Examples of using
project references follow in the next sections.
When using external projects, the `sbt.boot.directory` should be set
(see [[Setup\|Getting Started Setup]]) so that unnecessary
recompilations do not occur (see gh-35).
Execution Dependency
~~~~~~~~~~~~~~~~~~~~
If project A has an execution dependency on project B, then when you
execute a task on project A, it will also be run on project B. No
ordering of these tasks is implied. An execution dependency is declared
using the `aggregate` method on `Project`. For example:
::
lazy val root = Project(...) aggregate(sub1)
lazy val sub1 = Project(...) aggregate(sub2)
lazy val sub2 = Project(...) aggregate(ext)
lazy val ext = uri("git://github.com/dragos/dupcheck.git")
If 'clean' is executed on `sub2`, it will also be executed on `ext`
(the locally checked out version). If 'clean' is executed on `root`,
it will also be executed on `sub1`, `sub2`, and `ext`.
Aggregation can be controlled more finely by configuring the
`aggregate` setting. This setting is of type `Aggregation`:
::
sealed trait Aggregation
final case class Implicit(enabled: Boolean) extends Aggregation
final class Explicit(val deps: Seq[ProjectReference], val transitive: Boolean) extends Aggregation
This key can be set in any scope, including per-task scopes. By default,
aggregation is disabled for `run`, `console-quick`, `console`, and
`console-project`. Re-enabling it from the command line for the
current project for `run` would look like:
.. code-block:: console
> set aggregate in run := true
(There is an implicit `Boolean => Implicit` where `true` translates
to `Implicit(true)` and `false` translates to `Implicit(false)`).
Similarly, aggregation can be disabled for the current project using:
.. code-block:: console
> set aggregate in clean := false
`Explicit` allows finer control over the execution dependencies and
transitivity. An instance is normally constructed using
`Aggregation.apply`. No new projects may be introduced here (that is,
internal references have to be defined already in the Build's
`projects` and externals must be a dependency in the Build
definition). For example, to declare that `root/clean` aggregates
`sub1/clean` and `sub2/clean` intransitively (that is, excluding
`ext` even though `sub2` aggregates it):
.. code-block:: console
> set aggregate in clean := Aggregation(Seq(sub1, sub2), transitive = false)
Classpath Dependencies
~~~~~~~~~~~~~~~~~~~~~~
A classpath dependency declares that a project needs the full classpath
of another project on its classpath. Typically, this implies that the
dependency will ensure its classpath is up-to-date, such as by fetching
dependencies and recompiling modified sources.
A classpath dependency declaration consists of a project reference and
an optional configuration mapping. For example, to use project b's
`compile` configuration from project a's `test` configuration:
::
lazy val a = Project(...) dependsOn(b % "test->compile")
lazy val b = Project(...)
`"test->compile"` may be shortened to `"test"` in this case. The
`%` call may be omitted, in which case the mapping is
`"compile->compile"` by default.
A useful configuration declaration is `test->test`. This means to use
a dependency's test classes on the dependent's test classpath.
Multiple declarations may be separated by a semicolon. For example, the
following says to use the main classes of `b` for the compile
classpath of `a` as well as the test classes of `b` for the test
classpath of `a`:
::
lazy val a = Project(...) dependsOn(b % "compile;test->test")
lazy val b = Project(...)
Configuration Dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~
Suppose project A has a configuration dependency on project B. If a
setting is not found on project A, it will be looked up in project B.
This is one aspect of delegation and will be described in detail
elsewhere.

View File

@ -1,137 +0,0 @@
*Wiki Maintenance Note:* This page has been *mostly* replaced by
[[Getting Started Full Def]] and other pages. See the note at the top of
[[Full Configuration]] for details. If we can establish (or cause to be
true) that everything in here is covered elsewhere, this page can be
empty except for links to the new pages.
There are two types of file for configuring a build: a `build.sbt`
file in your project root directory, or a `Build.scala` file in your
`project/` directory. The former is often referred to as a "light",
"quick" or "basic" configuration and the latter is often referred to as
"full" configuration. This page is about "full" configuration.
Naming the Scala build file
===========================
`Build.scala` is the typical name for this build file but in reality
it can be called anything that ends with `.scala` as it is a standard
Scala source file and sbt will detect and use it regardless of its name.
Overview of what goes in the file
=================================
The most basic form of this file defines one object which extends
`sbt.Build` e.g.:
::
import sbt._
object AnyName extends Build {
val anyName = Project("anyname", file("."))
// Declarations go here
}
There needs to be at least one `sbt.Project` defined and in this case
we are giving it an arbitrary name and saying that it can be found in
the root of this project. In other words we are saying that this is a
build file to build the current project.
The declarations define any number of objects which can be used by sbt
to determine what to build and how to build it.
Most of the time you are not telling sbt what to do, you are simply
declaring the dependencies of your project and the particular settings
you require. sbt then uses this information to determine how to carry
out the tasks you give it when you interact with sbt on the command
line. For this reason the order of declarations tends to be unimportant.
When you define something and assign it to a val the name of the val is
often irrelevant. By defining it and making it part of an object, sbt
can then interrogate it and extract the information it requires. So, for
example, the line:
::
val apachenet = "commons-net" % "commons-net" % "2.0"
defines a dependency and assigns it to the val `apachenet` but, unless
you refer to that val again in the build file, the name of it is of no
significance to sbt. sbt simply sees that the dependency object exists
and uses it when it needs it.
Combining "light" and "full" configuration files
================================================
It is worth noting at this stage that you can have both a `build.sbt`
file and a `Build.scala` file for the same project. If you do this,
sbt will append the configurations in `build.sbt` to those in the
`Build.scala` file. In fact you can also have multiple ".sbt" files in
your root directory and they are all appended together.
A simple example comparing a "light" and "full" configuration of the same project
=================================================================================
Here is a short "light" `build.sbt` file which defines a build project
with a single test dependency on "scalacheck":
::
name := "My Project"
version := "1.0"
organization := "org.myproject"
scalaVersion := "2.9.0-1"
libraryDependencies += "org.scalatest" % "scalatest_2.9.0" % "1.4.1" % "test"
Here is an equivalent "full" `Build.scala` file which defines exactly
the same thing:
::
import sbt._
import Keys._
object MyProjectBuild extends Build {
val mySettings = Defaults.defaultSettings ++ Seq(
name := "My Project",
version := "1.0",
organization := "org.myproject",
scalaVersion := "2.9.0-1",
libraryDependencies += "org.scalatest" % "scalatest_2.9.0" % "1.4.1" % "test"
)
val myProject = Project("MyProject", file("."), settings = mySettings)
}
Note that we have to explicitly declare the build and project and we
have to explicitly append our settings to the default settings. All of
this work is done for us when we use a "light" build file.
To understand what is really going on you may find it helpful to see
this `Build.scala` without the imports and associated implicit
conversions:
::
object MyProjectBuild extends sbt.Build {
val mySettings = sbt.Defaults.defaultSettings ++ scala.Seq(
sbt.Keys.name := "My Project",
sbt.Keys.version := "1.0",
sbt.Keys.organization := "org.myproject",
sbt.Keys.scalaVersion := "2.9.0-1",
sbt.Keys.libraryDependencies += sbt.toGroupID("org.scalatest").%("scalatest_2.9.0").%("1.4.1").%("test")
)
val myProject = sbt.Project("MyProject", new java.io.File("."), settings = mySettings)
}

View File

@ -1,324 +0,0 @@
*Wiki Maintenance Note:* This page is a dumping ground for little bits
of text, examples, and information that needs to find a new home
somewhere else on the wiki.
Snippets of docs that need to move to another page
==================================================
Temporarily change the logging level and configure how stack traces are
displayed by modifying the `log-level` or `trace-level` settings:
.. code-block:: console
> set logLevel := Level.Warn
Valid `Level` values are `Debug, Info, Warn, Error`.
You can run an action for multiple versions of Scala by prefixing the
action with `+`. See [[Cross Build]] for details. You can temporarily
switch to another version of Scala using `++ <version>`. This version
does not have to be listed in your build definition, but it does have to
be available in a repository. You can also include the initial command
to run after switching to that version. For example:
.. code-block:: console
> ++2.9.1 console-quick
...
Welcome to Scala version 2.9.1.final (Java HotSpot(TM) Server VM, Java 1.6.0).
...
scala>
...
> ++2.8.1 console-quick
...
Welcome to Scala version 2.8.1 (Java HotSpot(TM) Server VM, Java 1.6.0).
...
scala>
Manual Dependency Management
============================
Manually managing dependencies involves copying any jars that you want
to use to the `lib` directory. sbt will put these jars on the
classpath during compilation, testing, running, and when using the
interpreter. You are responsible for adding, removing, updating, and
otherwise managing the jars in this directory. No modifications to your
project definition are required to use this method unless you would like
to change the location of the directory you store the jars in.
To change the directory jars are stored in, change the
`unmanaged-base` setting in your project definition. For example, to
use `custom_lib/`:
::
unmanagedBase := baseDirectory.value / "custom_lib"
If you want more control and flexibility, override the
`unmanaged-jars` task, which ultimately provides the manual
dependencies to sbt. The default implementation is roughly:
::
unmanagedJars in Compile := (baseDirectory.value ** "*.jar").classpath
If you want to add jars from multiple directories in addition to the
default directory, you can do:
::
unmanagedJars in Compile ++= {
val base = baseDirectory.value
val baseDirectories = (base / "libA") +++ (base / "b" / "lib") +++ (base / "libC")
val customJars = (baseDirectories ** "*.jar") +++ (base / "d" / "my.jar")
customJars.classpath
}
See [[Paths]] for more information on building up paths.
Resolver.withDefaultResolvers method
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To use the local and Maven Central repositories, but not the Scala Tools
releases repository:
::
externalResolvers :=
Resolver.withDefaultResolvers(resolvers.value, mavenCentral = true, scalaTools = false)
Explicit URL
~~~~~~~~~~~~
If your project requires a dependency that is not present in a
repository, a direct URL to its jar can be specified with the `from`
method as follows:
::
libraryDependencies += "slinky" % "slinky" % "2.1" from "http://slinky2.googlecode.com/svn/artifacts/2.1/slinky.jar"
The URL is only used as a fallback if the dependency cannot be found
through the configured repositories. Also, when you publish a project, a
pom or ivy.xml is created listing your dependencies; the explicit URL is
not included in this published metadata.
Disable Transitivity
~~~~~~~~~~~~~~~~~~~~
By default, sbt fetches all dependencies, transitively. (That is, it
downloads the dependencies of the dependencies you list.)
In some instances, you may find that the dependencies listed for a
project aren't necessary for it to build. Avoid fetching artifact
dependencies with `intransitive()`, as in this example:
::
libraryDependencies += "org.apache.felix" % "org.apache.felix.framework" % "1.8.0" intransitive()
Classifiers
~~~~~~~~~~~
You can specify the classifier for a dependency using the `classifier`
method. For example, to get the jdk15 version of TestNG:
::
libraryDependencies += "org.testng" % "testng" % "5.7" classifier "jdk15"
To obtain particular classifiers for all dependencies transitively, run
the `update-classifiers` task. By default, this resolves all artifacts
with the `sources` or `javadoc` classifier. Select the classifiers to
obtain by configuring the `transitive-classifiers` setting. For
example, to only retrieve sources:
::
transitiveClassifiers := Seq("sources")
Extra Attributes
~~~~~~~~~~~~~~~~
[Extra attributes] can be specified by passing key/value pairs to the
`extra` method.
To select dependencies by extra attributes:
::
libraryDependencies += "org" % "name" % "rev" extra("color" -> "blue")
To define extra attributes on the current project:
::
projectID ~= { id =>
id extra("color" -> "blue", "component" -> "compiler-interface")
}
Inline Ivy XML
~~~~~~~~~~~~~~
sbt additionally supports directly specifying the configurations or
dependencies sections of an Ivy configuration file inline. You can mix
this with inline Scala dependency and repository declarations.
For example:
::
ivyXML :=
<dependencies>
<dependency org="javax.mail" name="mail" rev="1.4.2">
<exclude module="activation"/>
</dependency>
</dependencies>
Ivy Home Directory
~~~~~~~~~~~~~~~~~~
By default, sbt uses the standard Ivy home directory location
`${user.home}/.ivy2/`. This can be configured machine-wide, for use by
both the sbt launcher and by projects, by setting the system property
`sbt.ivy.home` in the sbt startup script (described in
[[Setup\|Getting Started Setup]]).
For example:
.. code-block:: console
java -Dsbt.ivy.home=/tmp/.ivy2/ ...
Checksums
~~~~~~~~~
sbt ([through Ivy]) verifies the checksums of downloaded files by
default. It also publishes checksums of artifacts by default. The
checksums to use are specified by the *checksums* setting.
To disable checksum checking during update:
::
checksums in update := Nil
To disable checksum creation during artifact publishing:
::
checksums in publishLocal := Nil
checksums in publish := Nil
The default value is:
::
checksums := Seq("sha1", "md5")
Publishing
~~~~~~~~~~
Finally, see [[Publishing]] for how to publish your project.
Maven/Ivy
---------
For this method, create the configuration files as you would for Maven
(`pom.xml`) or Ivy (`ivy.xml` and optionally `ivysettings.xml`).
External configuration is selected by using one of the following
expressions.
Ivy settings (resolver configuration)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
externalIvySettings()
or
::
externalIvySettings(baseDirectory(_ / "custom-settings-name.xml"))
Ivy file (dependency configuration)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
externalIvyFile()
or
::
externalIvyFile(baseDirectory(_ / "custom-name.xml"))
Because Ivy files specify their own configurations, sbt needs to know
which configurations to use for the compile, runtime, and test
classpaths. For example, to specify that the Compile classpath should
use the 'default' configuration:
::
classpathConfiguration in Compile := config("default")
Maven pom (dependencies only)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
::
externalPom()
or
::
externalPom(baseDirectory(_ / "custom-name.xml"))
Full Ivy Example
~~~~~~~~~~~~~~~~
For example, a `build.sbt` using external Ivy files might look like:
::
externalIvySettings()
externalIvyFile( baseDirectory { base => base / "ivyA.xml"} )
classpathConfiguration in Compile := Compile
classpathConfiguration in Test := Test
classpathConfiguration in Runtime := Runtime
Known limitations
~~~~~~~~~~~~~~~~~
Maven support is dependent on Ivy's support for Maven POMs. Known issues
with this support:
- Specifying `relativePath` in the `parent` section of a POM will
produce an error.
- Ivy ignores repositories specified in the POM. A workaround is to
specify repositories inline or in an Ivy `ivysettings.xml` file.
Configuration dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~
The GSG on multi-project builds doesn't describe delegation among
configurations. The FAQ entry about porting multi-project build from 0.7
mentions "configuration dependencies" but there's nothing really to link
to that explains them.
These should be FAQs (maybe just pointing to topic pages)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- Run your program in its own VM
- Run your program with a particular version of Scala
- Run your webapp within an embedded jetty server
- Create a WAR that can be deployed to an external app server

View File

@ -1,407 +0,0 @@
*Wiki Maintenance Note:* This page has been partly replaced by [[Getting
Started Basic Def]] and [[Getting Started More About Settings]]. It has
some obsolete terminology:
- we now avoid referring to build definition as "configuration" to
avoid confusion with compile configurations
- we now avoid referring to basic/light/quick vs. full configuration,
in favor of ".sbt build definition files" and ".scala build
definition files"
However, it may still be worth combing this page for examples or points
that are not made in new pages. We may want to add FAQs or topic pages
to supplement the Getting Started pages with some of that information.
After doing so, this page could simply be a redirect (delete the
content, link to the new pages about build definition).
Introduction
------------
A build definition is written in Scala. There are two types of
definitions: light and full. A light definition
is a quick way of configuring a build, consisting of a list of Scala
expressions describing project settings. A :doc:`full definition <Full-Configuration>` is
made up of one or more Scala source files that describe relationships
between projects and introduce new configurations and settings. This
page introduces the `Setting` type, which is used by light and full
definitions for general configuration.
Introductory Examples
~~~~~~~~~~~~~~~~~~~~~
Basic examples of each type of definition are shown below for the
purpose of getting an idea of what they look like, not for full
comprehension of details, which are described at :doc:`light definition <Basic-Configuration>`
and :doc:`full definition <Full-Configuration>`.
`<base>/build.sbt` (light)
::
name := "My Project"
libraryDependencies += "junit" % "junit" % "4.8" % "test"
`<base>/project/Build.scala` (full)
::
import sbt._
import Keys._
object MyBuild extends Build
{
lazy val root = Project("root", file(".")) dependsOn(sub)
lazy val sub = Project("sub", file("sub")) settings(
name := "My Project",
libraryDependencies += "junit" % "junit" % "4.8" % "test"
)
}
Important Settings Background
-----------------------------
The fundamental type of a configurable in sbt is a `Setting[T]`. Each
line in the `build.sbt` example above is of this type. The arguments
to the `settings` method in the `Build.scala` example are of type
`Setting[T]`. Specifically, the `name` setting has type
`Setting[String]` and the `libraryDependencies` setting has type
`Setting[Seq[ModuleID]]`, where `ModuleID` represents a dependency.
Throughout the documentation, many examples show a setting, such as:
::
libraryDependencies += "junit" % "junit" % "4.8" % "test"
This setting expression either goes in a :doc:`light definition <Basic-Configuration>`
`(build.sbt)` as is or in the `settings` of a `Project` instance
in a :doc:`full definition <Full-Configuration>`
`(Build.scala)` as shown in the example. This is an important point to
understanding the context of examples in the documentation. (That is,
you now know where to copy and paste examples.)
A `Setting[T]` describes how to initialize a setting of type `T`.
The settings shown in the examples are expressions, not statements. In
particular, there is no hidden mutable map that is being modified. Each
`Setting[T]` is a value that describes an update to a map. The actual
map is rarely directly referenced by user code. It is not the final map
that is usually important, but the operations on the map.
To emphasize this, the setting in the following `Build.scala` fragment
*is ignored* because it is a value that needs to be included in the
`settings` of a `Project`. (Unfortunately, Scala will discard
non-Unit values to get Unit, which is why there is no compile error.)
::
object Bad extends Build {
libraryDependencies += "junit" % "junit" % "4.8" % "test"
}
::
object Good extends Build
{
lazy val root = Project("root", file(".")) settings(
libraryDependencies += "junit" % "junit" % "4.8" % "test"
)
}
Declaring a Setting
-------------------
There is fundamentally one type of initialization, represented by the
`<<=` method. The other initialization methods `:=`, `+=`,
`++=`, `<+=`, `<++=`, and `~=` are convenience methods that can
be defined in terms of `<<=`.
The motivation behind the method names is:
- All methods end with `=` to obtain the lowest possible infix
precedence.
- A method starting with `<` indicates that the initialization uses
other settings.
- A single `+` means a single value is expected and will be appended
to the current sequence.
- `++` means a `Seq[T]` is expected. The sequence will be appended
to the current sequence.
The following sections include descriptions and examples of each
initialization method. The descriptions use "will initialize" or "will
append" to emphasize that they construct a value describing an update
and do not mutate anything. Each setting may be directly included in a
light configuration (build.sbt), appropriately separated by blank lines.
For a full configuration (Build.scala), the setting must go in a
settings Seq as described in the previous section. Information about the
types of the left and right hand sides of the methods follows this
section.
:=
~~
`:=` is used to define a setting that overwrites any previous value
without referring to other settings. For example, the following defines
a setting that will set *name* to "My Project" regardless of whether
*name* has already been initialized.
::
name := "My Project"
No other settings are used. The value assigned is just a constant.
+= and ++=
~~~~~~~~~~
`+=` is used to define a setting that will append a single value to
the current sequence without referring to other settings. For example,
the following defines a setting that will append a JUnit dependency to
*libraryDependencies*. No other settings are referenced.
::
libraryDependencies += "junit" % "junit" % "4.8" % "test"
The related method `++=` appends a sequence to the current sequence,
also without using other settings. For example, the following defines a
setting that will add dependencies on ScalaCheck and specs to the
current list of dependencies. Because it will append a `Seq`, it uses
++= instead of +=.
::
libraryDependencies ++= Seq(
"org.scala-tools.testing" %% "scalacheck" % "1.9" % "test",
"org.scala-tools.testing" %% "specs" % "1.6.8" % "test"
)
The types involved in += and ++= are constrained by the existence of an
implicit parameter of type Append.Value[A,B] in the case of += or
Append.Values[A,B] in the case of ++=. Here, B is the type of the value
being appended and A is the type of the setting that the value is being
appended to. See
`Append <../../api/sbt/Append$.html>`_
for the provided instances.
~=
~~
`~=` is used to transform the current value of a setting. For example,
the following defines a setting that will remove `-Y` compiler options
from the current list of compiler options.
::
scalacOptions in Compile ~= { (options: Seq[String]) =>
options filterNot ( _ startsWith "-Y" )
}
The earlier declaration of JUnit as a library dependency using `+=`
could also be written as:
::
libraryDependencies ~= { (deps: Seq[ModuleID]) =>
deps :+ ("junit" % "junit" % "4.8" % "test")
}
<<=
~~~
The most general method is <<=. All other methods can be implemented in
terms of <<=. <<= defines a setting using other settings, possibly
including the previous value of the setting being defined. For example,
declaring JUnit as a dependency using <<= would look like:
::
libraryDependencies <<= libraryDependencies apply { (deps: Seq[ModuleID]) =>
// Note that :+ is a method on Seq that appends a single value
deps :+ ("junit" % "junit" % "4.8" % "test")
}
This defines a setting that will apply the provided function to the
previous value of *libraryDependencies*. `apply` and `Seq[ModuleID]`
are explicit for demonstration only and may be omitted.
<+= and <++=
~~~~~~~~~~~~
The <+= method is a hybrid of the += and <<= methods. Similarly, <++= is
a hybrid of the ++= and <<= methods. These methods are convenience
methods for using other settings to append to the current value of a
setting.
For example, the following will add a dependency on the Scala compiler
to the current list of dependencies. Because the *scalaVersion* setting
is used, the method is <+= instead of +=.
::
libraryDependencies <+= scalaVersion( "org.scala-lang" % "scala-compiler" % _ )
This next example adds a dependency on the Scala compiler to the current
list of dependencies. Because another setting (*scalaVersion*) is used
and a Seq is appended, the method is <++=.
::
libraryDependencies <++= scalaVersion { sv =>
("org.scala-lang" % "scala-compiler" % sv) ::
("org.scala-lang" % "scala-swing" % sv) ::
Nil
}
The types involved in <+= and <++=, like += and ++=, are constrained by
the existence of an implicit parameter of type Append.Value[A,B] in the
case of <+= or Append.Values[A,B] in the case of <++=. Here, B is the
type of the value being appended and A is the type of the setting that
the value is being appended to. See
`Append <../../api/sbt/Append$.html>`_
for the provided instances.
Setting types
-------------
This section provides information about the types of the left and
right-hand sides of the initialization methods. It is currently
incomplete.
Setting Keys
~~~~~~~~~~~~
The left hand side of a setting definition is of type
`ScopedSetting <../../api/sbt/ScopedSetting.html>`_.
This type has two parts: a key (of type
`SettingKey <../../api/sbt/SettingKey.html>`_)
and a scope (of type
`Scope <../../api/sbt/Scope$.html>`_). An
unspecified scope is like using `this` to refer to the current
context. The previous examples on this page have not defined an explicit
scope. See [[Inspecting Settings]] for details on the axes that make up
scopes.
The target (the value on the left) of a method like `:=` identifies
one of the main constructs in sbt: a setting, a task, or an input task.
It is not an actual setting or task, but a key representing a setting or
task. A setting is a value assigned when a project is loaded. A task is
a unit of work that is run on-demand after a project is loaded and
produces a value. An input task, previously known as a method task in
sbt 0.7 and earlier, accepts an input string and produces a task to be
run. (The renaming is because it can accept arbitrary input in 0.10+ and
not just a space-delimited sequence of arguments like in 0.7.)
A setting key has type
`SettingKey <../../api/sbt/SettingKey.html>`_,
a task key has type
`TaskKey <../../api/sbt/TaskKey.html>`_,
and an input task has type
`InputKey <../../api/sbt/InputKey.html>`_.
The remainder of this section only discusses settings. See [[Tasks]] and
[[Input Tasks]] for details on the other types (those pages assume an
understanding of this page).
To construct a
`ScopedSetting <../../api/sbt/ScopedSetting.html>`_,
select the key and then scope it using the `in` method (see the
`ScopedSetting <../../api/sbt/ScopedSetting.html>`_
for API details). For example, the setting for compiler options for the
test sources is referenced using the *scalacOptions* key and the
`Test` configuration in the current project.
::
val ref: ScopedSetting[Seq[String]] = scalacOptions in Test
The current project doesn't need to be explicitly specified, since that
is the default in most cases. Some settings are specific to a task, in
which case the task should be specified as part of the scope as well.
For example, the compiler options used for the *console* task for test
sources is referenced like:
::
val ref: ScopedSetting[Seq[String]] = scalacOptions in Test in console
In these examples, the type of the setting reference key is given
explicitly and the key is assigned to a value to emphasize that it is a
normal (immutable) Scala value and can be manipulated and passed around
as such.
Computing the value for a setting
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The right hand side of a setting definition varies by the initialization
method used. In the case of :=, +=, ++=, and ~=, the type of the
argument is straightforward (see the
`ScopedSetting <../../api/sbt/ScopedSetting.html>`_
API). For <<=, <+=, and <++=, the type is `Initialize[T]` (for <<= and
<+=) or `Initialize[Seq[T]]` (for <++=). This section discusses the
`Initialize <../../api/sbt/Init$Initialize.html>`_
type.
A value of type `Initialize[T]` represents a computation that takes
the values of other settings as inputs. For example, in the following
setting, the argument to <<= is of type `Initialize[File]`:
::
scalaSource in Compile <<= baseDirectory {
(base: File) => base / "src"
}
This example can be written more explicitly as:
::
{
val key: ScopedSetting[File] = scalaSource.in(Compile)
val init: Initialize[File] = baseDirectory.apply( (base: File) => base / "src" )
key.<<=(init)
}
To construct a value of type `Initialize`, construct a tuple of up to
nine input `ScopedSetting`\ s. Then, define the function that will
compute the value of the setting given the values for these input
settings.
::
val path: Initialize[File] =
(baseDirectory, name, version).apply( (base: File, n: String, v: String) =>
base / (n + "-" + v + ".jar")
)
This example takes the base directory, project name, and project version
as inputs. The keys for these settings are defined in [sbt.Keys], along
with all other built-in keys. The argument to the `apply` method is a
function that takes the values of those settings and computes a new
value. In this case, that value is the path of a jar.
Initialize[Task[T]]
~~~~~~~~~~~~~~~~~~~
To initialize tasks, the procedure is similar. There are a few
differences. First, the inputs are of type [ScopedTaskable]. This means
that either settings
(`ScopedSetting <../../api/sbt/ScopedSetting.html>`_)
or tasks ([ScopedTask]) may be used as the input to a task. Second, the
name of the method used is `map` instead of `apply` and the
resulting value is of type `Initialize[Task[T]]`. In the following
example, the inputs are the [report\|Update-Report] produced by the
*update* task and the context *configuration*. The function computes the
locations of the dependencies for that configuration.
::
val mainDeps: Initialize[Task[File]] =
(update, configuration).map( (report: UpdateReport, config: Configuration) =>
report.select(configuration = config.name)
)
As before, *update* and *configuration* are defined in
`Keys <../../sxr/sbt/Keys.scala.html>`_.
*update* is of type `TaskKey[UpdateReport]` and *configuration* is of
type `SettingKey[Configuration]`.

View File

@ -1,71 +0,0 @@
========================
Advanced Command Example
========================
This is an advanced example showing some of the power of the new
settings system. It shows how to temporarily modify all declared
dependencies in the build, regardless of where they are defined. It
directly operates on the final `Seq[Setting[_]]` produced from every
setting involved in the build.
The modifications are applied by running *canonicalize*. A *reload* or
using *set* reverts the modifications, requiring *canonicalize* to be
run again.
This particular example shows how to transform all declared dependencies
on ScalaCheck to use version 1.8. As an exercise, you might try
transforming other dependencies, the repositories used, or the scalac
options used. It is possible to add or remove settings as well.
This kind of transformation is possible directly on the settings of
Project, but it would not include settings automatically added from
plugins or build.sbt files. What this example shows is doing it
unconditionally on all settings in all projects in all builds, including
external builds.
::
import sbt._
import Keys._
object Canon extends Plugin
{
// Registers the canonicalize command in every project
override def settings = Seq(commands += canonicalize)
// Define the command. This takes the existing settings (including any session settings)
// and applies 'f' to each Setting[_]
def canonicalize = Command.command("canonicalize") { (state: State) =>
val extracted = Project.extract(state)
import extracted._
val transformed = session.mergeSettings map ( s => f(s) )
val newStructure = Load.reapply(transformed, structure)
Project.setProject(session, newStructure, state)
}
// Transforms a Setting[_].
def f(s: Setting[_]): Setting[_] = s.key.key match {
// transform all settings that modify libraryDependencies
case Keys.libraryDependencies.key =>
// hey scalac. T == Seq[ModuleID]
s.asInstanceOf[Setting[Seq[ModuleID]]].mapInit(mapLibraryDependencies)
// preserve other settings
case _ => s
}
// This must be idempotent because it gets applied after every transformation.
// That is, if the user does:
// libraryDependencies += a
// libraryDependencies += b
// then this method will be called for Seq(a) and Seq(a,b)
def mapLibraryDependencies(key: ScopedKey[Seq[ModuleID]], value: Seq[ModuleID]): Seq[ModuleID] =
value map mapSingle
// This is the fundamental transformation.
// Here we map all declared ScalaCheck dependencies to be version 1.8
def mapSingle(module: ModuleID): ModuleID =
if(module.name == "scalacheck")
module.copy(revision = "1.8")
else
module
}

View File

@ -1,73 +0,0 @@
Advanced Configurations Example
-------------------------------
This is an example :doc:`full build definition </Getting-Started/Full-Def>` that
demonstrates using Ivy configurations to group dependencies.
The `utils` module provides utilities for other modules. It uses Ivy
configurations to group dependencies so that a dependent project doesn't
have to pull in all dependencies if it only uses a subset of
functionality. This can be an alternative to having multiple utilities
modules (and consequently, multiple utilities jars).
In this example, consider a `utils` project that provides utilities
related to both Scalate and Saxon. It therefore needs both Scalate and
Saxon on the compilation classpath and a project that uses all of the
functionality of 'utils' will need these dependencies as well. However,
project `a` only needs the utilities related to Scalate, so it doesn't
need Saxon. By depending only on the `scalate` configuration of
`utils`, it only gets the Scalate-related dependencies.
::
import sbt._
import Keys._
object B extends Build
{
/********** Projects ************/
// An example project that only uses the Scalate utilities.
lazy val a = Project("a", file("a")) dependsOn(utils % "compile->scalate")
// An example project that uses the Scalate and Saxon utilities.
// For the configurations defined here, this is equivalent to doing dependsOn(utils),
// but if there were more configurations, it would select only the Scalate and Saxon
// dependencies.
lazy val b = Project("b", file("b")) dependsOn(utils % "compile->scalate,saxon")
// Defines the utilities project
lazy val utils = Project("utils", file("utils")) settings(utilsSettings : _*)
def utilsSettings: Seq[Setting[_]] =
// Add the src/common/scala/ compilation configuration.
inConfig(Common)(Defaults.configSettings) ++
// Publish the common artifact
addArtifact(artifact in (Common, packageBin), packageBin in Common) ++ Seq(
// We want our Common sources to have access to all of the dependencies on the classpaths
// for compile and test, but when depended on, it should only require dependencies in 'common'
classpathConfiguration in Common := CustomCompile,
// Modify the default Ivy configurations.
// 'overrideConfigs' ensures that Compile is replaced by CustomCompile
ivyConfigurations := overrideConfigs(Scalate, Saxon, Common, CustomCompile)(ivyConfigurations.value),
// Put all dependencies without an explicit configuration into Common (optional)
defaultConfiguration := Some(Common),
// Declare dependencies in the appropriate configurations
libraryDependencies ++= Seq(
"org.fusesource.scalate" % "scalate-core" % "1.5.0" % "scalate",
"org.squeryl" %% "squeryl" % "0.9.4" % "scalate",
"net.sf.saxon" % "saxon" % "8.7" % "saxon"
)
)
/********* Configurations *******/
lazy val Scalate = config("scalate") extend(Common) describedAs("Dependencies for using Scalate utilities.")
lazy val Common = config("common") describedAs("Dependencies required in all configurations.")
lazy val Saxon = config("saxon") extend(Common) describedAs("Dependencies for using Saxon utilities.")
// Define a customized compile configuration that includes
// dependencies defined in our other custom configurations
lazy val CustomCompile = config("compile") extend(Saxon, Common, Scalate)
}

View File

@ -1,156 +0,0 @@
Full Configuration Example
--------------------------
Full configurations are written in Scala, so this example would be
placed as project/Build.scala, not build.sbt. The build can be split
into multiple files.
::
import sbt._
import Keys._
object BuildSettings {
val buildOrganization = "odp"
val buildVersion = "2.0.29"
val buildScalaVersion = "2.9.0-1"
val buildSettings = Seq (
organization := buildOrganization,
version := buildVersion,
scalaVersion := buildScalaVersion,
shellPrompt := ShellPrompt.buildShellPrompt
)
}
// Shell prompt which show the current project,
// git branch and build version
object ShellPrompt {
object devnull extends ProcessLogger {
def info (s: => String) {}
def error (s: => String) { }
def buffer[T] (f: => T): T = f
}
def currBranch = (
("git status -sb" lines_! devnull headOption)
getOrElse "-" stripPrefix "## "
)
val buildShellPrompt = {
(state: State) => {
val currProject = Project.extract (state).currentProject.id
"%s:%s:%s> ".format (
currProject, currBranch, BuildSettings.buildVersion
)
}
}
}
object Resolvers {
val sunrepo = "Sun Maven2 Repo" at "http://download.java.net/maven/2"
val sunrepoGF = "Sun GF Maven2 Repo" at "http://download.java.net/maven/glassfish"
val oraclerepo = "Oracle Maven2 Repo" at "http://download.oracle.com/maven"
val oracleResolvers = Seq (sunrepo, sunrepoGF, oraclerepo)
}
object Dependencies {
val logbackVer = "0.9.16"
val grizzlyVer = "1.9.19"
val logbackcore = "ch.qos.logback" % "logback-core" % logbackVer
val logbackclassic = "ch.qos.logback" % "logback-classic" % logbackVer
val jacksonjson = "org.codehaus.jackson" % "jackson-core-lgpl" % "1.7.2"
val grizzlyframwork = "com.sun.grizzly" % "grizzly-framework" % grizzlyVer
val grizzlyhttp = "com.sun.grizzly" % "grizzly-http" % grizzlyVer
val grizzlyrcm = "com.sun.grizzly" % "grizzly-rcm" % grizzlyVer
val grizzlyutils = "com.sun.grizzly" % "grizzly-utils" % grizzlyVer
val grizzlyportunif = "com.sun.grizzly" % "grizzly-portunif" % grizzlyVer
val sleepycat = "com.sleepycat" % "je" % "4.0.92"
val apachenet = "commons-net" % "commons-net" % "2.0"
val apachecodec = "commons-codec" % "commons-codec" % "1.4"
val scalatest = "org.scalatest" % "scalatest_2.9.0" % "1.4.1" % "test"
}
object CDAP2Build extends Build {
import Resolvers._
import Dependencies._
import BuildSettings._
// Sub-project specific dependencies
val commonDeps = Seq (
logbackcore,
logbackclassic,
jacksonjson,
scalatest
)
val serverDeps = Seq (
grizzlyframwork,
grizzlyhttp,
grizzlyrcm,
grizzlyutils,
grizzlyportunif,
sleepycat,
scalatest
)
val pricingDeps = Seq (apachenet, apachecodec, scalatest)
lazy val cdap2 = Project (
"cdap2",
file ("."),
settings = buildSettings
) aggregate (common, server, compact, pricing, pricing_service)
lazy val common = Project (
"common",
file ("cdap2-common"),
settings = buildSettings ++ Seq (libraryDependencies ++= commonDeps)
)
lazy val server = Project (
"server",
file ("cdap2-server"),
settings = buildSettings ++ Seq (resolvers := oracleResolvers,
libraryDependencies ++= serverDeps)
) dependsOn (common)
lazy val pricing = Project (
"pricing",
file ("cdap2-pricing"),
settings = buildSettings ++ Seq (libraryDependencies ++= pricingDeps)
) dependsOn (common, compact, server)
lazy val pricing_service = Project (
"pricing-service",
file ("cdap2-pricing-service"),
settings = buildSettings
) dependsOn (pricing, server)
lazy val compact = Project (
"compact",
file ("compact-hashmap"),
settings = buildSettings
)
}
External Builds
---------------
- `Mojolly Backchat Build <http://gist.github.com/1021873>`_
- `Scalaz
Build <https://github.com/scalaz/scalaz/blob/master/project/ScalazBuild.scala>`_
- Source Code Generation
- Generates Scaladoc and Scala X-Ray HTML Sources, with a unified view
of source from all sub-projects
- Builds an archive with the artifacts from all modules
- "Roll your own" approach to appending the Scala version to the module
id of dependencies to allow using snapshot releases of Scala.

View File

@ -1,199 +0,0 @@
=========================
Basic Definition Examples
=========================
Listed here are some examples of settings (each setting is independent).
See :doc:`.sbt build definition </Getting-Started/Basic-Def>` for details.
*Please note* that blank lines are used to separate individual settings.
Avoid using blank lines within a single multiline expression. As
explained in :doc:`.sbt build definition </Getting-Started/Basic-Def>`, each
setting is otherwise a normal Scala expression with expected type
`sbt.SettingDefinition <../../api/sbt/Init$SettingsDefinition.html>`_.
::
// set the name of the project
name := "My Project"
version := "1.0"
organization := "org.myproject"
// set the Scala version used for the project
scalaVersion := "2.9.0-SNAPSHOT"
// set the main Scala source directory to be <base>/src
scalaSource in Compile := baseDirectory.value / "src"
// set the Scala test source directory to be <base>/test
scalaSource in Test := baseDirectory.value / "test"
// add a test dependency on ScalaCheck
libraryDependencies += "org.scala-tools.testing" %% "scalacheck" % "1.8" % "test"
// add compile dependencies on some dispatch modules
libraryDependencies ++= Seq(
"net.databinder" %% "dispatch-meetup" % "0.7.8",
"net.databinder" %% "dispatch-twitter" % "0.7.8"
)
// Set a dependency based partially on a val.
{
val libosmVersion = "2.5.2-RC1"
libraryDependencies += "net.sf.travelingsales" % "osmlib" % libosmVersion from "http://downloads.sourceforge.net/project/travelingsales/libosm/"+libosmVersion+"/libosm-"+libosmVersion+".jar"
}
// reduce the maximum number of errors shown by the Scala compiler
maxErrors := 20
// increase the time between polling for file changes when using continuous execution
pollInterval := 1000
// append several options to the list of options passed to the Java compiler
javacOptions ++= Seq("-source", "1.5", "-target", "1.5")
// append -deprecation to the options passed to the Scala compiler
scalacOptions += "-deprecation"
// define the statements initially evaluated when entering 'console', 'consoleQuick', or 'consoleProject'
initialCommands := """
import System.{currentTimeMillis => now}
def time[T](f: => T): T = {
val start = now
try { f } finally { println("Elapsed: " + (now - start)/1000.0 + " s") }
}
"""
// set the initial commands when entering 'console' or 'consoleQuick', but not 'consoleProject'
initialCommands in console := "import myproject._"
// set the main class for packaging the main jar
// 'run' will still auto-detect and prompt
// change Compile to Test to set it for the test jar
mainClass in (Compile, packageBin) := Some("myproject.MyMain")
// set the main class for the main 'run' task
// change Compile to Test to set it for 'test:run'
mainClass in (Compile, run) := Some("myproject.MyMain")
// add <base>/input to the files that '~' triggers on
watchSources += baseDirectory.value / "input"
// add a maven-style repository
resolvers += "name" at "url"
// add a sequence of maven-style repositories
resolvers ++= Seq("name" at "url")
// define the repository to publish to
publishTo := Some("name" at "url")
// set Ivy logging to be at the highest level
ivyLoggingLevel := UpdateLogging.Full
// disable updating dynamic revisions (including -SNAPSHOT versions)
offline := true
// set the prompt (for this build) to include the project id.
shellPrompt in ThisBuild := { state => Project.extract(state).currentRef.project + "> " }
// set the prompt (for the current project) to include the username
shellPrompt := { state => System.getProperty("user.name") + "> " }
// disable printing timing information, but still print [success]
showTiming := false
// disable printing a message indicating the success or failure of running a task
showSuccess := false
// change the format used for printing task completion time
timingFormat := {
import java.text.DateFormat
DateFormat.getDateTimeInstance(DateFormat.SHORT, DateFormat.SHORT)
}
// disable using the Scala version in output paths and artifacts
crossPaths := false
// fork a new JVM for 'run' and 'test:run'
fork := true
// fork a new JVM for 'test:run', but not 'run'
fork in Test := true
// add a JVM option to use when forking a JVM for 'run'
javaOptions += "-Xmx2G"
// only use a single thread for building
parallelExecution := false
// Execute tests in the current project serially
// Tests from other projects may still run concurrently.
parallelExecution in Test := false
// set the location of the JDK to use for compiling Java code.
// if 'fork' is true, this is used for 'run' as well
javaHome := Some(file("/usr/lib/jvm/sun-jdk-1.6"))
// Use Scala from a directory on the filesystem instead of retrieving from a repository
scalaHome := Some(file("/home/user/scala/trunk/"))
// don't aggregate clean (See FullConfiguration for aggregation details)
aggregate in clean := false
// only show warnings and errors on the screen for compilations.
// this applies to both test:compile and compile and is Info by default
logLevel in compile := Level.Warn
// only show warnings and errors on the screen for all tasks (the default is Info)
// individual tasks can then be more verbose using the previous setting
logLevel := Level.Warn
// only store messages at info and above (the default is Debug)
// this is the logging level for replaying logging with 'last'
persistLogLevel := Level.Debug
// only show 10 lines of stack traces
traceLevel := 10
// only show stack traces up to the first sbt stack frame
traceLevel := 0
// add SWT to the unmanaged classpath
unmanagedJars in Compile += Attributed.blank(file("/usr/share/java/swt.jar"))
// publish test jar, sources, and docs
publishArtifact in Test := true
// disable publishing of main docs
publishArtifact in (Compile, packageDoc) := false
// change the classifier for the docs artifact
artifactClassifier in packageDoc := Some("doc")
// Copy all managed dependencies to <build-root>/lib_managed/
// This is essentially a project-local cache and is different
// from the lib_managed/ in sbt 0.7.x. There is only one
// lib_managed/ in the build root (not per-project).
retrieveManaged := true
/* Specify a file containing credentials for publishing. The format is:
realm=Sonatype Nexus Repository Manager
host=nexus.scala-tools.org
user=admin
password=admin123
*/
credentials += Credentials(Path.userHome / ".ivy2" / ".credentials")
// Directly specify credentials for publishing.
credentials += Credentials("Sonatype Nexus Repository Manager", "nexus.scala-tools.org", "admin", "admin123")
// Exclude transitive dependencies, e.g., include log4j without including logging via jdmk, jmx, or jms.
libraryDependencies +=
"log4j" % "log4j" % "1.2.15" excludeAll(
ExclusionRule(organization = "com.sun.jdmk"),
ExclusionRule(organization = "com.sun.jmx"),
ExclusionRule(organization = "javax.jms")
)

View File

@ -1,17 +0,0 @@
========
Examples
========
This section of the documentation has example sbt build definitions and code.
Contributions are welcome!
You may want to read the :doc:`Getting Started Guide </Getting-Started/Welcome>`
as a foundation for understanding the examples.
.. toctree::
:maxdepth: 1
Quick-Configuration-Examples
Full-Configuration-Example
Advanced-Configurations-Example
Advanced-Command-Example

View File

@ -1,291 +0,0 @@
=============
Build Loaders
=============
Build loaders are the means by which sbt resolves, builds, and
transforms build definitions. Each aspect of loading may be customized
for special applications. Customizations are specified by overriding the
*buildLoaders* methods of your build definition's Build object. These
customizations apply to external projects loaded by the build, but not
the (already loaded) Build in which they are defined. Also documented on
this page is how to manipulate inter-project dependencies from a
setting.
Custom Resolver
---------------
The first type of customization introduces a new resolver. A resolver
provides support for taking a build URI and retrieving it to a local
directory on the filesystem. For example, the built-in resolver can
checkout a build using git based on a git URI, use a build in an
existing local directory, or download and extract a build packaged in a
jar file. A resolver has type:
::
ResolveInfo => Option[() => File]
The resolver should return None if it cannot handle the URI or Some
containing a function that will retrieve the build. The ResolveInfo
provides a staging directory that can be used or the resolver can
determine its own target directory. Whichever is used, it should be
returned by the loading function. A resolver is registered by passing it
to *BuildLoader.resolve* and overriding *Build.buildLoaders* with the
result:
::
...
object Demo extends Build {
...
override def buildLoaders =
BuildLoader.resolve(demoResolver) ::
Nil
def demoResolver: BuildLoader.ResolveInfo => Option[() => File] = ...
}
API Documentation
~~~~~~~~~~~~~~~~~
Relevant API documentation for custom resolvers:
- `ResolveInfo <../../api/index.html#sbt.BuildLoader$$ResolveInfo>`_
- `BuildLoader <../../api/sbt/BuildLoader$.html>`_
Full Example
~~~~~~~~~~~~
::
import sbt._
import Keys._
object Demo extends Build
{
// Define a project that depends on an external project with a custom URI
lazy val root = Project("root", file(".")).dependsOn(
uri("demo:a")
)
// Register the custom resolver
override def buildLoaders =
BuildLoader.resolve(demoResolver) ::
Nil
// Define the custom resolver, which handles the 'demo' scheme.
// The resolver's job is to produce a directory containing the project to load.
// A subdirectory of info.staging can be used to create new local
// directories, such as when doing 'git clone ...'
def demoResolver(info: BuildLoader.ResolveInfo): Option[() => File] =
if(info.uri.getScheme != "demo")
None
else
{
// Use a subdirectory of the staging directory for the new local build.
// The subdirectory name is derived from a hash of the URI,
// and so identical URIs will resolve to the same directory (as desired).
val base = RetrieveUnit.temporary(info.staging, info.uri)
// Return a closure that will do the actual resolution when requested.
Some(() => resolveDemo(base, info.uri.getSchemeSpecificPart))
}
// Construct a sample project on the fly with the name specified in the URI.
def resolveDemo(base: File, ssp: String): File =
{
// Only create the project if it hasn't already been created.
if(!base.exists)
IO.write(base / "build.sbt", template.format(ssp))
base
}
def template = """
name := "%s"
version := "1.0"
"""
}
Custom Builder
--------------
Once a project is resolved, it needs to be built and then presented to
sbt as an instance of `sbt.BuildUnit`. A custom builder has type:
::
BuildInfo => Option[() => BuildUnit]
A builder returns None if it does not want to handle the build
identified by the `BuildInfo`. Otherwise, it provides a function that
will load the build when evaluated. Register a builder by passing it to
*BuildLoader.build* and overriding *Build.buildLoaders* with the result:
::
...
object Demo extends Build {
...
override def buildLoaders =
BuildLoader.build(demoBuilder) ::
Nil
def demoBuilder: BuildLoader.BuildInfo => Option[() => BuildUnit] = ...
}
API Documentation
~~~~~~~~~~~~~~~~~
Relevant API documentation for custom builders:
- `BuildInfo <../../api/sbt/BuildLoader$$BuildInfo.html>`_
- `BuildLoader <../../api/sbt/BuildLoader$.html>`_
- `BuildUnit <../../api/index.html#sbt.Load$$BuildUnit>`_
Example
~~~~~~~
This example demonstrates the structure of how a custom builder could
read configuration from a pom.xml instead of the standard .sbt files and
project/ directory.
::
... imports ...
object Demo extends Build
{
lazy val root = Project("root", file(".")) dependsOn( file("basic-pom-project") )
override def buildLoaders =
BuildLoader.build(demoBuilder) ::
Nil
def demoBuilder: BuildInfo => Option[() => BuildUnit] = info =>
if(pomFile(info).exists)
Some(() => pomBuild(info))
else
None
def pomBuild(info: BuildInfo): BuildUnit =
{
val pom = pomFile(info)
val model = readPom(pom)
val n = Project.normalizeProjectID(model.getName)
val base = Option(model.getProjectDirectory) getOrElse info.base
val root = Project(n, base) settings( pomSettings(model) : _*)
val build = new Build { override def projects = Seq(root) }
val loader = this.getClass.getClassLoader
val definitions = new LoadedDefinitions(info.base, Nil, loader, build :: Nil, Nil)
val plugins = new LoadedPlugins(info.base / "project", Nil, loader, Nil, Nil)
new BuildUnit(info.uri, info.base, definitions, plugins)
}
def readPom(file: File): Model = ...
def pomSettings(m: Model): Seq[Setting[_]] = ...
def pomFile(info: BuildInfo): File = info.base / "pom.xml"
Custom Transformer
------------------
Once a project has been loaded into an `sbt.BuildUnit`, it is
transformed by all registered transformers. A custom transformer has
type:
::
TransformInfo => BuildUnit
A transformer is registered by passing it to *BuildLoader.transform* and
overriding *Build.buildLoaders* with the result:
::
...
object Demo extends Build {
...
override def buildLoaders =
BuildLoader.transform(demoTransformer) ::
Nil
def demoBuilder: BuildLoader.TransformInfo => BuildUnit = ...
}
API Documentation
~~~~~~~~~~~~~~~~~
Relevant API documentation for custom transformers:
- `TransformInfo <../../api/index.html#sbt.BuildLoader$$TransformInfo>`_
- `BuildLoader <../../api/sbt/BuildLoader$.html>`_
- `BuildUnit <../../api/index.html#sbt.Load$$BuildUnit>`_
Manipulating Project Dependencies in Settings
=============================================
The :key:`buildDependencies` setting, in the Global scope, defines the
aggregation and classpath dependencies between projects. By default,
this information comes from the dependencies defined by `Project`
instances by the `aggregate` and `dependsOn` methods. Because
:key:`buildDependencies` is a setting and is used everywhere dependencies
need to be known (once all projects are loaded), plugins and build
definitions can transform it to manipulate inter-project dependencies at
setting evaluation time. The only requirement is that no new projects
are introduced because all projects are loaded before settings get
evaluated. That is, all Projects must have been declared directly in a
Build or referenced as the argument to `Project.aggregate` or
`Project.dependsOn`.
The BuildDependencies type
--------------------------
The type of the :key:`buildDependencies` setting is
`BuildDependencies <../../api/sbt/BuildDependencies.html>`_.
`BuildDependencies` provides mappings from a project to its aggregate
or classpath dependencies. For classpath dependencies, a dependency has
type `ClasspathDep[ProjectRef]`, which combines a `ProjectRef` with
a configuration (see `ClasspathDep <../../api/sbt/ClasspathDep.html>`_
and `ProjectRef <../../api/sbt/ProjectRef.html>`_). For aggregate
dependencies, the type of a dependency is just `ProjectRef`.
The API for `BuildDependencies` is not extensive, covering only a
little more than the minimum required, and related APIs have more of an
internal, unpolished feel. Most manipulations consist of modifying the
relevant map (classpath or aggregate) manually and creating a new
`BuildDependencies` instance.
Example
~~~~~~~
As an example, the following replaces a reference to a specific build
URI with a new URI. This could be used to translate all references to a
certain git repository to a different one or to a different mechanism,
like a local directory.
::
buildDependencies in Global := {
val deps = (buildDependencies in Global).value
val oldURI = uri("...") // the URI to replace
val newURI = uri("...") // the URI replacing oldURI
def substitute(dep: ClasspathDep[ProjectRef]): ClasspathDep[ProjectRef] =
if(dep.project.build == oldURI)
ResolvedClasspathDependency(ProjectRef(newURI, dep.project.project), dep.configuration)
else
dep
val newcp =
for( (proj, deps) <- deps.cp) yield
(proj, deps map substitute)
new BuildDependencies(newcp, deps.aggregate)
}
It is not limited to such basic translations, however. The configuration
a dependency is defined in may be modified and dependencies may be added
or removed. Modifying :key:`buildDependencies` can be combined with
modifying :key:`libraryDependencies` to convert binary dependencies to and
from source dependencies, for example.

View File

@ -1,227 +0,0 @@
=================
State and actions
=================
`State <../../api/sbt/State$.html>`_ is the entry point to all available
information in sbt. The key methods are:
- `definedCommands: Seq[Command]` returns all registered Command
definitions
- `remainingCommands: Seq[String]` returns the remaining commands to
be run
- `attributes: AttributeMap` contains generic data.
The action part of a command performs work and transforms `State`. The
following sections discuss `State => State` transformations. As
mentioned previously, a command will typically handle a parsed value as
well: `(State, T) => State`.
Command-related data
--------------------
A Command can modify the currently registered commands or the commands
to be executed. This is done in the action part by transforming the
(immutable) State provided to the command. A function that registers
additional power commands might look like:
::
val powerCommands: Seq[Command] = ...
val addPower: State => State =
(state: State) =>
state.copy(definedCommands =
(state.definedCommands ++ powerCommands).distinct
)
This takes the current commands, appends new commands, and drops
duplicates. Alternatively, State has a convenience method for doing the
above:
::
val addPower2 = (state: State) => state ++ powerCommands
Some examples of functions that modify the remaining commands to
execute:
::
val appendCommand: State => State =
(state: State) =>
state.copy(remainingCommands = state.remainingCommands :+ "cleanup")
val insertCommand: State => State =
(state: State) =>
state.copy(remainingCommands = "next-command" +: state.remainingCommands)
The first adds a command that will run after all currently specified
commands run. The second inserts a command that will run next. The
remaining commands will run after the inserted command completes.
To indicate that a command has failed and execution should not continue,
return `state.fail`.
::
(state: State) => {
val success: Boolean = ...
if(success) state else state.fail
}
Project-related data
--------------------
Project-related information is stored in `attributes`. Typically,
commands won't access this directly but will instead use a convenience
method to extract the most useful information:
::
val state: State
val extracted: Extracted = Project.extract(state)
import extracted._
`Extracted <../../api/sbt/Extracted.html>`_ provides:
- Access to the current build and project (`currentRef`)
- Access to initialized project setting data (`structure.data`)
- Access to session `Setting`\ s and the original, permanent settings
from `.sbt` and `.scala` files (`session.append` and
`session.original`, respectively)
- Access to the current `Eval <../../api/sbt/compiler/Eval.html>`_
instance for evaluating Scala expressions in the build context.
Project data
------------
All project data is stored in `structure.data`, which is of type
`sbt.Settings[Scope]`. Typically, one gets information of type `T`
in the following way:
::
val key: SettingKey[T]
val scope: Scope
val value: Option[T] = key in scope get structure.data
Here, a `SettingKey[T]` is typically obtained from
`Keys <../../api/sbt/Keys$.html>`_ and is the same type that is used to
define settings in `.sbt` files, for example.
`Scope <../../api/sbt/Scope.html>`_ selects the scope the key is
obtained for. There are convenience overloads of `in` that can be used
to specify only the required scope axes. See
`Structure.scala <../../sxr/sbt/Structure.scala.html>`_ for where `in` and
other parts of the settings interface are defined. Some examples:
::
import Keys._
val extracted: Extracted
import extracted._
// get name of current project
val nameOpt: Option[String] = name in currentRef get structure.data
// get the package options for the `test:packageSrc` task or Nil if none are defined
val pkgOpts: Seq[PackageOption] = packageOptions in (currentRef, Test, packageSrc) get structure.data getOrElse Nil
`BuildStructure <../../api/sbt/Load$$BuildStructure.html>`_ contains
information about build and project relationships. Key members are:
::
units: Map[URI, LoadedBuildUnit]
root: URI
A `URI` identifies a build and `root` identifies the initial build
loaded. `LoadedBuildUnit <../../api/sbt/Load$$LoadedBuildUnit.html>`_
provides information about a single build. The key members of
`LoadedBuildUnit` are:
::
// Defines the base directory for the build
localBase: File
// maps the project ID to the Project definition
defined: Map[String, ResolvedProject]
`ResolvedProject <../../api/sbt/ResolvedProject.html>`_ has the same
information as the `Project` used in a `project/Build.scala` except
that `ProjectReferences <../../api/sbt/ProjectReference.html>`_ are
resolved to `ProjectRef`\ s.
Classpaths
----------
Classpaths in sbt 0.10+ are of type `Seq[Attributed[File]]`. This
allows tagging arbitrary information to classpath entries. sbt currently
uses this to associate an `Analysis` with an entry. This is how it
manages the information needed for multi-project incremental
recompilation. It also associates the ModuleID and Artifact with managed
entries (those obtained by dependency management). When you only want
the underlying `Seq[File]`, use `files`:
::
val attributedClasspath: Seq[Attributed[File]] = ...
val classpath: Seq[File] = attributedClasspath.files
Running tasks
-------------
It can be useful to run a specific project task from a
:doc:`command <Commands>` (*not from another task*) and get its
result. For example, an IDE-related command might want to get the
classpath from a project or a task might analyze the results of a
compilation. The relevant method is `Project.evaluateTask`, which has
the following signature:
::
def evaluateTask[T](taskKey: ScopedKey[Task[T]], state: State,
checkCycles: Boolean = false, maxWorkers: Int = ...): Option[Result[T]]
For example,
::
val eval: State => State = (state: State) => {
// This selects the main 'compile' task for the current project.
// The value produced by 'compile' is of type inc.Analysis,
// which contains information about the compiled code.
val taskKey = Keys.compile in Compile
// Evaluate the task
// None if the key is not defined
// Some(Inc) if the task does not complete successfully (Inc for incomplete)
// Some(Value(v)) with the resulting value
val result: Option[Result[inc.Analysis]] = Project.evaluateTask(taskKey, state)
// handle the result
result match
{
case None => // Key wasn't defined.
case Some(Inc(inc)) => // error detail, inc is of type Incomplete, use Incomplete.show(inc.tpe) to get an error message
case Some(Value(v)) => // do something with v: inc.Analysis
}
}
For getting the test classpath of a specific project, use this key:
::
val projectRef: ProjectRef = ...
val taskKey: TaskKey[Seq[Attributed[File]]] =
Keys.fullClasspath in (projectRef, Test)
Using State in a task
---------------------
To access the current State from a task, use the :key:`state` task as an input.
For example,
::
myTask := ... state.value ...

View File

@ -1,127 +0,0 @@
============================================
Creating Command Line Applications Using sbt
============================================
There are several components of sbt that may be used to create a command
line application. The :doc:`launcher </Detailed-Topics/Launcher>` and
the :doc:`command system <Commands>` are the two main ones illustrated
here.
As described on the :doc:`launcher page </Detailed-Topics/Launcher>`, a launched
application implements the xsbti.AppMain interface and defines a brief
configuration file that users pass to the launcher to run the
application. To use the command system, an application sets up a
:doc:`State <Build-State>` instance that provides
:doc:`command implementations <Commands>` and the initial commands to
run. A minimal hello world example is given below.
Hello World Example
===================
There are three files in this example:
1. build.sbt
2. Main.scala
3. hello.build.properties
To try out this example:
1. Put the first two files in a new directory
2. Run `sbt publishLocal` in that directory
3. Run `sbt @path/to/hello.build.properties` to run the application.
Like for sbt itself, you can specify commands from the command line
(batch mode) or run them at a prompt (interactive mode).
Build Definition: build.sbt
~~~~~~~~~~~~~~~~~~~~~~~~~~~
The build.sbt file should define the standard settings: name, version,
and organization. To use the sbt command system, a dependency on the
`command` module is needed. To use the task system, add a dependency
on the `task-system` module as well.
::
organization := "org.example"
name := "hello"
version := "0.1-SNAPSHOT"
libraryDependencies += "org.scala-sbt" % "command" % "0.12.0"
Application: Main.scala
~~~~~~~~~~~~~~~~~~~~~~~
The application itself is defined by implementing `xsbti.AppMain <../../api/xsbti/AppMain.html>`_. The basic steps are
1. Provide command definitions. These are the commands that are available for users to run.
2. Define initial commands. These are the commands that are initially scheduled to run. For example, an application will typically add anything specified on the command line (what sbt calls batch mode) and if no commands are defined, enter interactive mode by running the 'shell' command.
3. Set up logging. The default setup in the example rotates the log file after each user interaction and sends brief logging to the console and verbose logging to the log file.
::
package org.example
import sbt._
import java.io.{File, PrintWriter}
final class Main extends xsbti.AppMain
{
/** Defines the entry point for the application.
* The call to `initialState` sets up the application.
* The call to runLogged starts command processing. */
def run(configuration: xsbti.AppConfiguration): xsbti.MainResult =
MainLoop.runLogged( initialState(configuration) )
/** Sets up the application by constructing an initial State instance with the supported commands
* and initial commands to run. See the State API documentation for details. */
def initialState(configuration: xsbti.AppConfiguration): State =
{
val commandDefinitions = hello +: BasicCommands.allBasicCommands
val commandsToRun = Hello +: "iflast shell" +: configuration.arguments.map(_.trim)
State( configuration, commandDefinitions, Set.empty, None, commandsToRun, State.newHistory,
AttributeMap.empty, initialGlobalLogging, State.Continue )
}
// defines an example command. see the Commands page for details.
val Hello = "hello"
val hello = Command.command(Hello) { s =>
s.log.info("Hello!")
s
}
/** Configures logging to log to a temporary backing file as well as to the console.
* An application would need to do more here to customize the logging level and
* provide access to the backing file (like sbt's last command and logLevel setting).*/
def initialGlobalLogging: GlobalLogging =
GlobalLogging.initial(MainLogging.globalDefault _, File.createTempFile("hello", "log"))
}
Launcher configuration file: hello.build.properties
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The launcher needs a configuration file in order to retrieve and run an
application.
`hello.build.properties`
.. code-block:: ini
[scala]
version: 2.9.1
[app]
org: org.example
name: hello
version: 0.1-SNAPSHOT
class: org.example.Main
components: xsbti
cross-versioned: true
[repositories]
local
maven-central
typesafe-ivy-releases: http://repo.typesafe.com/typesafe/ivy-releases/, [organization]/[module]/[revision]/[type]s/[artifact](-[classifier]).[ext]

View File

@ -1,198 +0,0 @@
========
Commands
========
What is a "command"?
====================
A "command" looks similar to a task: it's a named operation that can be executed from the sbt console.
However, a command's implementation takes as its parameter the entire state of the build (represented by :doc:`/Extending/Build-State`) and computes a new :doc:`/Extending/Build-State`. This means that a command can look at or modify other sbt settings, for example. Typically, you would resort to a command when you need to do something that's impossible in a regular task.
Introduction
============
There are three main aspects to commands:
1. The syntax used by the user to invoke the command, including:
- Tab completion for the syntax
- The parser to turn input into an appropriate data structure
2. The action to perform using the parsed data structure. This action
transforms the build `State <../../api/sbt/State.html>`_.
3. Help provided to the user
In sbt, the syntax part, including tab completion, is specified with
parser combinators. If you are familiar with the parser combinators in
Scala's standard library, these are very similar. The action part is a
function `(State, T) => State`, where `T` is the data structure
produced by the parser. See the :doc:`/Detailed-Topics/Parsing-Input`
page for how to use the parser combinators.
`State <../../api/sbt/State.html>`_ provides access to the build state,
such as all registered `Command`\ s, the remaining commands to
execute, and all project-related information. See :doc:`Build-State`
for details on State.
Finally, basic help information may be provided that is used by the
`help` command to display command help.
Defining a Command
==================
A command combines a function `State => Parser[T]` with an action
`(State, T) => State`. The reason for `State => Parser[T]` and not
simply `Parser[T]` is that often the current `State` is used to
build the parser. For example, the currently loaded projects (provided
by `State`) determine valid completions for the `project` command.
Examples for the general and specific cases are shown in the following
sections.
See `Command.scala <../../sxr/sbt/Command.scala.html>`_ for the source
API details for constructing commands.
General commands
----------------
General command construction looks like:
::
val action: (State, T) => State = ...
val parser: State => Parser[T] = ...
val command: Command = Command("name")(parser)(action)
No-argument commands
--------------------
There is a convenience method for constructing commands that do not
accept any arguments.
::
val action: State => State = ...
val command: Command = Command.command("name")(action)
Single-argument command
-----------------------
There is a convenience method for constructing commands that accept a
single argument with arbitrary content.
::
// accepts the state and the single argument
val action: (State, String) => State = ...
val command: Command = Command.single("name")(action)
Multi-argument command
----------------------
There is a convenience method for constructing commands that accept
multiple arguments separated by spaces.
::
val action: (State, Seq[String]) => State = ...
// <arg> is the suggestion printed for tab completion on an argument
val command: Command = Command.args("name", "<arg>")(action)
Full Example
============
The following example is a valid `project/Build.scala` that adds
commands to a project. To try it out:
1. Copy the following build definition into `project/Build.scala` for
a new project.
2. Run sbt on the project.
3. Try out the `hello`, `helloAll`, `failIfTrue`, `color`,
and `printState` commands.
4. Use tab-completion and the code below as guidance.
::
import sbt._
import Keys._
// imports standard command parsing functionality
import complete.DefaultParsers._
object CommandExample extends Build
{
// Declare a single project, adding several new commands, which are discussed below.
lazy override val projects = Seq(root)
lazy val root = Project("root", file(".")) settings(
commands ++= Seq(hello, helloAll, failIfTrue, changeColor, printState)
)
// A simple, no-argument command that prints "Hi",
// leaving the current state unchanged.
def hello = Command.command("hello") { state =>
println("Hi!")
state
}
// A simple, multiple-argument command that prints "Hi" followed by the arguments.
// Again, it leaves the current state unchanged.
def helloAll = Command.args("helloAll", "<name>") { (state, args) =>
println("Hi " + args.mkString(" "))
state
}
// A command that demonstrates failing or succeeding based on the input
def failIfTrue = Command.single("failIfTrue") {
case (state, "true") => state.fail
case (state, _) => state
}
// Demonstration of a custom parser.
// The command changes the foreground or background terminal color
// according to the input.
lazy val change = Space ~> (reset | setColor)
lazy val reset = token("reset" ^^^ "\033[0m")
lazy val color = token( Space ~> ("blue" ^^^ "4" | "green" ^^^ "2") )
lazy val select = token( "fg" ^^^ "3" | "bg" ^^^ "4" )
lazy val setColor = (select ~ color) map { case (g, c) => "\033[" + g + c + "m" }
def changeColor = Command("color")(_ => change) { (state, ansicode) =>
print(ansicode)
state
}
// A command that demonstrates getting information out of State.
def printState = Command.command("printState") { state =>
import state._
println(definedCommands.size + " registered commands")
println("commands to run: " + show(remainingCommands))
println()
println("original arguments: " + show(configuration.arguments))
println("base directory: " + configuration.baseDirectory)
println()
println("sbt version: " + configuration.provider.id.version)
println("Scala version (for sbt): " + configuration.provider.scalaProvider.version)
println()
val extracted = Project.extract(state)
import extracted._
println("Current build: " + currentRef.build)
println("Current project: " + currentRef.project)
println("Original setting count: " + session.original.size)
println("Session setting count: " + session.append.size)
state
}
def show[T](s: Seq[T]) =
s.map("'" + _ + "'").mkString("[", ", ", "]")
}

View File

@ -1,303 +0,0 @@
===========
Input Tasks
===========
Input Tasks parse user input and produce a task to run.
:doc:`/Detailed-Topics/Parsing-Input` describes how to use the parser
combinators that define the input syntax and tab completion. This page
describes how to hook those parser combinators into the input task
system.
Input Keys
==========
A key for an input task is of type `InputKey` and represents the input
task like a `SettingKey` represents a setting or a `TaskKey`
represents a task. Define a new input task key using the
`inputKey.apply` factory method:
::
// goes in project/Build.scala or in build.sbt
val demo = inputKey[Unit]("A demo input task.")
The definition of an input task is similar to that of a normal task, but it can
also use the result of a :doc:`Parser </Detailed-Topics/Parsing-Input>` applied to
user input. Just as the special `value` method gets the value of a
setting or task, the special `parsed` method gets the result of a `Parser`.
Basic Input Task Definition
===========================
The simplest input task accepts a space-delimited sequence of arguments.
It does not provide useful tab completion and parsing is basic. The built-in
parser for space-delimited arguments is constructed via the `spaceDelimited`
method, which accepts as its only argument the label to present to the user
during tab completion.
For example, the following task prints the current Scala version and then echoes
the arguments passed to it on their own line.
::
demo := {
// get the result of parsing
val args: Seq[String] = spaceDelimited("<arg>").parsed
// Here, we also use the value of the `scalaVersion` setting
println("The current Scala version is " + scalaVersion.value)
println("The arguments to demo were:")
args foreach println
}
Input Task using Parsers
========================
The Parser provided by the `spaceDelimited` method does not provide
any flexibility in defining the input syntax. Using a custom parser
is just a matter of defining your own `Parser` as described on the
:doc:`/Detailed-Topics/Parsing-Input` page.
Constructing the Parser
-----------------------
The first step is to construct the actual `Parser` by defining a value
of one of the following types:
* `Parser[I]`: a basic parser that does not use any settings
* `Initialize[Parser[I]]`: a parser whose definition depends on one or more settings
* `Initialize[State => Parser[I]]`: a parser that is defined using both settings and the current :doc:`state <Build-State>`
We already saw an example of the first case with `spaceDelimited`, which doesn't use any settings in its definition.
As an example of the third case, the following defines a contrived `Parser` that uses the
project's Scala and sbt version settings as well as the state. To use these settings, we
need to wrap the Parser construction in `Def.setting` and get the setting values with the
special `value` method:
::
import complete.DefaultParsers._
val parser: Initialize[State => Parser[(String,String)]] =
Def.setting {
(state: State) =>
( token("scala" <~ Space) ~ token(scalaVersion.value) ) |
( token("sbt" <~ Space) ~ token(sbtVersion.value) ) |
( token("commands" <~ Space) ~
token(state.remainingCommands.size.toString) )
}
This Parser definition will produce a value of type `(String,String)`.
The input syntax defined isn't very flexible; it is just a demonstration. It
will produce one of the following values for a successful parse
(assuming the current Scala version is |scalaRelease|, the current sbt version is
|release|, and there are 3 commands left to run):
.. parsed-literal::
("scala", "|scalaRelease|")
("sbt", "|release|")
("commands", "3")
Again, we were able to access the current Scala and sbt version for the project because
they are settings. Tasks cannot be used to define the parser.
Constructing the Task
---------------------
Next, we construct the actual task to execute from the result of the
`Parser`. For this, we define a task as usual, but we can access the
result of parsing via the special `parsed` method on `Parser`.
The following contrived example uses the previous example's output (of
type `(String,String)`) and the result of the `package` task to
print some information to the screen.
::
demo := {
val (tpe, value) = parser.parsed
println("Type: " + tpe)
println("Value: " + value)
println("Packaged: " + packageBin.value.getAbsolutePath)
}
The InputTask type
==================
It helps to look at the `InputTask` type to understand more advanced usage of input tasks.
The core input task type is:
::
class InputTask[T](val parser: State => Parser[Task[T]])
Normally, an input task is assigned to a setting and you work with `Initialize[InputTask[T]]`.
Breaking this down,
1. You can use other settings (via `Initialize`) to construct an input task.
2. You can use the current `State` to construct the parser.
3. The parser accepts user input and provides tab completion.
4. The parser produces the task to run.
So, you can use settings or `State` to construct the parser that defines an input task's command line syntax.
This was described in the previous section.
You can then use settings, `State`, or user input to construct the task to run.
This is implicit in the input task syntax.
Using other input tasks
=======================
The types involved in an input task are composable, so it is possible to reuse input tasks.
The `.parsed` and `.evaluated` methods are defined on InputTasks to make this more convenient in common situations:
* Call `.parsed` on an `InputTask[T]` or `Initialize[InputTask[T]]` to get the `Task[T]` created after parsing the command line
* Call `.evaluated` on an `InputTask[T]` or `Initialize[InputTask[T]]` to get the value of type `T` from evaluating that task
In both situations, the underlying `Parser` is sequenced with other parsers in the input task definition.
In the case of `.evaluated`, the generated task is evaluated.
The following example applies the `run` input task, a literal separator parser `--`, and `run` again.
The parsers are sequenced in order of syntactic appearance,
so that the arguments before `--` are passed to the first `run` and the ones after are passed to the second.
::
val run2 = inputKey[Unit](
"Runs the main class twice with different argument lists separated by --")
val separator: Parser[String] = "--"
run2 := {
val one = (run in Compile).evaluated
val sep = separator.parsed
val two = (run in Compile).evaluated
}
For a main class Demo that echoes its arguments, this looks like:
::
$ sbt
> run2 a b -- c d
[info] Running Demo c d
[info] Running Demo a b
c
d
a
b
Preapplying input
=================
Because `InputTasks` are built from `Parsers`, it is possible to generate a new `InputTask` by applying some input programmatically.
(It is also possible to generate a `Task`, which is covered in the next section.)
Two convenience methods are provided on `InputTask[T]` and `Initialize[InputTask[T]]` that accept the String to apply.
* `partialInput` applies the input and allows further input, such as from the command line
* `fullInput` applies the input and terminates parsing, so that further input is not accepted
In each case, the input is applied to the input task's parser.
Because input tasks handle all input after the task name, they usually require initial whitespace to be provided in the input.
Consider the example in the previous section.
We can modify it so that we:
* Explicitly specify all of the arguments to the first `run`. We use `name` and `version` to show that settings can be used to define and modify parsers.
* Define the initial arguments passed to the second `run`, but allow further input on the command line.
NOTE: the current implementation of `:=` doesn't actually support applying input derived from settings yet.
::
lazy val run2 = inputKey[Unit]("Runs the main class twice: " +
"once with the project name and version as arguments " +
"and once with command line arguments preceded by hard coded values.")
// The argument string for the first run task is ' <name> <version>'
lazy val firstInput: Initialize[String] =
Def.setting(s" ${name.value} ${version.value}")
// Make the first arguments to the second run task ' red blue'
lazy val secondInput: String = " red blue"
run2 := {
val one = (run in Compile).fullInput(firstInput.value).evaluated
val two = (run in Compile).partialInput(secondInput).evaluated
}
For a main class Demo that echoes its arguments, this looks like:
::
$ sbt
> run2 green
[info] Running Demo demo 1.0
[info] Running Demo red blue green
demo
1.0
red
blue
green
Get a Task from an InputTask
============================
The previous section showed how to derive a new `InputTask` by applying input.
In this section, applying input produces a `Task`.
The `toTask` method on `Initialize[InputTask[T]]` accepts the `String` input to apply and produces a task that can be used normally.
For example, the following defines a plain task `runFixed` that can be used by other tasks or run directly without providing any input, ::
lazy val runFixed = taskKey[Unit]("A task that hard codes the values to `run`")
runFixed := {
val _ = (run in Compile).toTask(" blue green").value
println("Done!")
}
For a main class Demo that echoes its arguments, running `runFixed` looks like:
::
$ sbt
> runFixed
[info] Running Demo blue green
blue
green
Done!
Each call to `toTask` generates a new task, but each task is configured the same as the original `InputTask` (in this case, `run`) but with different input applied.
For example, ::
lazy val runFixed2 = taskKey[Unit]("A task that hard codes the values to `run`")
fork in run := true
runFixed2 := {
val x = (run in Compile).toTask(" blue green").value
val y = (run in Compile).toTask(" red orange").value
println("Done!")
}
The different `toTask` calls define different tasks that each run the project's main class in a new jvm.
That is, the `fork` setting configures both, each has the same classpath, and each runs the same main class.
However, each task passes different arguments to the main class.
For a main class Demo that echoes its arguments, the output of running `runFixed2` might look like:
::
$ sbt
> runFixed2
[info] Running Demo blue green
[info] Running Demo red orange
blue
green
red
orange
Done!

View File

@ -1,271 +0,0 @@
======================
Plugins Best Practices
======================
*This page is intended primarily for sbt plugin authors.*
A plugin developer should strive for consistency and ease of use.
Specifically:
- Plugins should play well with other plugins. Avoiding namespace
clashes (in both sbt and Scala) is paramount.
- Plugins should follow consistent conventions. The experiences of an
sbt *user* should be consistent, no matter what plugins are pulled
in.
Here are some current plugin best practices. **NOTE:** Best practices
are evolving, so check back frequently.
Don't use default package
---------------------------
Users who have their build files in some package will not be able to
use your plugin if it's defined in the default (no-name) package.
Avoid older `sbt.Plugin` mechanism
----------------------------------
sbt has deprecated the old `sbt.Plugin` mechanism in favor of `sbt.AutoPlugin`.
The new mechanism features a set of user-level controls and dependency declarations
that cleans up a lot of long-standing issues with plugins.
Reuse existing keys
-------------------
sbt has a number of `predefined keys <../../api/sbt/Keys%24.html>`_.
Where possible, reuse them in your plugin. For instance, don't define:
::
val sourceFiles = settingKey[Seq[File]]("Some source files")
Instead, simply reuse sbt's existing :key:`sources` key.
Avoid namespace clashes
-----------------------
Sometimes, you need a new key, because there is no existing sbt key. In
this case, use a plugin-specific prefix, both in the (string) key name
used in the sbt namespace and in the Scala `val`. There are two
acceptable ways to accomplish this goal.
Just use a `val` prefix
~~~~~~~~~~~~~~~~~~~~~~~~~
::
package sbtobfuscate
object Plugin extends sbt.Plugin {
val obfuscateStylesheet = settingKey[File]("Obfuscate stylesheet")
}
In this approach, every `val` starts with `obfuscate`. A user of the
plugin would refer to the settings like this:
::
obfuscateStylesheet := ...
Use a nested object
~~~~~~~~~~~~~~~~~~~
::
package sbtobfuscate
object Plugin extends sbt.Plugin {
object ObfuscateKeys {
val stylesheet = SettingKey[File]("obfuscateStylesheet")
}
}
In this approach, all non-common settings are in a nested object. A user
of the plugin would refer to the settings like this:
::
import ObfuscateKeys._ // place this at the top of build.sbt
stylesheet := ...
Configuration Advice
--------------------
Due to usability concerns from the shell, you could opt out of
task-scoping described in this section, if your plugin makes heavy use
of the shell. Using configuration-scoping the user could discover your
tasks using tab completion:
.. code-block:: console
coffee:[tab]
This method no longer works with per-task keys, but there's a pending
case, so hopefully it will be addressed in the future.
When to define your own configuration
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If your plugin introduces a new concept (even if that concept reuses an
existing key), you want your own configuration. For instance, suppose
you've built a plugin that produces PDF files from some kind of markup,
and your plugin defines a target directory to receive the resulting
PDFs. That target directory is scoped in its own configuration, so it is
distinct from other target directories. Thus, these two definitions use
the same *key*, but they represent distinct *values*. So, in a user's
`build.sbt`, we might see:
::
target in PDFPlugin := baseDirectory.value / "mytarget" / "pdf"
target in Compile := baseDirectory.value / "mytarget"
In the PDF plugin, this is achieved with an `inConfig` definition:
::
val settings: Seq[sbt.Project.Setting[_]] = inConfig(PDFPlugin)(Seq(
target := baseDirectory.value / "target" / "docs" // the default value
))
When *not* to define your own configuration.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you're merely adding to existing definitions, don't define your own
configuration. Instead, reuse an existing one *or* scope by the main
task (see below).
::
val akka = config("akka") // This isn't needed.
val akkaStartCluster = TaskKey[Unit]("akkaStartCluster")
target in akkaStartCluster := ... // This is ok.
akkaStartCluster in akka := ... // BAD. No need for a Config for plugin-specific task.
Configuration Cat says "Configuration is for configuration"
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
When defining a new type of configuration, e.g.
::
val Config = config("profile")
should be used to create a "cross-task" configuration. The task
definitions don't change in this case, but the default configuration
does. For example, the `profile` configuration can extend the test
configuration with additional settings and changes to allow profiling in
sbt. Plugins should not create arbitrary Configurations, but utilize
them for specific purposes and builds.
Configurations actually tie into dependency resolution (with Ivy) and
can alter generated pom files.
Configurations should *not* be used to namespace keys for a plugin. e.g.
::
val Config = config("my-plugin")
val pluginKey = settingKey[String]("A plugin specific key")
val settings = pluginKey in Config // DON'T DO THIS!
Playing nice with configurations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Whether you ship with a configuration or not, a plugin should strive to
support multiple configurations, including those created by the build
user. Some tasks that are tied to a particular configuration can be
re-used in other configurations. While you may not see the need
immediately in your plugin, some project may and will ask you for the
flexibility.
Provide raw settings and configured settings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Split your settings by the configuration axis like so:
::
val obfuscate = TaskKey[Seq[File]]("obfuscate")
val obfuscateSettings = inConfig(Compile)(baseObfuscateSettings)
val baseObfuscateSettings: Seq[Setting[_]] = Seq(
obfuscate := ... (sources in obfuscate).value ...,
sources in obfuscate := sources.value
)
The `baseObfuscateSettings` value provides base configuration for the
plugin's tasks. This can be re-used in other configurations if projects
require it. The `obfuscateSettings` value provides the default
`Compile` scoped settings for projects to use directly. This gives the
greatest flexibility in using features provided by a plugin. Here's how
the raw settings may be reused:
::
Project.inConfig(Test)(sbtObfuscate.Plugin.baseObfuscateSettings)
Alternatively, one could provide a utility method to load settings in a
given configuration:
::
def obfuscateSettingsIn(c: Configuration): Seq[Project.Setting[_]] =
inConfig(c)(baseObfuscateSettings)
This could be used as follows:
::
seq(obfuscateSettingsIn(Test): _*)
Using a 'main' task scope for settings
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Sometimes you want to define some settings for a particular 'main' task
in your plugin. In this instance, you can scope your settings using the
task itself.
::
val obfuscate = TaskKey[Seq[File]]("obfuscate")
val obfuscateSettings = inConfig(Compile)(baseObfuscateSettings)
val baseObfuscateSettings: Seq[Setting[_]] = Seq(
obfuscate := ... (sources in obfuscate).value ...,
sources in obfuscate := sources.value
)
In the above example, `sources in obfuscate` is scoped under the main
task, `obfuscate`.
Mucking with Global build state
-------------------------------
There may be times when you need to muck with global build state. The
general rule is *be careful what you touch*.
First, make sure your user does not include global build configuration in
*every* project but rather in the build itself. e.g.
::
object MyBuild extends Build {
override lazy val settings = super.settings ++ MyPlugin.globalSettings
val main = project(file("."), "root") settings(MyPlugin.globalSettings:_*) // BAD!
}
Global settings should *not* be placed into a `build.sbt` file.
When overriding global settings, care should be taken to ensure previous
settings from other plugins are not ignored. e.g. when creating a new
:key:`onLoad` handler, ensure that the previous :key:`onLoad` handler is not
removed.
::
object MyPlugin extends Plugin {
val globalSettings: Seq[Setting[_]] = Seq(
onLoad in Global := (onLoad in Global).value andThen { state =>
... return new state ...
}
)
}

View File

@ -1,431 +0,0 @@
=======
Plugins
=======
Introduction
============
A plugin is essentially a way to use external code in a build
definition. A plugin can be a library used to implement a task. For
example, you might use
`Knockoff <https://github.com/tristanjuricek/knockoff/>`_ to write a
markdown processing task. A plugin can define a sequence of sbt Settings
that are automatically added to all projects or that are explicitly
declared for selected projects. For example, a plugin might add a
`proguard` task and associated (overridable) settings.
Also, :doc:`Commands` can be added with the :key:`commands` setting
The :doc:`Plugins-Best-Practices` page describes the
currently evolving guidelines to writing sbt plugins. See also the
general :doc:`/Detailed-Topics/Best-Practices`.
Using a binary sbt plugin
=========================
A common situation is using a binary plugin published to a repository.
Create `project/plugins.sbt` with the desired sbt plugins, any general
dependencies, and any necessary repositories:
::
addSbtPlugin("org.example" % "plugin" % "1.0")
addSbtPlugin("org.example" % "another-plugin" % "2.0")
// plain library (not an sbt plugin) for use in the build definition
libraryDependencies += "org.example" % "utilities" % "1.3"
resolvers += "Example Plugin Repository" at "http://example.org/repo/"
See the rest of the page for more information on creating and using
plugins.
By Description
==============
A plugin definition is a project in `<main-project>/project/`. This
project's classpath is the classpath used for build definitions in
`<main-project>/project/` and any `.sbt` files in the project's base
directory. It is also used for the `eval` and `set` commands.
Specifically,
1. Managed dependencies declared by the `project/` project are
retrieved and are available on the build definition classpath, just
like for a normal project.
2. Unmanaged dependencies in `project/lib/` are available to the build
definition, just like for a normal project.
3. Sources in the `project/` project are the build definition files
and are compiled using the classpath built from the managed and
unmanaged dependencies.
4. Project dependencies can be declared in `project/plugins.sbt` or
`project/project/Build.scala` and will be available to the build
definition sources. Think of `project/project/` as the build
definition for the build definition.
The build definition classpath is searched for `sbt/sbt.plugins`
descriptor files containing the names of Plugin implementations. A
Plugin is a module that defines settings to automatically inject to
projects. Additionally, all Plugin modules are wildcard imported for the
`eval` and `set` commands and `.sbt` files. A Plugin
implementation is not required to produce a plugin, however. It is a
convenience for plugin consumers and because of the automatic nature, it
is not always appropriate.
The `reload plugins` command changes the current build to
`<current-build>/project/`. This allows manipulating the build
definition project like a normal project. `reload return` changes back
to the original build. Any session settings for the plugin definition
project that have not been saved are dropped.
*Note*: At runtime, all plugins for all builds are loaded in a separate, parent class loader of the class loaders for builds.
This means that plugins will not see classes or resources from build definitions.
Global plugins
--------------
The :sublit:`|globalPluginsBase|` directory is treated as a global plugin
definition project. It is a normal sbt project whose classpath is
available to all sbt project definitions for that user as described
above for per-project plugins.
By Example
==========
Using a library in a build definition
-------------------------------------
As an example, we'll add the Grizzled Scala library as a plugin.
Although this does not provide sbt-specific functionality, it
demonstrates how to declare plugins.
1a) Manually managed
~~~~~~~~~~~~~~~~~~~~
1. Download the jar manually from
https://oss.sonatype.org/content/repositories/releases/org/clapper/grizzled-scala\_2.8.1/1.0.4/grizzled-scala\_2.8.1-1.0.4.jar
2. Put it in `project/lib/`
1b) Automatically managed: direct editing approach
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Edit `project/plugins.sbt` to contain:
::
libraryDependencies += "org.clapper" %% "grizzled-scala" % "1.0.4"
If sbt is running, do `reload`.
1c) Automatically managed: command line approach
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
We can change to the plugins project in `project/` using
`reload plugins`.
.. code-block:: console
$ sbt
> reload plugins
[info] Set current project to default (in build file:/Users/harrah/demo2/project/)
>
Then, we can add dependencies like usual and save them to
`project/plugins.sbt`. It is useful, but not required, to run
:key:`update` to verify that the dependencies are correct.
.. code-block:: console
> set libraryDependencies += "org.clapper" %% "grizzled-scala" % "1.0.4"
...
> update
...
> session save
...
To switch back to the main project:
.. code-block:: console
> reload return
[info] Set current project to root (in build file:/Users/harrah/demo2/)
1d) Project dependency
~~~~~~~~~~~~~~~~~~~~~~
This variant shows how to use sbt's external project support to declare a source dependency on a plugin.
This means that the plugin will be built from source and used on the classpath.
Edit `project/plugins.sbt`
::
lazy val root = project.in( file(".") ).dependsOn( assemblyPlugin )
lazy val assemblyPlugin = uri("git://github.com/sbt/sbt-assembly")
If sbt is running, run `reload`.
Note that this approach can be useful when developing a plugin.
A project that uses the plugin will rebuild the plugin on `reload`.
This saves the intermediate steps of :key:`publishLocal` and :key:`update`.
It can also be used to work with the development version of a plugin from its repository.
It is recommended to explicitly specify the commit or tag by appending it to the repository as a fragment:
::
lazy val assemblyPlugin = uri("git://github.com/sbt/sbt-assembly#0.9.1")
One caveat to using this method is that the local sbt will try to run the remote plugin's build. It
is quite possible that the plugin's own build uses a different sbt version, as many plugins cross-publish for
several sbt versions. As such, it is recommended to stick with binary artifacts when possible.
2) Use the library
~~~~~~~~~~~~~~~~~~
Grizzled Scala is ready to be used in build definitions. This includes
the `eval` and `set` commands and `.sbt` and `project/*.scala`
files.
.. code-block:: console
> eval grizzled.sys.os
In a `build.sbt` file:
::
import grizzled.sys._
import OperatingSystem._
libraryDependencies ++=
if(os == Windows)
("org.example" % "windows-only" % "1.0") :: Nil
else
Nil
Creating a plugin
=================
Introduction
------------
A minimal plugin is a Scala library that is built against the version of
Scala that sbt runs (currently, |scalaRelease|) or a Java library. Nothing
special needs to be done for this type of library, as shown in the
previous section. A more typical plugin will provide sbt tasks,
commands, or settings. This kind of plugin may provide these settings
automatically or make them available for the user to explicitly
integrate.
Description
-----------
To make a plugin, create a project and configure `sbtPlugin` to
`true`. Then, write the plugin code and publish your project to a
repository. The plugin can be used as described in the previous section.
* Automatically importing selective names to `.sbt` files.
* Specifying plugin dependencies.
* Automatically activating itself when all dependencies are present.
* Specifying `projectSettings`, `buildSettings`, and `globalSettings` as appropriate.
When an AutoPlugin provides a stable field such as `val` or `object` named `autoImport`,
the contents of the field are wildcard imported in `set`, `eval`, and `.sbt` files. Typically,
this is used to provide new keys (SettingKey, TaskKey, or InputKey) or
core methods without requiring an import or qualification.
The AutoPlugin's `projectSettings` is automatically appended to each project's settings, when its dependencies also exist on that project.
The `requires` method defines the dependencies to other plugins.
The `trigger` method defines the conditions by which this plugin's settings are automatically activated.
The `buildSettings` is appended to each build's settings (that is, `in ThisBuild`).
The `globalSettings` is appended once to the global settings (`in Global`).
These allow a plugin to automatically provide new functionality or new defaults.
One main use of this feature is to globally add commands, such as for IDE plugins.
Use `globalSettings` to define the default value of a setting.
Example Plugin
--------------
An example of a typical plugin:
`build.sbt`:
::
sbtPlugin := true
name := "sbt-obfuscate"
organization := "org.example"
`Plugin.scala`:
::
package sbtobfuscate
import sbt._
object Plugin extends AutoPlugin
{
// by defining autoImport, these are automatically imported into user's `*.sbt`
object autoImport
{
// configuration points, like the built in `version`, `libraryDependencies`, or `compile`
val obfuscate = taskKey[Seq[File]]("Obfuscates files.")
val obfuscateLiterals = settingKey[Boolean]("Obfuscate literals.")
// default values for the tasks and settings
lazy val baseObfuscateSettings: Seq[sbt.Def.Setting[_]] = Seq(
obfuscate := {
Obfuscate(sources.value, (obfuscateLiterals in obfuscate).value)
},
obfuscateLiterals in obfuscate := false
)
}
import autoImport._
override def requires = sbt.plugins.JvmModule
// This plugin is automatically enabled for projects which are JvmModules.
override def trigger = allRequirements
// a group of settings that are automatically added to projects.
override val projectSettings =
inConfig(Compile)(baseObfuscateSettings) ++
inConfig(Test)(baseObfuscateSettings)
}
object Obfuscate
{
def apply(sources: Seq[File], obfuscateLiterals: Boolean): Seq[File] = sources
}
Usage example
-------------
A build definition that uses the plugin might look like:
`obfuscate.sbt`
::
obfuscateLiterals in obfuscate := true
Root Plugins
------------
Some plugins should always be explicitly enabled on projects. Sbt calls these root plugins, i.e. plugins
that are "root" nodes in the plugin dependency graph. `AutoPlugin` by default defines a root plugin.
Example command root plugin
---------------------------
A basic plugin that adds commands looks like:
`build.sbt`
::
sbtPlugin := true
name := "sbt-sample"
organization := "org.example"
`Plugin.scala`
::
package sbtsample
import sbt._
import Keys._
object Plugin extends AutoPlugin
{
override lazy val projectSettings = Seq(commands += myCommand)
lazy val myCommand =
Command.command("hello") { (state: State) =>
println("Hi!")
state
}
}
This example demonstrates how to take a Command (here, `myCommand`)
and distribute it in a plugin. Note that multiple commands can be
included in one plugin (for example, use `commands ++= Seq(a,b)`). See
:doc:`Commands` for defining more useful commands, including ones that
accept arguments and affect the execution state.
For a user to consume this plugin, it requires an explicit include via the `Project` instance.
Here's what their local sbt will look like.
`build.sbt`
::
val root = Project("example-plugin-usage", file(".")).setPlugins(MyPlugin)
The `setPlugins` method allows projects to explicitly define the `RootPlugin`s they wish to consume.
`AutoPlugin`s are automatically added to the project as appropriate.
Projects can also exclude any type of plugin using the `disablePlugins` method. For example, if
we wish to remove the JvmModule settings (`compile`,`test`,`run`), we modify our `build.sbt` as
follows:
::
val root = Project("example-plugin-usage", file(".")).setPlugins(MyPlugin).disablePlugins(plugins.JvmModule)
Global plugins example
----------------------
The simplest global plugin definition is declaring a library or plugin
in :sublit:`|globalPluginsBase|\ build.sbt`:
::
libraryDependencies += "org.example" %% "example-plugin" % "0.1"
This plugin will be available for every sbt project for the current
user.
In addition:
1. Jars may be placed directly in :sublit:`|globalPluginsBase|\ lib/` and will be
available to every build definition for the current user.
2. Dependencies on plugins built from source may be declared in
:sublit:`|globalPluginsBase|\ project/Build.scala` as described at
:doc:`/Getting-Started/Full-Def`.
3. A Plugin may be directly defined in Scala source files in
:sublit:`|globalPluginsBase|`, such as :sublit:`|globalPluginsBase|\ MyPlugin.scala`.
:sublit:`|globalPluginsBase|\ build.sbt` should contain `sbtPlugin := true`.
This can be used for quicker turnaround when developing a plugin
initially:
1. Edit the global plugin code
2. `reload` the project you want to use the modified plugin in
3. sbt will rebuild the plugin and use it for the project.
Additionally, the plugin will be available in other projects on
the machine without recompiling again. This approach skips the
overhead of :key:`publishLocal` and cleaning the plugins directory
of the project using the plugin.
These are all consequences of :sublit:`|globalPluginsBase|` being a standard
project whose classpath is added to every sbt project's build
definition.
Best Practices
==============
If you're a plugin writer, please consult the :doc:`Plugins-Best-Practices`
page; it contains a set of guidelines to help you ensure that your
plugin is consistent with and plays well with other plugins.
.. |globalBase| replace:: ~/.sbt/|version|/
.. |globalPluginsBase| replace:: |globalBase|\ plugins/

View File

@ -1,232 +0,0 @@
=============
Settings Core
=============
This page describes the core settings engine a bit. This may be useful
for using it outside of sbt. It may also be useful for understanding how
sbt works internally.
The documentation is comprised of two parts. The first part shows an
example settings system built on top of the settings engine. The second
part comments on how sbt's settings system is built on top of the
settings engine. This may help illuminate what exactly the core settings
engine provides and what is needed to build something like the sbt
settings system.
Example
-------
Setting up
~~~~~~~~~~
To run this example, first create a new project with the following
build.sbt file:
::
libraryDependencies += "org.scala-sbt" %% "collections" % sbtVersion.value
resolvers += sbtResolver.value
Then, put the following examples in source files
`SettingsExample.scala` and `SettingsUsage.scala`. Finally, run sbt
and enter the REPL using :key:`console`. To see the output described below,
enter `SettingsUsage`.
Example Settings System
~~~~~~~~~~~~~~~~~~~~~~~
The first part of the example defines the custom settings system. There
are three main parts:
1. Define the Scope type.
2. Define a function that converts that Scope (plus an AttributeKey) to
a String.
3. Define a delegation function that defines the sequence of Scopes in
which to look up a value.
There is also a fourth, but its usage is likely to be specific to sbt at
this time. The example uses a trivial implementation for this part.
`SettingsExample.scala`
::
import sbt._
/** Define our settings system */
// A basic scope indexed by an integer.
final case class Scope(index: Int)
// Extend the Init trait.
// (It is done this way because the Scope type parameter is used everywhere in Init.
// Lots of type constructors would become binary, which as you may know requires lots of type lambdas
// when you want a type function with only one parameter.
// That would be a general pain.)
object SettingsExample extends Init[Scope]
{
// Provides a way of showing a Scope+AttributeKey[_]
val showFullKey: Show[ScopedKey[_]] = new Show[ScopedKey[_]] {
def apply(key: ScopedKey[_]) = key.scope.index + "/" + key.key.label
}
// A sample delegation function that delegates to a Scope with a lower index.
val delegates: Scope => Seq[Scope] = { case s @ Scope(index) =>
s +: (if(index <= 0) Nil else delegates(Scope(index-1)) )
}
// Not using this feature in this example.
val scopeLocal: ScopeLocal = _ => Nil
// These three functions + a scope (here, Scope) are sufficient for defining our settings system.
}
Example Usage
~~~~~~~~~~~~~
This part shows how to use the system we just defined. The end result is
a `Settings[Scope]` value. This type is basically a mapping
`Scope -> AttributeKey[T] -> Option[T]`. See the `Settings API
documentation <../../api/sbt/Settings.html>`_
for details.
`SettingsUsage.scala`
::
/** Usage Example **/
import sbt._
import SettingsExample._
import Types._
object SettingsUsage
{
// Define some keys
val a = AttributeKey[Int]("a")
val b = AttributeKey[Int]("b")
// Scope these keys
val a3 = ScopedKey(Scope(3), a)
val a4 = ScopedKey(Scope(4), a)
val a5 = ScopedKey(Scope(5), a)
val b4 = ScopedKey(Scope(4), b)
// Define some settings
val mySettings: Seq[Setting[_]] = Seq(
setting( a3, value( 3 ) ),
setting( b4, map(a4)(_ * 3)),
update(a5)(_ + 1)
)
// "compiles" and applies the settings.
// This can be split into multiple steps to access intermediate results if desired.
// The 'inspect' command operates on the output of 'compile', for example.
val applied: Settings[Scope] = make(mySettings)(delegates, scopeLocal, showFullKey)
// Show results.
for(i <- 0 to 5; k <- Seq(a, b)) {
    println( k.label + i + " = " + applied.get( Scope(i), k) )
}
}
This produces the following output when run: ::
a0 = None
b0 = None
a1 = None
b1 = None
a2 = None
b2 = None
a3 = Some(3)
b3 = None
a4 = Some(3)
b4 = Some(9)
a5 = Some(4)
b5 = Some(9)
- For the None results, we never defined the value and there was no
value to delegate to.
- For a3, we explicitly defined it to be 3.
- a4 wasn't defined, so it delegates to a3 according to our delegates
function.
- b4 gets the value for a4 (which delegates to a3, so it is 3) and
multiplies by 3
- a5 is defined as the previous value of a5 + 1 and since no previous
value of a5 was defined, it delegates to a4, resulting in 3+1=4.
- b5 isn't defined explicitly, so it delegates to b4 and is therefore
equal to 9 as well
sbt Settings Discussion
-----------------------
Scopes
~~~~~~
.. _Global: ../../api/sbt/Global$.html
.. _This: ../../api/sbt/This$.html
.. _Select: ../../api/sbt/Select.html
sbt defines a more complicated scope than the one shown here for the
standard usage of settings in a build. This scope has four components:
the project axis, the configuration axis, the task axis, and the extra
axis. Each component may be
`Global`_ (no specific value), `This`_ (current context), or `Select`_
(containing a specific value). sbt resolves `This`_ to either
`Global`_ or `Select`_ depending on the context.
For example, in a project, a `This`_ project axis becomes a
`Select`_ referring to the defining project. All other axes that are
`This`_ are translated to `Global`_.
Functions like inConfig and inTask transform This into a
`Select`_ for a specific value. For example, `inConfig(Compile)(someSettings)`
translates the configuration axis for all settings in *someSettings* to
be `Select(Compile)` if the axis value is `This`_.
So, from the example and from sbt's scopes, you can see that the core
settings engine does not impose much on the structure of a scope. All it
requires is a delegates function `Scope => Seq[Scope]` and a
`display` function. You can choose a scope type that makes sense for
your situation.
Constructing settings
~~~~~~~~~~~~~~~~~~~~~
The *app*, *value*, *update*, and related methods are the core methods
for constructing settings. This example obviously looks rather different
from sbt's interface because these methods are not typically used
directly, but are wrapped in a higher-level abstraction.
With the core settings engine, you work with HLists to access other
settings. In sbt's higher-level system, there are wrappers around HList
for TupleN and FunctionN for N = 1-9 (except Tuple1 isn't actually
used). When working with arbitrary arity, it is useful to make these
wrappers at the highest level possible. This is because once wrappers
are defined, code must be duplicated for every N. By making the wrappers
at the top-level, this requires only one level of duplication.
Additionally, sbt uniformly integrates its task engine into the settings
system. The underlying settings engine has no notion of tasks. This is
why sbt uses a `SettingKey` type and a `TaskKey` type. Methods on an
underlying `TaskKey[T]` are basically translated to operating on an
underlying `SettingKey[Task[T]]` (and they both wrap an underlying
`AttributeKey`).
For example, `a := 3` for a SettingKey *a* will very roughly translate
to `setting(a, value(3))`. For a TaskKey *a*, it will roughly
translate to `setting(a, value( task { 3 } ) )`. See
`main/Structure.scala <../../sxr/sbt/Structure.scala>`_
for details.
Settings definitions
~~~~~~~~~~~~~~~~~~~~
sbt also provides a way to define these settings in a file (build.sbt
and Build.scala). This is done for build.sbt using basic parsing and
then passing the resulting chunks of code to `compile/Eval.scala`. For
all definitions, sbt manages the classpaths and recompilation process to
obtain the settings. It also provides a way for users to define project,
task, and configuration delegation, which ends up being used by the
delegates function.

View File

@ -1,5 +0,0 @@
=============
Extending sbt
=============
Pages previously listed here are now included under :doc:`Detailed Topics </Detailed-Topics/index>`.

View File

@ -1,294 +0,0 @@
=========================
`.sbt` Build Definition
=========================
This page describes sbt build definitions, including some "theory" and
the syntax of `build.sbt`. It assumes you know how to :doc:`use sbt <Running>` and have read the previous pages in the
:doc:`Getting Started Guide <index.html>`.
`.sbt` vs. `.scala` Build Definition
----------------------------------------
An sbt build definition can contain files ending in `.sbt`, located in
the base directory of a project, and files ending in `.scala`, located in the
`project/` subdirectory of the base directory.
This page discusses `.sbt` files, which are suitable for most cases.
The `.scala` files are typically used for sharing code across `.sbt` files and for more complex build definitions.
See :doc:`.scala build definition <Full-Def>` (later in Getting Started) for more on `.scala` files.
What is a Build Definition?
---------------------------
After examining a project and processing build definition files, sbt ends up
with an immutable map (set of key-value pairs) describing the build.
For example, one key is :key:`name` and it maps to a string value, the name of your project.
*Build definition files do not affect sbt's map directly.*
Instead, the build definition creates a huge list of objects with type
`Setting[T]` where `T` is the type of the value in the map. A `Setting` describes
a *transformation to the map*, such as adding a new key-value pair or
appending to an existing value. (In the spirit of functional
programming with immutable data structures and values, a transformation returns a new map
- it does not update the old map in-place.)
In `build.sbt`, you might create a `Setting[String]` for the name of
your project like this:
::
name := "hello"
This `Setting[String]` transforms the map by adding (or replacing) the
`name` key, giving it the value `"hello"`. The transformed map
becomes sbt's new map.
To create the map, sbt first sorts the list of settings so that all
changes to the same key are made together, and values that depend on
other keys are processed after the keys they depend on. Then sbt walks
over the sorted list of `Setting`\ s and applies each one to the map in
turn.
Summary: A build definition defines a list of `Setting[T]`, where a
`Setting[T]` is a transformation affecting sbt's map of key-value
pairs and `T` is the type of each value.
How `build.sbt` defines settings
----------------------------------
`build.sbt` defines a `Seq[Setting[_]]`; it's a list of Scala
expressions, separated by blank lines, where each one becomes one
element in the sequence. If you put `Seq(` in front of the `.sbt`
file and `)` at the end and replace the blank lines with commas, you'd
be looking at the equivalent `.scala` code.
Here's an example:
::
name := "hello"
version := "1.0"
scalaVersion := "2.10.4"
Each `Setting` is defined with a Scala expression.
The expressions in `build.sbt` are independent of one another, and
they are expressions, rather than complete Scala statements. These
expressions may be interspersed with `val`\ s, `lazy val`\ s, and `def`\ s.
Top-level `object`\ s and `class`\ es are not allowed in `build.sbt`.
Those should go in the `project/` directory as full Scala source files.
On the left, :key:`name`, :key:`version`, and :key:`scalaVersion` are *keys*. A
key is an instance of `SettingKey[T]`, `TaskKey[T]`, or
`InputKey[T]` where `T` is the expected value type. The kinds of key
are explained below.
Keys have a method called `:=`, which returns a `Setting[T]`. You
could use a Java-like syntax to call the method:
::
name.:=("hello")
But Scala allows `name := "hello"` instead (in Scala, a single-parameter method can
use either syntax).
The `:=` method on key :key:`name` returns a `Setting`, specifically a
`Setting[String]`. `String` also appears in the type of :key:`name`
itself, which is `SettingKey[String]`. In this case, the returned
`Setting[String]` is a transformation to add or replace the :key:`name`
key in sbt's map, giving it the value `"hello"`.
If you use the wrong value type, the build definition will not compile:
::
name := 42 // will not compile
Settings must be separated by blank lines
-----------------------------------------
You can't write a `build.sbt` like this:
::
// will NOT compile, no blank lines
name := "hello"
version := "1.0"
scalaVersion := "2.10.3"
sbt needs some kind of delimiter to tell where one expression stops and
the next begins.
`.sbt` files contain a list of Scala expressions, not a single Scala
program. These expressions have to be split up and passed to the
compiler individually.
Keys
----
Types
~~~~~
There are three flavors of key:
- `SettingKey[T]`: a key for a value computed once (the value is
computed when loading the project, and kept around).
- `TaskKey[T]`: a key for a value, called a *task*,
that has to be recomputed each time, potentially with side effects.
- `InputKey[T]`: a key for a task that has command line arguments as
input. Check out :doc:`/Extending/Input-Tasks` for more details.
Built-in Keys
~~~~~~~~~~~~~
The built-in keys are just fields in an object called
`Keys <../../sxr/sbt/Keys.scala.html>`_. A
`build.sbt` implicitly has an `import sbt.Keys._`, so
`sbt.Keys.name` can be referred to as :key:`name`.
Custom Keys
~~~~~~~~~~~
Custom keys may be defined with their respective creation methods: `settingKey`, `taskKey`, and `inputKey`.
Each method expects the type of the value associated with the key as well as a description.
The name of the key is taken from the `val` the key is assigned to.
For example, to define a key for a new task called `hello`, ::
lazy val hello = taskKey[Unit]("An example task")
Here we have used the fact that an `.sbt` file can contain `val`\ s and `def`\ s in addition to settings.
All such definitions are evaluated before settings regardless of where they are defined in the file.
`val`\ s and `def`\ s must be separated from settings by blank lines.
.. note::
Typically, `lazy val`\ s are used instead of `val`\ s to avoid initialization order problems.
Task v. Setting keys
~~~~~~~~~~~~~~~~~~~~
A `TaskKey[T]` is said to define a *task*. Tasks are operations such
as :key:`compile` or :key:`package`. They may return `Unit` (`Unit` is
Scala for `void`), or they may return a value related to the task, for
example :key:`package` is a `TaskKey[File]` and its value is the jar file
it creates.
Each time you start a task execution, for example by typing :key:`compile`
at the interactive sbt prompt, sbt will re-run any tasks involved
exactly once.
sbt's map describing the project can keep around a fixed string value
for a setting such as :key:`name`, but it has to keep around some
executable code for a task such as :key:`compile` -- even if that
executable code eventually returns a string, it has to be re-run every
time.
*A given key always refers to either a task or a plain setting.* That
is, "taskiness" (whether to re-run each time) is a property of the key,
not the value.
Defining tasks and settings
---------------------------
Using `:=`, you can assign a value to a setting and a computation to a task.
For a setting, the value will be computed once at project load time.
For a task, the computation will be re-run each time the task is executed.
For example, to implement the `hello` task from the previous section, ::
hello := { println("Hello!") }
We already saw an example of defining settings when we defined the project's name, ::
name := "hello"
Types for tasks and settings
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
From a type-system perspective, the `Setting` created from a task key
is slightly different from the one created from a setting key.
`taskKey := 42` results in a `Setting[Task[T]]` while
`settingKey := 42` results in a `Setting[T]`. For most purposes this
makes no difference; the task key still creates a value of type `T`
when the task executes.
The `T` vs. `Task[T]` type difference has this implication: a
setting can't depend on a task, because a setting is
evaluated only once on project load and is not re-run.
More on this in :doc:`more about settings <More-About-Settings>`, coming up
soon.
Keys in sbt interactive mode
----------------------------
In sbt's interactive mode, you can type the name of any task to execute
that task. This is why typing :key:`compile` runs the compile task.
:key:`compile` is a task key.
If you type the name of a setting key rather than a task key, the value
of the setting key will be displayed. Typing a task key name executes
the task but doesn't display the resulting value; to see a task's
result, use `show <task name>` rather than plain `<task name>`.
The convention for keys names is to use `camelCase` so that the
command line name and the Scala identifiers are the same.
To learn more about any key, type `inspect <keyname>` at the sbt
interactive prompt. Some of the information `inspect` displays won't
make sense yet, but at the top it shows you the setting's value type and
a brief description of the setting.
Imports in `build.sbt`
------------------------
You can place import statements at the top of `build.sbt`; they need
not be separated by blank lines.
There are some implied default imports, as follows:
::
import sbt._
import Process._
import Keys._
(In addition, if you have :doc:`.scala files <Full-Def>`,
the contents of any `Build` or `Plugin` objects in those files will
be imported. More on that when we get to :doc:`.scala build definitions <Full-Def>`.)
Adding library dependencies
---------------------------
To depend on third-party libraries, there are two options. The first is
to drop jars in `lib/` (unmanaged dependencies) and the other is to
add managed dependencies, which will look like this in `build.sbt`:
::
libraryDependencies += "org.apache.derby" % "derby" % "10.4.1.3"
This is how you add a managed dependency on the Apache Derby library,
version 10.4.1.3.
The :key:`libraryDependencies` key involves two complexities: `+=` rather
than `:=`, and the `%` method. `+=` appends to the key's old value
rather than replacing it, this is explained in
:doc:`more about settings </Getting-Started/More-About-Settings>`.
The `%` method is used to construct an Ivy module ID from strings,
explained in :doc:`library dependencies </Getting-Started/Library-Dependencies>`.
We'll skip over the details of library dependencies until later in the
Getting Started Guide. There's a :doc:`whole page </Getting-Started/Library-Dependencies>`
covering it later on.
Next
----
Move on to :doc:`learn about scopes </Getting-Started/Scopes>`.

View File

@ -1,94 +0,0 @@
=========================
Custom Settings and Tasks
=========================
This page gets you started creating your own settings and tasks.
To understand this page, be sure you've read earlier pages in the
Getting Started Guide, especially :doc:`build.sbt <Basic-Def>` and :doc:`more about settings <More-About-Settings>`.
Defining a key
--------------
`Keys <../../sxr/sbt/Keys.scala.html>`_ is
packed with examples illustrating how to define keys. Most of the keys
are implemented in
`Defaults <../../sxr/sbt/Defaults.scala.html>`_.
Keys have one of three types. `SettingKey` and `TaskKey` are
described in :doc:`.sbt build definition <Basic-Def>`. Read
about `InputKey` on the :doc:`/Extending/Input-Tasks` page.
Some examples from `Keys <../../sxr/sbt/Keys.scala.html>`_:
::
val scalaVersion = settingKey[String]("The version of Scala used for building.")
val clean = taskKey[Unit]("Deletes files produced by the build, such as generated sources, compiled classes, and task caches.")
The key constructors have two string parameters: the name of the key
(`"scalaVersion"`) and a documentation string
(`"The version of Scala used for building."`).
Remember from :doc:`.sbt build definition <Basic-Def>` that
the type parameter `T` in `SettingKey[T]` indicates the type of
value a setting has. `T` in `TaskKey[T]` indicates the type of the
task's result. Also remember from :doc:`.sbt build definition <Basic-Def>`
that a setting has a fixed value until project
reload, while a task is re-computed for every "task execution" (every
time someone types a command at the sbt interactive prompt or in batch
mode).
Keys may be defined in a :doc:`.sbt file <Basic-Def>`, :doc:`.scala file <Full-Def>`, or in a :doc:`plugin <Using-Plugins>`.
Any `val` found in a `Build` object in your `.scala` build definition files or any
`val` found in a `Plugin` object from a plugin will be imported automatically into your `.sbt` files.
Implementing a task
-------------------
Once you've defined a key for your task, you'll need to complete it
with a task definition. You could be defining your own task, or you
could be planning to redefine an existing task. Either way looks the
same; use `:=` to associate some code with the task key: ::
val sampleStringTask = taskKey[String]("A sample string task.")
val sampleIntTask = taskKey[Int]("A sample int task.")
sampleStringTask := System.getProperty("user.home")
sampleIntTask := {
val sum = 1 + 2
println("sum: " + sum)
sum
}
If the task has dependencies, you'd reference their value using
`value`, as discussed in :doc:`more about settings <More-About-Settings>`.
The hardest part about implementing tasks is often not sbt-specific;
tasks are just Scala code. The hard part could be writing the "meat" of
your task that does whatever you're trying to do. For example, maybe
you're trying to format HTML in which case you might want to use an HTML
library (you would :doc:`add a library dependency to your build definition <Using-Plugins>`
and write code based on the HTML library, perhaps).
sbt has some utility libraries and convenience functions, in particular
you can often use the convenient APIs in
`IO <../../api/index.html#sbt.IO$>`_ to manipulate files and directories.
Use plugins!
------------
If you find you have a lot of custom code, consider
moving it to a plugin for re-use across multiple builds.
It's very easy to create a plugin, as :doc:`teased earlier <Using-Plugins>` and :doc:`discussed at more length here </Extending/Plugins>`.
Next
----
This page has been a quick taste; there's much much more about custom
tasks on the :doc:`/Detailed-Topics/Tasks` page.
Move on to :doc:`Full-Def`.

View File

@ -1,93 +0,0 @@
===================
Directory structure
===================
This page assumes you've :doc:`installed sbt <Setup>` and
seen the :doc:`Hello, World <Hello>` example.
Base directory
--------------
In sbt's terminology, the "base directory" is the directory containing
the project. So if you created a project `hello` containing
`hello/build.sbt` and `hello/hw.scala` as in the :doc:`Hello, World <Hello>`
example, `hello` is your base directory.
Source code
-----------
Source code can be placed in the project's base directory as with
`hello/hw.scala`. However, most people don't do this for real
projects; too much clutter.
sbt uses the same directory structure as
`Maven <http://maven.apache.org/>`_ for source files by default (all
paths are relative to the base directory):
.. code-block:: text
src/
main/
resources/
<files to include in main jar here>
scala/
<main Scala sources>
java/
<main Java sources>
test/
resources/
<files to include in test jar here>
scala/
<test Scala sources>
java/
<test Java sources>
Other directories in `src/` will be ignored. Additionally, all hidden
directories will be ignored.
sbt build definition files
--------------------------
You've already seen `build.sbt` in the project's base directory. Other
sbt files appear in a `project` subdirectory.
`project` can contain `.scala` files, which are combined with
`.sbt` files to form the complete build definition.
See :doc:`.scala build definitions <Full-Def>` for more.
.. code-block:: text
build.sbt
project/
Build.scala
You may see `.sbt` files inside `project/` but they are not
equivalent to `.sbt` files in the project's base directory. Explaining
this will :doc:`come later <Full-Def>`, since you'll need
some background information first.
Build products
--------------
Generated files (compiled classes, packaged jars, managed files, caches,
and documentation) will be written to the `target` directory by
default.
Configuring version control
---------------------------
Your `.gitignore` (or equivalent for other version control systems)
should contain:
.. code-block:: text
target/
Note that this deliberately has a trailing `/` (to match only
directories) and it deliberately has no leading `/` (to match
`project/target/` in addition to plain `target/`).
Next
====
Learn about :doc:`running sbt <Running>`.

View File

@ -1,259 +0,0 @@
===========================
`.scala` Build Definition
===========================
This page assumes you've read previous pages in the Getting Started
Guide, *especially* :doc:`.sbt build definition <Basic-Def>`
and :doc:`more about settings <More-About-Settings>`.
sbt is recursive
----------------
`build.sbt` is so simple, it conceals how sbt really works. sbt builds
are defined with Scala code. That code, itself, has to be built. What
better way than with sbt?
The `project` directory *is another project inside your project* which
knows how to build your project. The project inside `project` can (in
theory) do anything any other project can do. *Your build definition is
an sbt project.*
And the turtles go all the way down. If you like, you can tweak the
build definition of the build definition project, by creating a
`project/project/` directory.
Here's an illustration.
.. code-block:: text
hello/ # your project's base directory
Hello.scala # a source file in your project (could be in
# src/main/scala too)
build.sbt # build.sbt is part of the source code for the
# build definition project inside project/
project/ # base directory of the build definition project
Build.scala # a source file in the project/ project,
# that is, a source file in the build definition
build.sbt # this is part of a build definition for a project
# in project/project ; build definition's build
# definition
project/ # base directory of the build definition project
# for the build definition
Build.scala # source file in the project/project/ project
*Don't worry!* Most of the time you are not going to need all that. But
understanding the principle can be helpful.
By the way: any time files ending in `.scala` or `.sbt` are used,
naming them `build.sbt` and `Build.scala` are conventions only. This
also means that multiple files are allowed.
`.scala` source files in the build definition project
-------------------------------------------------------
`.sbt` files are merged into their sibling `project` directory.
Looking back at the project layout:
.. code-block:: text
hello/ # your project's base directory
build.sbt # build.sbt is part of the source code for the
# build definition project inside project/
project/ # base directory of the build definition project
Build.scala # a source file in the project/ project,
# that is, a source file in the build definition
The Scala expressions in `build.sbt` are compiled alongside and merged
with `Build.scala` (or any other `.scala` files in the `project/`
directory).
*`.sbt` files in the base directory for a project become part of the
`project` build definition project also located in that base
directory.*
The `.sbt` file format is a convenient shorthand for adding settings
to the build definition project.
Relating `build.sbt` to `Build.scala`
-----------------------------------------
To mix `.sbt` and `.scala` files in your build definition, you need
to understand how they relate.
The following two files illustrate. First, if your project is in
`hello`, create `hello/project/Build.scala` as follows:
::
import sbt._
import Keys._
object HelloBuild extends Build {
val sampleKeyA = settingKey[String]("demo key A")
val sampleKeyB = settingKey[String]("demo key B")
val sampleKeyC = settingKey[String]("demo key C")
val sampleKeyD = settingKey[String]("demo key D")
override lazy val settings = super.settings ++
Seq(sampleKeyA := "A: in Build.settings in Build.scala", resolvers := Seq())
lazy val root = Project(id = "hello",
base = file("."),
settings = Seq(sampleKeyB := "B: in the root project settings in Build.scala"))
}
Now, create `hello/build.sbt` as follows:
::
sampleKeyC in ThisBuild := "C: in build.sbt scoped to ThisBuild"
sampleKeyD := "D: in build.sbt"
Start up the sbt interactive prompt. Type `inspect sampleKeyA` and you
should see (among other things):
.. code-block:: text
[info] Setting: java.lang.String = A: in Build.settings in Build.scala
[info] Provided by:
[info] {file:/home/hp/checkout/hello/}/*:sampleKeyA
and then `inspect sampleKeyC` and you should see:
.. code-block:: text
[info] Setting: java.lang.String = C: in build.sbt scoped to ThisBuild
[info] Provided by:
[info] {file:/home/hp/checkout/hello/}/*:sampleKeyC
Note that the "Provided by" shows the same scope for the two values.
That is, `sampleKeyC in ThisBuild` in a `.sbt` file is equivalent to
placing a setting in the `Build.settings` list in a `.scala` file.
sbt takes build-scoped settings from both places to create the build
definition.
Now, `inspect sampleKeyB`:
.. code-block:: text
[info] Setting: java.lang.String = B: in the root project settings in Build.scala
[info] Provided by:
[info] {file:/home/hp/checkout/hello/}hello/*:sampleKeyB
Note that `sampleKeyB` is scoped to the project
(`{file:/home/hp/checkout/hello/}hello`) rather than the entire build
(`{file:/home/hp/checkout/hello/}`).
As you've probably guessed, `inspect sampleKeyD` matches `sampleKeyB`:
.. code-block:: text
[info] Setting: java.lang.String = D: in build.sbt
[info] Provided by:
[info] {file:/home/hp/checkout/hello/}hello/*:sampleKeyD
sbt *appends* the settings from `.sbt` files to the settings from
`Build.settings` and `Project.settings` which means `.sbt`
settings take precedence. Try changing `Build.scala` so it sets key
`sampleKeyC` or `sampleKeyD`, which are also set in `build.sbt`. The
setting in `build.sbt` should "win" over the one in `Build.scala`.
One other thing you may have noticed: `sampleKeyC` and `sampleKeyD`
were available inside `build.sbt`. That's because sbt imports the
contents of your `Build` object into your `.sbt` files. In this case
`import HelloBuild._` was implicitly done for the `build.sbt` file.
In summary:
- In `.scala` files, you can add settings to `Build.settings` for
sbt to find, and they are automatically build-scoped.
- In `.scala` files, you can add settings to `Project.settings` for
sbt to find, and they are automatically project-scoped.
- Any `Build` object you write in a `.scala` file will have its
contents imported and available to `.sbt` files.
- The settings in `.sbt` files are *appended* to the settings in
`.scala` files.
- The settings in `.sbt` files are project-scoped unless you
explicitly specify another scope.
When to use `.scala` files
----------------------------
In `.scala` files, you can write any Scala code, including top-level classes and objects.
Also, there are no restrictions on blank lines, since they are standard `.scala` files.
The recommended approach is to define most configuration in `.sbt` files, using
`.scala` files for task implementations or to share values, such as keys, across `.sbt` files.
The build definition project in interactive mode
------------------------------------------------
You can switch the sbt interactive prompt to have the build definition
project in `project/` as the current project. To do so, type
`reload plugins`.
.. code-block:: text
> reload plugins
[info] Set current project to default-a0e8e4 (in build file:/home/hp/checkout/hello/project/)
> show sources
[info] ArrayBuffer(/home/hp/checkout/hello/project/Build.scala)
> reload return
[info] Loading project definition from /home/hp/checkout/hello/project
[info] Set current project to hello (in build file:/home/hp/checkout/hello/)
> show sources
[info] ArrayBuffer(/home/hp/checkout/hello/hw.scala)
>
As shown above, you use `reload return` to leave the build definition
project and return to your regular project.
Reminder: it's all immutable
----------------------------
It would be wrong to think that the settings in `build.sbt` are added
to the `settings` fields in `Build` and `Project` objects.
Instead, the settings list from `Build` and `Project`, and the
settings from `build.sbt`, are concatenated into another immutable
list which is then used by sbt. The `Build` and `Project` objects
are "immutable configuration" forming only part of the complete build
definition.
In fact, there are other sources of settings as well. They are appended
in this order:
- Settings from `Build.settings` and `Project.settings` in your
`.scala` files.
- Your user-global settings; for example in :sublit:`|globalSbtFile|` you
can define settings affecting *all* your projects.
- Settings injected by plugins, see :doc:`using plugins <Using-Plugins>` coming up next.
- Settings from `.sbt` files in the project.
- Build definition projects (i.e. projects inside `project`) have
settings from global plugins (:sublit:`|globalPluginsBase|`) added. :doc:`Using plugins <Using-Plugins>` explains this more.
Later settings override earlier ones. The entire list of settings forms
the build definition.
Next
----
You're at the end of Getting Started! There's a :doc:`brief recap <Summary>`.
.. |globalBase| replace:: ~/.sbt/|version|/
.. |globalSbtFile| replace:: |globalBase|\ global.sbt
.. |globalPluginsBase| replace:: |globalBase|\ plugins/

View File

@ -1,93 +0,0 @@
============
Hello, World
============
This page assumes you've :doc:`installed sbt <Setup>`.
Create a project directory with source code
-------------------------------------------
A valid sbt project can be a directory containing a single source file.
Try creating a directory `hello` with a file `hw.scala`, containing
the following:
::
object Hi {
def main(args: Array[String]) = println("Hi!")
}
Now from inside the `hello` directory, start sbt and type `run` at
the sbt interactive console. On Linux or OS X the commands might look
like this:
.. code-block:: text
$ mkdir hello
$ cd hello
$ echo 'object Hi { def main(args: Array[String]) = println("Hi!") }' > hw.scala
$ sbt
...
> run
...
Hi!
In this case, sbt works purely by convention. sbt will find the
following automatically:
- Sources in the base directory
- Sources in `src/main/scala` or `src/main/java`
- Tests in `src/test/scala` or `src/test/java`
- Data files in `src/main/resources` or `src/test/resources`
- jars in `lib`
By default, sbt will build projects with the same version of Scala used
to run sbt itself.
You can run the project with `sbt run` or enter the `Scala
REPL <http://www.scala-lang.org/node/2097>`_ with `sbt console`.
`sbt console` sets up your project's classpath so you can try out live
Scala examples based on your project's code.
Build definition
----------------
Most projects will need some manual setup. Basic build settings go in a
file called `build.sbt`, located in the project's base directory.
For example, if your project is in the directory `hello`, in
`hello/build.sbt` you might write:
.. parsed-literal::
name := "hello"
version := "1.0"
scalaVersion := "|scalaRelease|"
Notice the blank line between every item. This isn't just for show;
they're actually required in order to separate each item. In :doc:`.sbt build definition <Basic-Def>` you'll learn more about
how to write a `build.sbt` file.
If you plan to package your project in a jar, you will want to set at
least the name and version in a `build.sbt`.
Setting the sbt version
-----------------------
You can force a particular version of sbt by creating a file
`hello/project/build.properties`. In this file, write:
.. parsed-literal::
sbt.version=\ |release|
to force the use of sbt |release|. sbt is 99% source compatible from release to release.
Still, setting the sbt version in `project/build.properties` avoids
any potential confusion.
Next
====
Learn about the :doc:`file and directory layout <Directories>` of an sbt project.

View File

@ -1,246 +0,0 @@
====================
Library Dependencies
====================
This page assumes you've read the earlier Getting Started pages, in
particular :doc:`.sbt build definition <Basic-Def>`,
:doc:`scopes <Scopes>`, and :doc:`more about settings <More-About-Settings>`.
Library dependencies can be added in two ways:
- *unmanaged dependencies* are jars dropped into the `lib` directory
- *managed dependencies* are configured in the build definition and
downloaded automatically from repositories
Unmanaged dependencies
----------------------
Most people use managed dependencies instead of unmanaged. But unmanaged
can be simpler when starting out.
Unmanaged dependencies work like this: add jars to `lib` and they will
be placed on the project classpath. Not much else to it!
You can place test jars such as
`ScalaCheck <https://github.com/rickynils/scalacheck>`_,
`specs <http://code.google.com/p/specs/>`_, and
`ScalaTest <http://www.scalatest.org/>`_ in `lib` as well.
Dependencies in `lib` go on all the classpaths (for :key:`compile`,
:key:`test`, :key:`run`, and :key:`console`). If you wanted to change the
classpath for just one of those, you would adjust
`dependencyClasspath in Compile` or `dependencyClasspath in Runtime`
for example.
There's nothing to add to `build.sbt` to use unmanaged dependencies,
though you could change the :key:`unmanagedBase` key if you'd like to use
a different directory rather than `lib`.
To use `custom_lib` instead of `lib`:
::
unmanagedBase := baseDirectory.value / "custom_lib"
:key:`baseDirectory` is the project's root directory, so here you're
changing :key:`unmanagedBase` depending on :key:`baseDirectory` using the
special `value` method as explained in :doc:`more about settings <More-About-Settings>`.
There's also an :key:`unmanagedJars` task which lists the jars from the
:key:`unmanagedBase` directory. If you wanted to use multiple directories
or do something else complex, you might need to replace the whole
:key:`unmanagedJars` task with one that does something else.
Managed Dependencies
--------------------
sbt uses `Apache Ivy <http://ant.apache.org/ivy/>`_ to implement managed
dependencies, so if you're familiar with Maven or Ivy, you won't have
much trouble.
The `libraryDependencies` key
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Most of the time, you can simply list your dependencies in the setting
:key:`libraryDependencies`. It's also possible to write a Maven POM file or
Ivy configuration file to externally configure your dependencies, and
have sbt use those external configuration files. You can learn more
about that :ref:`here <external-maven-ivy>`.
Declaring a dependency looks like this, where `groupID`,
`artifactID`, and `revision` are strings:
::
libraryDependencies += groupID % artifactID % revision
or like this, where `configuration` is also a string:
::
libraryDependencies += groupID % artifactID % revision % configuration
:key:`libraryDependencies` is declared in `Keys <../../sxr/sbt/Keys.scala.html#sbt.Keys.libraryDependencies>`_ like this:
::
val libraryDependencies = settingKey[Seq[ModuleID]]("Declares managed dependencies.")
The `%` methods create `ModuleID` objects from strings, then you add
those `ModuleID` to :key:`libraryDependencies`.
Of course, sbt (via Ivy) has to know where to download the module. If
your module is in one of the default repositories sbt comes with, this
will just work. For example, Apache Derby is in a default repository:
::
libraryDependencies += "org.apache.derby" % "derby" % "10.4.1.3"
If you type that in `build.sbt` and then :key:`update`, sbt should
download Derby to `~/.ivy2/cache/org.apache.derby/`. (By the way,
:key:`update` is a dependency of :key:`compile` so there's no need to manually
type :key:`update` most of the time.)
Of course, you can also use `++=` to add a list of dependencies all at
once:
::
libraryDependencies ++= Seq(
groupID % artifactID % revision,
groupID % otherID % otherRevision
)
In rare cases you might find reasons to use `:=` with :key:`libraryDependencies` as well.
Getting the right Scala version with `%%`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you use `groupID %% artifactID % revision` rather than
`groupID % artifactID % revision` (the difference is the double `%%`
after the groupID), sbt will add your project's Scala version to the
artifact name. This is just a shortcut. You could write this without the
`%%`:
::
libraryDependencies += "org.scala-tools" % "scala-stm_2.9.1" % "0.3"
Assuming the :key:`scalaVersion` for your build is `2.9.1`, the following
is identical:
::
libraryDependencies += "org.scala-tools" %% "scala-stm" % "0.3"
The idea is that many dependencies are compiled for multiple Scala
versions, and you'd like to get the one that matches your project.
The complexity in practice is that often a dependency will work with a
slightly different Scala version; but `%%` is not smart about that. So
if the dependency is available for `2.9.0` but you're using
`scalaVersion := "2.9.1"`, you won't be able to use `%%` even though
the `2.9.0` dependency likely works. If `%%` stops working just go
see which versions the dependency is really built for, and hardcode the
one you think will work (assuming there is one).
See :doc:`/Detailed-Topics/Cross-Build` for some more detail on this.
Ivy revisions
~~~~~~~~~~~~~
The `revision` in `groupID % artifactID % revision` does not have to
be a single fixed version. Ivy can select the latest revision of a
module according to constraints you specify. Instead of a fixed revision
like `"1.6.1"`, you specify `"latest.integration"`, `"2.9.+"`, or
`"[1.0,)"`. See the `Ivy
revisions <http://ant.apache.org/ivy/history/2.3.0/ivyfile/dependency.html#revision>`_
documentation for details.
Resolvers
~~~~~~~~~
Not all packages live on the same server; sbt uses the standard Maven2
repository by default. If your dependency isn't on one of the default
repositories, you'll have to add a *resolver* to help Ivy find it.
To add an additional repository, use
::
resolvers += name at location
For example:
::
resolvers += "Sonatype OSS Snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
The :key:`resolvers` key is defined in
`Keys <../../sxr/sbt/Keys.scala.html#sbt.Keys.resolvers>`_ like
this:
::
val resolvers = settingKey[Seq[Resolver]]("The user-defined additional resolvers for automatically managed dependencies.")
The `at` method creates a `Resolver` object from two strings.
sbt can search your local Maven repository if you add it as a
repository:
::
resolvers += "Local Maven Repository" at "file://"+Path.userHome.absolutePath+"/.m2/repository"
See :doc:`/Detailed-Topics/Resolvers` for details on defining other types of repositories.
Overriding default resolvers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:key:`resolvers` does not contain the default resolvers; only additional
ones added by your build definition.
`sbt` combines :key:`resolvers` with some default repositories to form
:key:`externalResolvers`.
Therefore, to change or remove the default resolvers, you would need to
override :key:`externalResolvers` instead of :key:`resolvers`.
.. _gsg-ivy-configurations:
Per-configuration dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Often a dependency is used by your test code (in `src/test/scala`,
which is compiled by the `Test` configuration) but not your main code.
If you want a dependency to show up in the classpath only for the
`Test` configuration and not the `Compile` configuration, add
`% "test"` like this:
::
libraryDependencies += "org.apache.derby" % "derby" % "10.4.1.3" % "test"
Now, if you type `show compile:dependencyClasspath` at the sbt
interactive prompt, you should not see derby. But if you type
`show test:dependencyClasspath`, you should see the derby jar in the
list.
Typically, test-related dependencies such as
`ScalaCheck <https://github.com/rickynils/scalacheck>`_,
`specs <http://code.google.com/p/specs/>`_, and
`ScalaTest <http://www.scalatest.org/>`_ would be defined with
`% "test"`.
Next
====
There are some more details and tips-and-tricks related to library
dependencies :doc:`on this page </Detailed-Topics/Library-Management>`, if you didn't find an
answer on this introductory page.
If you're reading Getting Started in order, for now, you might move on
to read :doc:`Multi-Project`.

View File

@ -1,203 +0,0 @@
=====================
More Kinds of Setting
=====================
This page explains other ways to create a `Setting`, beyond the basic
`:=` method. It assumes you've read :doc:`.sbt build definition <Basic-Def>` and :doc:`scopes <Scopes>`.
Refresher: Settings
-------------------
:doc:`Remember <Basic-Def>`, a build definition creates a
list of `Setting`, which is then used to transform sbt's description
of the build (which is a map of key-value pairs). A `Setting` is a
transformation with sbt's earlier map as input and a new map as output.
The new map becomes sbt's new state.
Different settings transform the map in different ways.
:doc:`Earlier <Basic-Def>`, you read about the `:=` method.
The `Setting` which `:=` creates puts a fixed, constant value in the
new, transformed map. For example, if you transform a map with the
setting `name := "hello"` the new map has the string `"hello"`
stored under the key :key:`name`.
Settings must end up in the master list of settings to do any good (all
lines in a `build.sbt` automatically end up in the list, but in a
:doc:`.scala file <Full-Def>` you can get it wrong by
creating a `Setting` without putting it where sbt will find it).
Appending to previous values: `+=` and `++=`
------------------------------------------------
Assignment with `:=` is the simplest transformation, but keys have
other methods as well. If the `T` in `SettingKey[T]` is a sequence,
i.e. the key's value type is a sequence, you can append to the sequence
rather than replacing it.
- `+=` will append a single element to the sequence.
- `++=` will concatenate another sequence.
For example, the key `sourceDirectories in Compile` has a
`Seq[File]` as its value. By default this key's value would include
`src/main/scala`. If you wanted to also compile source code in a
directory called `source` (since you just have to be nonstandard), you
could add that directory:
::
sourceDirectories in Compile += new File("source")
Or, using the `file()` function from the sbt package for convenience:
::
sourceDirectories in Compile += file("source")
(`file()` just creates a new `File`.)
You could use `++=` to add more than one directory at a time:
::
sourceDirectories in Compile ++= Seq(file("sources1"), file("sources2"))
Where `Seq(a, b, c, ...)` is standard Scala syntax to construct a
sequence.
To replace the default source directories entirely, you use `:=` of
course:
::
sourceDirectories in Compile := Seq(file("sources1"), file("sources2"))
Computing a value based on other keys' values
---------------------------------------------
Reference the value of another task or setting by calling `value`
on the key for the task or setting. The `value` method is special and may
only be called in the argument to `:=`, `+=`, or `++=`.
As a first example, consider defining the project organization to be the same as the project name.
::
// name our organization after our project (both are SettingKey[String])
organization := name.value
Or, set the name to the name of the project's directory:
::
// name is a Key[String], baseDirectory is a Key[File]
// name the project after the directory it's inside
name := baseDirectory.value.getName
This transforms the value of :key:`baseDirectory` using the standard `getName` method of `java.io.File`.
Using multiple inputs is similar. For example,
::
name := "project " + name.value + " from " + organization.value + " version " + version.value
This sets the name in terms of its previous value as well as the organization and version settings.
Settings with dependencies
~~~~~~~~~~~~~~~~~~~~~~~~~~
In the setting `name := baseDirectory.value.getName`, :key:`name` will have
a *dependency* on :key:`baseDirectory`. If you place the above in
`build.sbt` and run the sbt interactive console, then type
`inspect name`, you should see (in part):
.. code-block:: text
[info] Dependencies:
[info] *:baseDirectory
This is how sbt knows which settings depend on which other settings.
Remember that some settings describe tasks, so this approach also
creates dependencies between tasks.
For example, if you `inspect compile` you'll see it depends on another
key :key:`compileInputs`, and if you inspect :key:`compileInputs` it in turn
depends on other keys. Keep following the dependency chains and magic
happens. When you type :key:`compile` sbt automatically performs an
:key:`update`, for example. It Just Works because the values required as
inputs to the :key:`compile` computation require sbt to do the :key:`update`
computation first.
In this way, all build dependencies in sbt are *automatic* rather than
explicitly declared. If you use a key's value in another computation,
then the computation depends on that key. It just works!
When settings are undefined
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Whenever a setting uses `:=`, `+=`, or `++=` to create a dependency on
itself or another key's value, the value it depends on must exist. If it
does not, sbt will complain. It might say *"Reference to undefined
setting"*, for example. When this happens, be sure you're using the key
in the :doc:`scope <Scopes>` that defines it.
It's possible to create cycles, which is an error; sbt will tell you if
you do this.
Tasks with dependencies
~~~~~~~~~~~~~~~~~~~~~~~
As noted in :doc:`.sbt build definition <Basic-Def>`, task
keys create a `Setting[Task[T]]` rather than a `Setting[T]` when you
build a setting with `:=`, etc. Tasks can use settings as inputs, but
settings cannot use tasks as inputs.
Take these two keys (from `Keys <../../sxr/sbt/Keys.scala.html>`_):
::
val scalacOptions = taskKey[Seq[String]]("Options for the Scala compiler.")
val checksums = settingKey[Seq[String]]("The list of checksums to generate and to verify for dependencies.")
(:key:`scalacOptions` and :key:`checksums` have nothing to do with each other,
they are just two keys with the same value type, where one is a task.)
It is possible to compile a `build.sbt` that aliases :key:`scalacOptions` to :key:`checksums`, but not the other way.
For example, this is allowed:
::
// The scalacOptions task may be defined in terms of the checksums setting
scalacOptions := checksums.value
There is no way to go the *other* direction. That is, a setting key
can't depend on a task key. That's because a setting key is only
computed once on project load, so the task would not be re-run every
time, and tasks expect to re-run every time.
::
// The checksums setting may not be defined in terms of the scalacOptions task
checksums := scalacOptions.value
Appending with dependencies: `+=` and `++=`
-------------------------------------------------
Other keys can be used when appending to an existing setting or task, just like they can for assigning with `:=`.
For example, say you have a coverage report named after the project, and
you want to add it to the files removed by :key:`clean`:
::
cleanFiles += file("coverage-report-" + name.value + ".txt")
Next
----
At this point you know how to get things done with settings, so we can
move on to a specific key that comes up often: :key:`libraryDependencies`.
:doc:`Learn about library dependencies <Library-Dependencies>`.

View File

@ -1,220 +0,0 @@
====================
Multi-Project Builds
====================
This page introduces multiple projects in a single build.
Please read the earlier pages in the Getting Started Guide first, in
particular you need to understand :doc:`build.sbt <Basic-Def>` before
reading this page.
Multiple projects
-----------------
It can be useful to keep multiple related projects in a single build,
especially if they depend on one another and you tend to modify them
together.
Each sub-project in a build has its own source directories, generates
its own jar file when you run :key:`package`, and in general works like any
other project.
A project is defined by declaring a `lazy val` of type `Project <../../api/sbt/Project.html>`_.
For example, ::
lazy val util = project
lazy val core = project
The name of the val is used as the project's ID and base directory name.
The ID is used to refer to the project at the command line.
The base directory may be changed from the default using the `in` method.
For example, the following is a more explicit way to write the previous example: ::
lazy val util = project.in( file("util") )
lazy val core = project in file("core")
Dependencies
------------
Projects in the build can be completely independent of one another, but usually
they will be related to one another by some kind of dependency. There are two
types of dependencies: aggregate and classpath.
Aggregation
~~~~~~~~~~~
Aggregation means that running a task on the aggregate project will also
run it on the aggregated projects. For example, ::
lazy val root =
project.in( file(".") )
.aggregate(util, core)
lazy val util = project
lazy val core = project
In the above example, the root project aggregates `util` and `core`.
Start up sbt with two subprojects as in the example, and try :key:`compile`.
You should see that all three projects are compiled.
*In the project doing the aggregating*, the `root` project in
this case, you can control aggregation per-task.
For example, to avoid aggregating the :key:`update` task:
::
lazy val root =
project.in( file(".") )
.aggregate(util, core)
.settings(
aggregate in update := false
)
[...]
`aggregate in update` is the :key:`aggregate` key scoped to the :key:`update` task.
(See :doc:`scopes <Scopes>`.)
Note: aggregation will run the aggregated tasks in parallel and with no
defined ordering between them.
Classpath dependencies
~~~~~~~~~~~~~~~~~~~~~~
A project may depend on code in another project. This is done by adding
a `dependsOn` method call. For example, if `core` needed
`util` on its classpath, you would define `core` as:
::
lazy val core = project.dependsOn(util)
Now code in `core` can use classes from `util`.
This also creates an ordering between the projects when compiling them;
`util` must be updated and compiled before `core` can be compiled.
To depend on multiple projects, use multiple arguments to `dependsOn`,
like `dependsOn(bar, baz)`.
Per-configuration classpath dependencies
++++++++++++++++++++++++++++++++++++++++
`foo dependsOn(bar)` means that the `compile` configuration in
`foo` depends on the `compile` configuration in `bar`. You could
write this explicitly as `dependsOn(bar % "compile->compile")`.
The `->` in `"compile->compile"` means "depends on" so
`"test->compile"` means the `test` configuration in `foo` would
depend on the `compile` configuration in `bar`.
Omitting the `->config` part implies `->compile`, so
`dependsOn(bar % "test")` means that the `test` configuration in
`foo` depends on the `compile` configuration in `bar`.
A useful declaration is `"test->test"` which means `test` depends on `test`.
This allows you to put utility code for testing in `bar/src/test/scala` and then use that code in `foo/src/test/scala`,
for example.
You can have multiple configurations for a dependency, separated by
semicolons. For example,
`dependsOn(bar % "test->test;compile->compile")`.
Default root project
--------------------
If a project is not defined for the root directory in the build, sbt creates a default
one that aggregates all other projects in the build.
For example, if a project `hello-foo` is defined with `base = file("foo")`, it
will be contained in the subdirectory `foo`. Its sources could be
directly under `foo`, like `foo/Foo.scala`, or in
`foo/src/main/scala`. The usual sbt :doc:`directory structure <Directories>`
applies underneath `foo` with the exception of build definition files.
Any `.sbt` files in `foo`, say `foo/build.sbt`, will be merged
with the build definition for the entire build, but scoped to the
`hello-foo` project.
If your whole project is in `hello`, try defining a different version
(`version := "0.6"`) in `hello/build.sbt`, `hello/foo/build.sbt`,
and `hello/bar/build.sbt`. Now `show version` at the sbt interactive
prompt. You should get something like this (with whatever versions you
defined):
.. code-block:: console
> show version
[info] hello-foo/*:version
[info] 0.7
[info] hello-bar/*:version
[info] 0.9
[info] hello/*:version
[info] 0.5
`hello-foo/*:version` was defined in `hello/foo/build.sbt`,
`hello-bar/*:version` was defined in `hello/bar/build.sbt`, and
`hello/*:version` was defined in `hello/build.sbt`. Remember the
:doc:`syntax for scoped keys <Scopes>`. Each `version` key
is scoped to a project, based on the location of the `build.sbt`. But
all three `build.sbt` are part of the same build definition.
*Each project's settings can go in `.sbt` files in the base directory
of that project*, while the `.scala` file can be as simple as the one
shown above, listing the projects and base directories. *There is no
need to put settings in the `.scala` file.*
You may find it cleaner to put everything including settings in
`.scala` files in order to keep all build definition under a single
`project` directory, however. It's up to you.
You cannot have a `project` subdirectory or `project/*.scala` files
in the sub-projects. `foo/project/Build.scala` would be ignored.
Navigating projects interactively
---------------------------------
At the sbt interactive prompt, type `projects` to list your projects
and `project <projectname>` to select a current project. When you run
a task like :key:`compile`, it runs on the current project. So you don't
necessarily have to compile the root project, you could compile only a
subproject.
You can run a task in another project by explicitly specifying the
project ID, such as `subProjectID/compile`.
Common code
-----------
The definitions in `.sbt` files are not visible in other `.sbt` files.
In order to share code between `.sbt` files, define one or more Scala
files in the `project/` directory of the build root. This directory
is also an sbt project, but for your build.
For example:
`<root>/project/Common.scala` ::
import sbt._
import Keys._
object Common {
def text = "org.example"
}
`<root>/build.sbt` ::
organization := Common.text
See :doc:`Full-Def` for details.
Next
----
Move on to :doc:`using plugins <Using-Plugins>`.

View File

@ -1,124 +0,0 @@
=======
Running
=======
This page describes how to use `sbt` once you have set up your
project. It assumes you've :doc:`installed sbt <Setup>` and
created a :doc:`Hello, World <Hello>` or other project.
Interactive mode
----------------
Run sbt in your project directory with no arguments:
.. code-block:: console
$ sbt
Running sbt with no command line arguments starts it in interactive
mode. Interactive mode has a command prompt (with tab completion and
history!).
For example, you could type :key:`compile` at the sbt prompt:
.. code-block:: console
> compile
To :key:`compile` again, press up arrow and then enter.
To run your program, type :key:`run`.
To leave interactive mode, type `exit` or use Ctrl+D (Unix) or Ctrl+Z
(Windows).
Batch mode
----------
You can also run sbt in batch mode, specifying a space-separated list of
sbt commands as arguments. For sbt commands that take arguments, pass
the command and arguments as one argument to `sbt` by enclosing them
in quotes. For example,
.. code-block:: console
$ sbt clean compile "testOnly TestA TestB"
In this example, :key:`testOnly` has arguments, `TestA` and `TestB`.
The commands will be run in sequence (:key:`clean`, :key:`compile`, then
:key:`testOnly`).
Continuous build and test
-------------------------
To speed up your edit-compile-test cycle, you can ask sbt to
automatically recompile or run tests whenever you save a source file.
Make a command run when one or more source files change by prefixing the
command with `~`. For example, in interactive mode try:
.. code-block:: console
> ~ compile
Press enter to stop watching for changes.
You can use the `~` prefix with either interactive mode or batch mode.
See :doc:`/Detailed-Topics/Triggered-Execution` for more details.
Common commands
---------------
Here are some of the most common sbt commands. For a more complete list,
see :doc:`/Detailed-Topics/Command-Line-Reference`.
- :key:`clean` Deletes all generated files (in the :key:`target` directory).
- :key:`compile` Compiles the main sources (in `src/main/scala` and
`src/main/java` directories).
- :key:`test` Compiles and runs all tests.
- :key:`console` Starts the Scala interpreter with a classpath including
the compiled sources and all dependencies. To return to sbt, type
`:quit`, Ctrl+D (Unix), or Ctrl+Z (Windows).
- `run <argument>*` Runs the main class for the project in the same
virtual machine as `sbt`.
- :key:`package` Creates a jar file containing the files in
`src/main/resources` and the classes compiled from
`src/main/scala` and `src/main/java`.
- `help <command>` Displays detailed help for the specified command.
If no command is provided, displays brief descriptions of all
commands.
- `reload` Reloads the build definition (`build.sbt`,
`project/*.scala`, `project/*.sbt` files). Needed if you change
the build definition.
Tab completion
--------------
Interactive mode has tab completion, including at an empty prompt. A
special sbt convention is that pressing tab once may show only a subset
of most likely completions, while pressing it more times shows more
verbose choices.
History Commands
----------------
Interactive mode remembers history, even if you exit sbt and restart it.
The simplest way to access history is with the up arrow key. The
following commands are also supported:
- `!` Show history command help.
- `!!` Execute the previous command again.
- `!:` Show all previous commands.
- `!:n` Show the last n commands.
- `!n` Execute the command with index `n`, as shown by the `!:`
command.
- `!-n` Execute the nth command before this one.
- `!string` Execute the most recent command starting with 'string'
- `!?string` Execute the most recent command containing 'string'
Next
----
Move on to :doc:`understanding build.sbt <Basic-Def>`.

Some files were not shown because too many files have changed in this diff Show More