{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Spark\n",
    "\n",
    "[Apache Spark](https://spark.apache.org/) is a fast, general-purpose cluster computing engine written in Scala.\n",
    "This notebook shows how you can load and use Spark just like any other library.\n",
    "There is also a [Spark Magic](SparkUI.ipynb) that enables deeper integration.\n",
    "\n",
    "Running all cells loads Spark from Maven, starts a local session, estimates $\\pi$ by Monte Carlo sampling, and shuts the session down."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Load Spark and quiet its logging"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "%classpath add mvn org.apache.spark spark-sql_2.11 2.2.1\n",
    "// Spark logs verbosely at INFO level by default; raise the root logger\n",
    "// threshold so cell output stays readable.\n",
    "org.apache.log4j.Logger.getRootLogger().setLevel(org.apache.log4j.Level.ERROR);"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Start a local Spark session"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import org.apache.spark.sql.SparkSession\n",
    "\n",
    "// Run Spark in-process with 4 worker threads (`local[4]`). The web UI is\n",
    "// disabled because this demo does not need it and it would hold a local port.\n",
    "val spark = SparkSession.builder()\n",
    " .appName(\"Simple Application\")\n",
    " .master(\"local[4]\")\n",
    " .config(\"spark.ui.enabled\", \"false\")\n",
    " .getOrCreate()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Estimate $\\pi$ with Monte Carlo sampling\n",
    "\n",
    "A point $(x, y)$ drawn uniformly from the unit square lies inside the quarter circle $x^2 + y^2 < 1$ with probability $\\pi/4$, so $4 \\cdot \\mathrm{count} / N$ approximates $\\pi$."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "val NUM_SAMPLES = 10000000\n",
    "\n",
    "// Map each sample index to 1 if the random point lands inside the quarter\n",
    "// circle, else 0, then sum across partitions. Math.random() is unseeded,\n",
    "// so the estimate varies slightly between runs.\n",
    "val count = spark.sparkContext.parallelize(1 to NUM_SAMPLES).map{i =>\n",
    " val x = Math.random()\n",
    " val y = Math.random()\n",
    " if (x*x + y*y < 1) 1 else 0\n",
    "}.reduce(_ + _)\n",
    "\n",
    "println(\"Pi is roughly \" + 4.0 * count / NUM_SAMPLES)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "// Release the local cluster's threads and memory.\n",
    "spark.stop()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Scala",
   "language": "scala",
   "name": "scala"
  },
  "language_info": {
   "codemirror_mode": "text/x-scala",
   "file_extension": ".scala",
   "mimetype": "text/x-scala",
   "name": "Scala",
   "nbconverter_exporter": "",
   "version": "2.11.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}